//!
//! Management HTTP API
//!
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;

use anyhow::{anyhow, Context, Result};
use enumset::EnumSet;
use futures::StreamExt;
use futures::TryFutureExt;
use humantime::format_rfc3339;
use hyper::header;
use hyper::StatusCode;
use hyper::{Body, Request, Response, Uri};
use metrics::launch_timestamp::LaunchTimestamp;
use pageserver_api::models::AuxFilePolicy;
use pageserver_api::models::DownloadRemoteLayersTaskSpawnRequest;
use pageserver_api::models::IngestAuxFilesRequest;
use pageserver_api::models::ListAuxFilesRequest;
use pageserver_api::models::LocationConfig;
use pageserver_api::models::LocationConfigListResponse;
use pageserver_api::models::LocationConfigMode;
use pageserver_api::models::LsnLease;
use pageserver_api::models::LsnLeaseRequest;
use pageserver_api::models::ShardParameters;
use pageserver_api::models::TenantDetails;
use pageserver_api::models::TenantLocationConfigRequest;
use pageserver_api::models::TenantLocationConfigResponse;
use pageserver_api::models::TenantScanRemoteStorageResponse;
use pageserver_api::models::TenantScanRemoteStorageShard;
use pageserver_api::models::TenantShardLocation;
use pageserver_api::models::TenantShardSplitRequest;
use pageserver_api::models::TenantShardSplitResponse;
use pageserver_api::models::TenantSorting;
use pageserver_api::models::TimelineArchivalConfigRequest;
use pageserver_api::models::TopTenantShardItem;
use pageserver_api::models::TopTenantShardsRequest;
use pageserver_api::models::TopTenantShardsResponse;
use pageserver_api::shard::ShardCount;
use pageserver_api::shard::TenantShardId;
use remote_storage::DownloadError;
use remote_storage::GenericRemoteStorage;
use remote_storage::TimeTravelError;
use tenant_size_model::{svg::SvgBranchKind, SizeResult, StorageModel};
use tokio_util::io::StreamReader;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::auth::JwtAuth;
use utils::failpoint_support::failpoints_handler;
use utils::http::endpoint::prometheus_metrics_handler;
use utils::http::endpoint::request_span;
use utils::http::request::must_parse_query_param;
use utils::http::request::{get_request_param, must_get_query_param, parse_query_param};

use crate::context::{DownloadBehavior, RequestContext};
use crate::deletion_queue::DeletionQueueClient;
use crate::pgdatadir_mapping::LsnForTimestamp;
use crate::task_mgr::TaskKind;
use crate::tenant::config::{LocationConf, TenantConfOpt};
use crate::tenant::mgr::GetActiveTenantError;
use crate::tenant::mgr::{
    GetTenantError, TenantManager, TenantMapError, TenantMapInsertError, TenantSlotError,
    TenantSlotUpsertError, TenantStateError,
};
use crate::tenant::mgr::{TenantSlot, UpsertLocationError};
use crate::tenant::remote_timeline_client;
use crate::tenant::remote_timeline_client::download_index_part;
use crate::tenant::remote_timeline_client::list_remote_tenant_shards;
use crate::tenant::remote_timeline_client::list_remote_timelines;
use crate::tenant::secondary::SecondaryController;
use crate::tenant::size::ModelInputs;
use crate::tenant::storage_layer::LayerAccessStatsReset;
use crate::tenant::storage_layer::LayerName;
use crate::tenant::timeline::CompactFlags;
use crate::tenant::timeline::CompactionError;
use crate::tenant::timeline::Timeline;
use crate::tenant::GetTimelineError;
use crate::tenant::{LogicalSizeCalculationCause, PageReconstructError};
use crate::{config::PageServerConf, tenant::mgr};
use crate::{disk_usage_eviction_task, tenant};
use pageserver_api::models::{
    StatusResponse, TenantConfigRequest, TenantInfo, TimelineCreateRequest, TimelineGcRequest,
    TimelineInfo,
};
use utils::{
    auth::SwappableJwtAuth,
    generation::Generation,
    http::{
        endpoint::{self, attach_openapi_ui, auth_middleware, check_permission_with},
        error::{ApiError, HttpErrorBody},
        json::{json_request, json_response},
        request::parse_request_param,
        RequestExt, RouterBuilder,
    },
    id::{TenantId, TimelineId},
    lsn::Lsn,
};

// For APIs that require an Active tenant, how long should we block waiting for that state?
// This is not functionally necessary (clients will retry), but avoids generating a lot of
// failed API calls while tenants are activating.
#[cfg(not(feature = "testing"))]
pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);

// Tests run on slow/oversubscribed nodes, and may need to wait much longer for tenants to
// finish attaching, if calls to remote storage are slow.
#[cfg(feature = "testing")]
pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
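
// How the handlers below typically use this timeout (a sketch of the recurring
// pattern in this file, not new API): resolve the tenant shard, then block until
// it becomes Active or the timeout elapses, surfacing a retryable error.
//
//     let tenant = state
//         .tenant_manager
//         .get_attached_tenant_shard(tenant_shard_id)?;
//     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;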

pub struct State {
    conf: &'static PageServerConf,
    tenant_manager: Arc<TenantManager>,
    auth: Option<Arc<SwappableJwtAuth>>,
    allowlist_routes: Vec<Uri>,
    remote_storage: GenericRemoteStorage,
    broker_client: storage_broker::BrokerClientChannel,
    disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
    deletion_queue_client: DeletionQueueClient,
    secondary_controller: SecondaryController,
    latest_utilization: tokio::sync::Mutex<Option<(std::time::Instant, bytes::Bytes)>>,
}

impl State {
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        conf: &'static PageServerConf,
        tenant_manager: Arc<TenantManager>,
        auth: Option<Arc<SwappableJwtAuth>>,
        remote_storage: GenericRemoteStorage,
        broker_client: storage_broker::BrokerClientChannel,
        disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
        deletion_queue_client: DeletionQueueClient,
        secondary_controller: SecondaryController,
    ) -> anyhow::Result<Self> {
        let allowlist_routes = ["/v1/status", "/v1/doc", "/swagger.yml", "/metrics"]
            .iter()
            .map(|v| v.parse().unwrap())
            .collect::<Vec<_>>();
        Ok(Self {
            conf,
            tenant_manager,
            auth,
            allowlist_routes,
            remote_storage,
            broker_client,
            disk_usage_eviction_state,
            deletion_queue_client,
            secondary_controller,
            latest_utilization: Default::default(),
        })
    }
}

#[inline(always)]
fn get_state(request: &Request<Body>) -> &State {
    request
        .data::<Arc<State>>()
        .expect("unknown state type")
        .as_ref()
}

#[inline(always)]
fn get_config(request: &Request<Body>) -> &'static PageServerConf {
    get_state(request).conf
}

/// Check that the requester is authorized to operate on the given tenant
fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
    check_permission_with(request, |claims| {
        crate::auth::check_permission(claims, tenant_id)
    })
}
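
// The recurring call-site pattern for tenant-scoped endpoints in this file:
// parse the shard id from the request path, then authorize against the owning
// tenant before doing any work.
//
//     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
//     check_permission(&request, Some(tenant_shard_id.tenant_id))?;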

impl From<PageReconstructError> for ApiError {
    fn from(pre: PageReconstructError) -> ApiError {
        match pre {
            PageReconstructError::Other(pre) => ApiError::InternalServerError(pre),
            PageReconstructError::MissingKey(e) => {
                ApiError::InternalServerError(anyhow::anyhow!("{e}"))
            }
            PageReconstructError::Cancelled => ApiError::Cancelled,
            PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()),
            PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre),
        }
    }
}

impl From<TenantMapInsertError> for ApiError {
    fn from(tmie: TenantMapInsertError) -> ApiError {
        match tmie {
            TenantMapInsertError::SlotError(e) => e.into(),
            TenantMapInsertError::SlotUpsertError(e) => e.into(),
            TenantMapInsertError::Other(e) => ApiError::InternalServerError(e),
        }
    }
}

impl From<TenantSlotError> for ApiError {
    fn from(e: TenantSlotError) -> ApiError {
        use TenantSlotError::*;
        match e {
            NotFound(tenant_id) => {
                ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into())
            }
            InProgress => {
                ApiError::ResourceUnavailable("Tenant is being modified concurrently".into())
            }
            MapState(e) => e.into(),
        }
    }
}

impl From<TenantSlotUpsertError> for ApiError {
    fn from(e: TenantSlotUpsertError) -> ApiError {
        use TenantSlotUpsertError::*;
        match e {
            InternalError(e) => ApiError::InternalServerError(anyhow::anyhow!("{e}")),
            MapState(e) => e.into(),
            ShuttingDown(_) => ApiError::ShuttingDown,
        }
    }
}

impl From<UpsertLocationError> for ApiError {
    fn from(e: UpsertLocationError) -> ApiError {
        use UpsertLocationError::*;
        match e {
            BadRequest(e) => ApiError::BadRequest(e),
            Unavailable(_) => ApiError::ShuttingDown,
            e @ InProgress => ApiError::Conflict(format!("{e}")),
            Flush(e) | InternalError(e) => ApiError::InternalServerError(e),
        }
    }
}

impl From<TenantMapError> for ApiError {
    fn from(e: TenantMapError) -> ApiError {
        use TenantMapError::*;
        match e {
            StillInitializing | ShuttingDown => {
                ApiError::ResourceUnavailable(format!("{e}").into())
            }
        }
    }
}

impl From<TenantStateError> for ApiError {
    fn from(tse: TenantStateError) -> ApiError {
        match tse {
            TenantStateError::IsStopping(_) => {
                ApiError::ResourceUnavailable("Tenant is stopping".into())
            }
            TenantStateError::SlotError(e) => e.into(),
            TenantStateError::SlotUpsertError(e) => e.into(),
            TenantStateError::Other(e) => ApiError::InternalServerError(anyhow!(e)),
        }
    }
}

impl From<GetTenantError> for ApiError {
    fn from(tse: GetTenantError) -> ApiError {
        match tse {
            GetTenantError::NotFound(tid) => ApiError::NotFound(anyhow!("tenant {}", tid).into()),
            GetTenantError::NotActive(_) => {
                // Why is this not `ApiError::NotFound`?
                // Because we must be careful to never return 404 for a tenant if it does
                // in fact exist locally. If we did, the caller could draw the conclusion
                // that it can attach the tenant to another PS and we'd be in split-brain.
                ApiError::ResourceUnavailable("Tenant not yet active".into())
            }
            GetTenantError::MapState(e) => ApiError::ResourceUnavailable(format!("{e}").into()),
        }
    }
}
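
// Sketch of the resulting HTTP status codes for `GetTenantError` (assuming the
// usual `ApiError` mapping in `utils::http::error`, where NotFound is 404 and
// ResourceUnavailable is 503):
//
//     NotFound(..)  -> 404: the tenant is genuinely not attached here
//     NotActive(..) -> 503: exists locally but not Active yet; never 404,
//                      for the split-brain reason explained above
//     MapState(..)  -> 503: tenant map still initializing or shutting down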

impl From<GetTimelineError> for ApiError {
    fn from(gte: GetTimelineError) -> Self {
        // Rationale: tenant is activated only after eligible timelines activate
        ApiError::NotFound(gte.into())
    }
}

impl From<GetActiveTenantError> for ApiError {
    fn from(e: GetActiveTenantError) -> ApiError {
        match e {
            GetActiveTenantError::Broken(reason) => {
                ApiError::InternalServerError(anyhow!("tenant is broken: {}", reason))
            }
            GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{}", e)),
            GetActiveTenantError::Cancelled => ApiError::ShuttingDown,
            GetActiveTenantError::NotFound(gte) => gte.into(),
            GetActiveTenantError::WaitForActiveTimeout { .. } => {
                ApiError::ResourceUnavailable(format!("{}", e).into())
            }
            GetActiveTenantError::SwitchedTenant => {
                // in our HTTP handlers, this error doesn't happen
                // TODO: separate error types
                ApiError::ResourceUnavailable("switched tenant".into())
            }
        }
    }
}

impl From<crate::tenant::DeleteTimelineError> for ApiError {
    fn from(value: crate::tenant::DeleteTimelineError) -> Self {
        use crate::tenant::DeleteTimelineError::*;
        match value {
            NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
            HasChildren(children) => ApiError::PreconditionFailed(
                format!("Cannot delete timeline which has child timelines: {children:?}")
                    .into_boxed_str(),
            ),
            a @ AlreadyInProgress(_) => ApiError::Conflict(a.to_string()),
            Other(e) => ApiError::InternalServerError(e),
        }
    }
}

impl From<crate::tenant::mgr::DeleteTimelineError> for ApiError {
    fn from(value: crate::tenant::mgr::DeleteTimelineError) -> Self {
        use crate::tenant::mgr::DeleteTimelineError::*;
        match value {
            // Report Precondition Failed so the client can distinguish the
            // "tenant is missing" case from the "timeline is missing" case
            Tenant(GetTenantError::NotFound(..)) => ApiError::PreconditionFailed(
                "Requested tenant is missing".to_owned().into_boxed_str(),
            ),
            Tenant(t) => ApiError::from(t),
            Timeline(t) => ApiError::from(t),
        }
    }
}

impl From<crate::tenant::mgr::DeleteTenantError> for ApiError {
    fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self {
        use crate::tenant::mgr::DeleteTenantError::*;
        match value {
            SlotError(e) => e.into(),
            Other(o) => ApiError::InternalServerError(o),
            Cancelled => ApiError::ShuttingDown,
        }
    }
}

// Helper function to construct a TimelineInfo struct for a timeline
async fn build_timeline_info(
    timeline: &Arc<Timeline>,
    include_non_incremental_logical_size: bool,
    force_await_initial_logical_size: bool,
    ctx: &RequestContext,
) -> anyhow::Result<TimelineInfo> {
    crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();

    if force_await_initial_logical_size {
        timeline.clone().await_initial_logical_size().await
    }

    let mut info = build_timeline_info_common(
        timeline,
        ctx,
        tenant::timeline::GetLogicalSizePriority::Background,
    )
    .await?;
    if include_non_incremental_logical_size {
        // XXX we should be using spawn_ondemand_logical_size_calculation here.
        // Otherwise, if someone deletes the timeline / detaches the tenant while
        // we're executing this function, we will outlive the timeline on-disk state.
        info.current_logical_size_non_incremental = Some(
            timeline
                .get_current_logical_size_non_incremental(info.last_record_lsn, ctx)
                .await?,
        );
    }
    Ok(info)
}

async fn build_timeline_info_common(
    timeline: &Arc<Timeline>,
    ctx: &RequestContext,
    logical_size_task_priority: tenant::timeline::GetLogicalSizePriority,
) -> anyhow::Result<TimelineInfo> {
    crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
    let initdb_lsn = timeline.initdb_lsn;
    let last_record_lsn = timeline.get_last_record_lsn();
    let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
        let guard = timeline.last_received_wal.lock().unwrap();
        if let Some(info) = guard.as_ref() {
            (
                Some(format!("{}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only.
                Some(info.last_received_msg_lsn),
                Some(info.last_received_msg_ts),
            )
        } else {
            (None, None, None)
        }
    };

    let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
    let ancestor_lsn = match timeline.get_ancestor_lsn() {
        Lsn(0) => None,
        lsn @ Lsn(_) => Some(lsn),
    };
    let current_logical_size = timeline.get_current_logical_size(logical_size_task_priority, ctx);
    let current_physical_size = Some(timeline.layer_size_sum().await);
    let state = timeline.current_state();
    let remote_consistent_lsn_projected = timeline
        .get_remote_consistent_lsn_projected()
        .unwrap_or(Lsn(0));
    let remote_consistent_lsn_visible = timeline
        .get_remote_consistent_lsn_visible()
        .unwrap_or(Lsn(0));

    let walreceiver_status = timeline.walreceiver_status();

    let (pitr_history_size, within_ancestor_pitr) = timeline.get_pitr_history_stats();

    let info = TimelineInfo {
        tenant_id: timeline.tenant_shard_id,
        timeline_id: timeline.timeline_id,
        ancestor_timeline_id,
        ancestor_lsn,
        disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
        remote_consistent_lsn: remote_consistent_lsn_projected,
        remote_consistent_lsn_visible,
        initdb_lsn,
        last_record_lsn,
        prev_record_lsn: Some(timeline.get_prev_record_lsn()),
        latest_gc_cutoff_lsn: *timeline.get_latest_gc_cutoff_lsn(),
        current_logical_size: current_logical_size.size_dont_care_about_accuracy(),
        current_logical_size_is_accurate: match current_logical_size.accuracy() {
            tenant::timeline::logical_size::Accuracy::Approximate => false,
            tenant::timeline::logical_size::Accuracy::Exact => true,
        },
        directory_entries_counts: timeline.get_directory_metrics().to_vec(),
        current_physical_size,
        current_logical_size_non_incremental: None,
        pitr_history_size,
        within_ancestor_pitr,
        timeline_dir_layer_file_size_sum: None,
        wal_source_connstr,
        last_received_msg_lsn,
        last_received_msg_ts,
        pg_version: timeline.pg_version,

        state,

        walreceiver_status,

        last_aux_file_policy: timeline.last_aux_file_policy.load(),
    };
    Ok(info)
}

// healthcheck handler
async fn status_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let config = get_config(&request);
    json_response(StatusCode::OK, StatusResponse { id: config.id })
}
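
// Example exchange for the healthcheck above (a sketch: the listen address and
// node id are hypothetical; "/v1/status" is one of the allowlisted routes set
// up in `State::new`, and the response body echoes this pageserver's id):
//
//     $ curl -s http://127.0.0.1:9898/v1/status
//     {"id": 42}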

async fn reload_auth_validation_keys_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    check_permission(&request, None)?;
    let config = get_config(&request);
    let state = get_state(&request);
    let Some(shared_auth) = &state.auth else {
        return json_response(StatusCode::BAD_REQUEST, ());
    };
    // unwrap is ok because check is performed when creating config, so path is set and exists
    let key_path = config.auth_validation_public_key_path.as_ref().unwrap();
    info!("Reloading public key(s) for verifying JWT tokens from {key_path:?}");

    match JwtAuth::from_key_path(key_path) {
        Ok(new_auth) => {
            shared_auth.swap(new_auth);
            json_response(StatusCode::OK, ())
        }
        Err(e) => {
            let err_msg = "Error reloading public keys";
            warn!("Error reloading public keys from {key_path:?}: {e:}");
            json_response(
                StatusCode::INTERNAL_SERVER_ERROR,
                HttpErrorBody::from_msg(err_msg.to_string()),
            )
        }
    }
}
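
// Hedged invocation sketch for the reload handler above (assuming it is routed
// at a path like "/v1/reload_auth_validation_keys"; the router, not shown in
// this section, is authoritative for the method and path):
//
//     $ curl -X POST http://127.0.0.1:9898/v1/reload_auth_validation_keys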

async fn timeline_create_handler(
    mut request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    let request_data: TimelineCreateRequest = json_request(&mut request).await?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;

    let new_timeline_id = request_data.new_timeline_id;

    let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);

    let state = get_state(&request);

    async {
        let tenant = state
            .tenant_manager
            .get_attached_tenant_shard(tenant_shard_id)?;

        tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;

        if let Some(ancestor_id) = request_data.ancestor_timeline_id.as_ref() {
            tracing::info!(%ancestor_id, "starting to branch");
        } else {
            tracing::info!("bootstrapping");
        }

        match tenant
            .create_timeline(
                new_timeline_id,
                request_data.ancestor_timeline_id,
                request_data.ancestor_start_lsn,
                request_data.pg_version.unwrap_or(crate::DEFAULT_PG_VERSION),
                request_data.existing_initdb_timeline_id,
                state.broker_client.clone(),
                &ctx,
            )
            .await
        {
            Ok(new_timeline) => {
                // Created. Construct a TimelineInfo for it.
                let timeline_info = build_timeline_info_common(
                    &new_timeline,
                    &ctx,
                    tenant::timeline::GetLogicalSizePriority::User,
                )
                .await
                .map_err(ApiError::InternalServerError)?;
                json_response(StatusCode::CREATED, timeline_info)
            }
            Err(_) if tenant.cancel.is_cancelled() => {
                // In case we get some ugly error type during shutdown, cast it into a clean 503.
                json_response(
                    StatusCode::SERVICE_UNAVAILABLE,
                    HttpErrorBody::from_msg("Tenant shutting down".to_string()),
                )
            }
            Err(e @ tenant::CreateTimelineError::Conflict) => {
                json_response(StatusCode::CONFLICT, HttpErrorBody::from_msg(e.to_string()))
            }
            Err(e @ tenant::CreateTimelineError::AlreadyCreating) => json_response(
                StatusCode::TOO_MANY_REQUESTS,
                HttpErrorBody::from_msg(e.to_string()),
            ),
            Err(tenant::CreateTimelineError::AncestorLsn(err)) => json_response(
                StatusCode::NOT_ACCEPTABLE,
                HttpErrorBody::from_msg(format!("{err:#}")),
            ),
            Err(e @ tenant::CreateTimelineError::AncestorNotActive) => json_response(
                StatusCode::SERVICE_UNAVAILABLE,
                HttpErrorBody::from_msg(e.to_string()),
            ),
            Err(tenant::CreateTimelineError::ShuttingDown) => json_response(
                StatusCode::SERVICE_UNAVAILABLE,
                HttpErrorBody::from_msg("tenant shutting down".to_string()),
            ),
            Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
        }
    }
    .instrument(info_span!("timeline_create",
        tenant_id = %tenant_shard_id.tenant_id,
        shard_id = %tenant_shard_id.shard_slug(),
        timeline_id = %new_timeline_id,
        lsn=?request_data.ancestor_start_lsn,
        pg_version=?request_data.pg_version
    ))
    .await
}
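
// Hedged example request body for timeline creation (field names are taken from
// the `request_data` accesses above; the ids and LSN shown are placeholders,
// and the authoritative wire format is `TimelineCreateRequest`):
//
//     {
//         "new_timeline_id": "<hex timeline id>",
//         "ancestor_timeline_id": "<hex timeline id>", // optional: branch instead of bootstrap
//         "ancestor_start_lsn": "0/169AD58",           // optional: branch point
//         "pg_version": 16                             // optional: defaults to DEFAULT_PG_VERSION
//     }
//
// On success the handler responds 201 Created with the new timeline's
// `TimelineInfo`; 409/429/406/503 correspond to the error arms above.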

async fn timeline_list_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    let include_non_incremental_logical_size: Option<bool> =
        parse_query_param(&request, "include-non-incremental-logical-size")?;
    let force_await_initial_logical_size: Option<bool> =
        parse_query_param(&request, "force-await-initial-logical-size")?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;

    let state = get_state(&request);
    let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);

    let response_data = async {
        let tenant = state
            .tenant_manager
            .get_attached_tenant_shard(tenant_shard_id)?;

        tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;

        let timelines = tenant.list_timelines();

        let mut response_data = Vec::with_capacity(timelines.len());
        for timeline in timelines {
            let timeline_info = build_timeline_info(
                &timeline,
                include_non_incremental_logical_size.unwrap_or(false),
                force_await_initial_logical_size.unwrap_or(false),
                &ctx,
            )
            .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
            .await
            .context("Failed to convert tenant timeline into the local one")
            .map_err(ApiError::InternalServerError)?;

            response_data.push(timeline_info);
        }
        Ok::<Vec<TimelineInfo>, ApiError>(response_data)
    }
    .instrument(info_span!("timeline_list",
                tenant_id = %tenant_shard_id.tenant_id,
                shard_id = %tenant_shard_id.shard_slug()))
    .await?;

    json_response(StatusCode::OK, response_data)
}

async fn timeline_preserve_initdb_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    let state = get_state(&request);

    // Part of the process for disaster recovery from safekeeper-stored WAL:
    // If we don't recover into a new timeline but want to keep the timeline ID,
    // then the initdb archive is deleted. This endpoint copies it to a different
    // location where timeline recreation can find it.

    async {
        let tenant = state
            .tenant_manager
            .get_attached_tenant_shard(tenant_shard_id)?;

        let timeline = tenant.get_timeline(timeline_id, false)?;

        timeline
            .preserve_initdb_archive()
            .await
            .context("preserving initdb archive")
            .map_err(ApiError::InternalServerError)?;

        Ok::<_, ApiError>(())
    }
    .instrument(info_span!("timeline_preserve_initdb_archive",
                tenant_id = %tenant_shard_id.tenant_id,
                shard_id = %tenant_shard_id.shard_slug(),
                %timeline_id))
    .await?;

    json_response(StatusCode::OK, ())
}

async fn timeline_archival_config_handler(
    mut request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;

    let request_data: TimelineArchivalConfigRequest = json_request(&mut request).await?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    let state = get_state(&request);

    async {
        let tenant = state
            .tenant_manager
            .get_attached_tenant_shard(tenant_shard_id)?;

        tenant
            .apply_timeline_archival_config(timeline_id, request_data.state)
            .await
            .context("applying archival config")
            .map_err(ApiError::InternalServerError)?;
        Ok::<_, ApiError>(())
    }
    .instrument(info_span!("timeline_archival_config",
                tenant_id = %tenant_shard_id.tenant_id,
                shard_id = %tenant_shard_id.shard_slug(),
                state = ?request_data.state,
                %timeline_id))
    .await?;

    json_response(StatusCode::OK, ())
}

async fn timeline_detail_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    let include_non_incremental_logical_size: Option<bool> =
        parse_query_param(&request, "include-non-incremental-logical-size")?;
    let force_await_initial_logical_size: Option<bool> =
        parse_query_param(&request, "force-await-initial-logical-size")?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;

    // Logical size calculation needs downloading.
    let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    let state = get_state(&request);

    let timeline_info = async {
        let tenant = state
            .tenant_manager
            .get_attached_tenant_shard(tenant_shard_id)?;

        tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;

        let timeline = tenant.get_timeline(timeline_id, false)?;

        let timeline_info = build_timeline_info(
            &timeline,
            include_non_incremental_logical_size.unwrap_or(false),
            force_await_initial_logical_size.unwrap_or(false),
            &ctx,
        )
        .await
        .context("get local timeline info")
        .map_err(ApiError::InternalServerError)?;

        Ok::<_, ApiError>(timeline_info)
    }
    .instrument(info_span!("timeline_detail",
                tenant_id = %tenant_shard_id.tenant_id,
                shard_id = %tenant_shard_id.shard_slug(),
                %timeline_id))
    .await?;

    json_response(StatusCode::OK, timeline_info)
}

async fn get_lsn_by_timestamp_handler(
    request: Request<Body>,
    cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    let state = get_state(&request);

    if !tenant_shard_id.is_shard_zero() {
        // Requires SLRU contents, which are only stored on shard zero
        return Err(ApiError::BadRequest(anyhow!(
            "lsn_by_timestamp is only available on shard zero"
        )));
    }

    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    let timestamp_raw = must_get_query_param(&request, "timestamp")?;
    let timestamp = humantime::parse_rfc3339(&timestamp_raw)
        .with_context(|| format!("Invalid time: {:?}", timestamp_raw))
        .map_err(ApiError::BadRequest)?;
    let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);

    let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false);

    let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);

    let timeline =
        active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
            .await?;
    let result = timeline
        .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx)
        .await?;

    #[derive(serde::Serialize, Debug)]
    struct Result {
        lsn: Lsn,
        kind: &'static str,
        #[serde(default)]
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(flatten)]
        lease: Option<LsnLease>,
    }
    let (lsn, kind) = match result {
        LsnForTimestamp::Present(lsn) => (lsn, "present"),
        LsnForTimestamp::Future(lsn) => (lsn, "future"),
        LsnForTimestamp::Past(lsn) => (lsn, "past"),
        LsnForTimestamp::NoData(lsn) => (lsn, "nodata"),
    };

    let lease = if with_lease {
        timeline
            .make_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx)
            .inspect_err(|_| {
                warn!("failed to grant a lease to {}", lsn);
            })
            .ok()
    } else {
        None
    };

    let result = Result { lsn, kind, lease };
    let valid_until = result
        .lease
        .as_ref()
        .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string());
    tracing::info!(
        lsn=?result.lsn,
        kind=%result.kind,
        timestamp=%timestamp_raw,
        valid_until=?valid_until,
        "lsn_by_timestamp finished"
    );
    json_response(StatusCode::OK, result)
}
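
// Hedged request/response sketch for the handler above (path prefix elided;
// the query parameter names "timestamp" and "with_lease" come from the parsing
// code, and the response fields from the serialized `Result` struct):
//
//     GET .../timeline/<timeline_id>/get_lsn_by_timestamp?timestamp=2024-08-02T21:34:27Z&with_lease=true
//     {"lsn": "0/169AD58", "kind": "present", "valid_until": "..."}
//
// `kind` is one of "present", "future", "past" or "nodata"; the lease fields
// (the flattened `LsnLease`) appear only when a lease was requested and granted.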

async fn get_timestamp_of_lsn_handler(
    request: Request<Body>,
    _cancel: CancellationToken,
) -> Result<Response<Body>, ApiError> {
    let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    let state = get_state(&request);

    if !tenant_shard_id.is_shard_zero() {
        // Requires SLRU contents, which are only stored on shard zero
        return Err(ApiError::BadRequest(anyhow!(
            "get_timestamp_of_lsn is only available on shard zero"
        )));
    }

    let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;

    let lsn_str = must_get_query_param(&request, "lsn")?;
    let lsn = Lsn::from_str(&lsn_str)
        .with_context(|| format!("Invalid LSN: {lsn_str:?}"))
        .map_err(ApiError::BadRequest)?;

    let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    let timeline =
        active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
            .await?;
    let result = timeline.get_timestamp_for_lsn(lsn, &ctx).await?;

    match result {
        Some(time) => {
            let time = format_rfc3339(postgres_ffi::from_pg_timestamp(time)).to_string();
            json_response(StatusCode::OK, time)
        }
        None => Err(ApiError::NotFound(
            anyhow::anyhow!("Timestamp for lsn {} not found", lsn).into(),
        )),
    }
}
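
// Hedged sketch for the handler above (path prefix elided; the "lsn" query
// parameter comes from the parsing code, and the body is the RFC 3339 string
// produced by `format_rfc3339`):
//
//     GET .../timeline/<timeline_id>/get_timestamp_of_lsn?lsn=0/169AD58
//     "2024-08-02T21:34:27.000000Z"
//
// A 404 is returned when no timestamp is recorded for the given LSN.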
     865              : 
     866            0 : async fn timeline_delete_handler(
     867            0 :     request: Request<Body>,
     868            0 :     _cancel: CancellationToken,
     869            0 : ) -> Result<Response<Body>, ApiError> {
     870            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     871            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     872            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     873              : 
     874            0 :     let state = get_state(&request);
     875              : 
     876            0 :     let tenant = state
     877            0 :         .tenant_manager
     878            0 :         .get_attached_tenant_shard(tenant_shard_id)
     879            0 :         .map_err(|e| {
     880            0 :             match e {
     881              :                 // GetTenantError has a built-in conversion to ApiError, but in this context we don't
     882              :                 // want to treat missing tenants as 404, to avoid ambiguity with successful deletions.
     883            0 :                 GetTenantError::NotFound(_) => ApiError::PreconditionFailed(
     884            0 :                     "Requested tenant is missing".to_string().into_boxed_str(),
     885            0 :                 ),
     886            0 :                 e => e.into(),
     887              :             }
     888            0 :         })?;
     889            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     890            0 :     tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))
     891            0 :         .await?;
     892              : 
     893            0 :     json_response(StatusCode::ACCEPTED, ())
     894            0 : }
     895              : 
     896            0 : async fn tenant_reset_handler(
     897            0 :     request: Request<Body>,
     898            0 :     _cancel: CancellationToken,
     899            0 : ) -> Result<Response<Body>, ApiError> {
     900            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     901            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     902              : 
     903            0 :     let drop_cache: Option<bool> = parse_query_param(&request, "drop_cache")?;
     904              : 
     905            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
     906            0 :     let state = get_state(&request);
     907            0 :     state
     908            0 :         .tenant_manager
     909            0 :         .reset_tenant(tenant_shard_id, drop_cache.unwrap_or(false), &ctx)
     910            0 :         .await
     911            0 :         .map_err(ApiError::InternalServerError)?;
     912              : 
     913            0 :     json_response(StatusCode::OK, ())
     914            0 : }
     915              : 
     916            0 : async fn tenant_list_handler(
     917            0 :     request: Request<Body>,
     918            0 :     _cancel: CancellationToken,
     919            0 : ) -> Result<Response<Body>, ApiError> {
     920            0 :     check_permission(&request, None)?;
     921            0 :     let state = get_state(&request);
     922              : 
     923            0 :     let response_data = state
     924            0 :         .tenant_manager
     925            0 :         .list_tenants()
     926            0 :         .map_err(|_| {
     927            0 :             ApiError::ResourceUnavailable("Tenant map is initializing or shutting down".into())
     928            0 :         })?
     929            0 :         .iter()
     930            0 :         .map(|(id, state, gen)| TenantInfo {
     931            0 :             id: *id,
     932            0 :             state: state.clone(),
     933            0 :             current_physical_size: None,
     934            0 :             attachment_status: state.attachment_status(),
     935            0 :             generation: (*gen)
     936            0 :                 .into()
     937            0 :                 .expect("Tenants are always attached with a generation"),
     938            0 :         })
     939            0 :         .collect::<Vec<TenantInfo>>();
     940            0 : 
     941            0 :     json_response(StatusCode::OK, response_data)
     942            0 : }
     943              : 
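                       : // Example query sketch (route path and host are assumptions). Under the
                       : // "testing" feature, `activate=false` reports the current state without
                       : // auto-activating a waiting tenant:
                       : //
                       : //   curl "http://localhost:9898/v1/tenant/$TENANT_SHARD_ID?activate=false"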
     944            0 : async fn tenant_status(
     945            0 :     request: Request<Body>,
     946            0 :     _cancel: CancellationToken,
     947            0 : ) -> Result<Response<Body>, ApiError> {
     948            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     949            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     950            0 :     let state = get_state(&request);
     951            0 : 
     952            0 :     // In tests, we sometimes want to query the state of a tenant without auto-activating it if it's currently waiting to activate.
     953            0 :     let activate = true;
     954              :     #[cfg(feature = "testing")]
     955            0 :     let activate = parse_query_param(&request, "activate")?.unwrap_or(activate);
     956              : 
     957            0 :     let tenant_info = async {
     958            0 :         let tenant = state
     959            0 :             .tenant_manager
     960            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     961              : 
     962            0 :         if activate {
     963              :             // This is advisory: we prefer to let the tenant activate on-demand when this function is
     964              :             // called, but it is still valid to return 200 and describe the current state of the tenant
     965              :             // if it doesn't make it into an active state.
     966            0 :             tenant
     967            0 :                 .wait_to_become_active(ACTIVE_TENANT_TIMEOUT)
     968            0 :                 .await
     969            0 :                 .ok();
     970            0 :         }
     971              : 
     972              :         // Calculate total physical size of all timelines
     973            0 :         let mut current_physical_size = 0;
     974            0 :         for timeline in tenant.list_timelines().iter() {
     975            0 :             current_physical_size += timeline.layer_size_sum().await;
     976              :         }
     977              : 
     978            0 :         let state = tenant.current_state();
     979            0 :         Result::<_, ApiError>::Ok(TenantDetails {
     980            0 :             tenant_info: TenantInfo {
     981            0 :                 id: tenant_shard_id,
     982            0 :                 state: state.clone(),
     983            0 :                 current_physical_size: Some(current_physical_size),
     984            0 :                 attachment_status: state.attachment_status(),
     985            0 :                 generation: tenant
     986            0 :                     .generation()
     987            0 :                     .into()
     988            0 :                     .expect("Tenants are always attached with a generation"),
     989            0 :             },
     990            0 :             walredo: tenant.wal_redo_manager_status(),
     991            0 :             timelines: tenant.list_timeline_ids(),
     992            0 :         })
     993            0 :     }
     994            0 :     .instrument(info_span!("tenant_status_handler",
     995              :                 tenant_id = %tenant_shard_id.tenant_id,
     996            0 :                 shard_id = %tenant_shard_id.shard_slug()))
     997            0 :     .await?;
     998              : 
     999            0 :     json_response(StatusCode::OK, tenant_info)
    1000            0 : }
    1001              : 
    1002            0 : async fn tenant_delete_handler(
    1003            0 :     request: Request<Body>,
    1004            0 :     _cancel: CancellationToken,
    1005            0 : ) -> Result<Response<Body>, ApiError> {
    1006              :     // TODO openapi spec
    1007            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1008            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1009              : 
    1010            0 :     let state = get_state(&request);
    1011            0 : 
    1012            0 :     state
    1013            0 :         .tenant_manager
    1014            0 :         .delete_tenant(tenant_shard_id)
    1015            0 :         .instrument(info_span!("tenant_delete_handler",
    1016              :             tenant_id = %tenant_shard_id.tenant_id,
    1017            0 :             shard_id = %tenant_shard_id.shard_slug()
    1018              :         ))
    1019            0 :         .await?;
    1020              : 
    1021            0 :     json_response(StatusCode::OK, ())
    1022            0 : }
    1023              : 
    1024              : /// HTTP endpoint to query the current tenant_size of a tenant.
    1025              : ///
    1026              : /// This is not used by consumption metrics under [`crate::consumption_metrics`], but can be used
    1027              : /// to debug any of the calculations. Requires the `tenant_shard_id` request parameter and supports
    1028              : /// `inputs_only=true|false` (default false), which is useful for debugging failures to
    1029              : /// calculate model values.
    1030              : ///
    1031              : /// The `retention_period` query parameter overrides the cutoff that is used to calculate the size
    1032              : /// (only if it is shorter than the real cutoff).
    1033              : ///
    1034              : /// Note: we don't update the cached size or the Prometheus metric here.
    1035              : /// The retention period might differ from the configured one, and it's useful to have a way to
    1036              : /// calculate the size without modifying any state.
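                       : ///
                       : /// A sketch of example invocations, assuming this handler is mounted at
                       : /// `/v1/tenant/:tenant_shard_id/synthetic_size` (path, host, and port are assumptions):
                       : ///
                       : /// ```text
                       : /// # inputs only, as JSON:
                       : /// curl "http://localhost:9898/v1/tenant/$TENANT_SHARD_ID/synthetic_size?inputs_only=true"
                       : /// # full model as an HTML/SVG debug page:
                       : /// curl -H "Accept: text/html" "http://localhost:9898/v1/tenant/$TENANT_SHARD_ID/synthetic_size"
                       : /// ```
                       : ///
                       : /// Combining `inputs_only=true` with `Accept: text/html` is rejected with 400 below.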
    1037            0 : async fn tenant_size_handler(
    1038            0 :     request: Request<Body>,
    1039            0 :     cancel: CancellationToken,
    1040            0 : ) -> Result<Response<Body>, ApiError> {
    1041            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1042            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1043            0 :     let inputs_only: Option<bool> = parse_query_param(&request, "inputs_only")?;
    1044            0 :     let retention_period: Option<u64> = parse_query_param(&request, "retention_period")?;
    1045            0 :     let headers = request.headers();
    1046            0 :     let state = get_state(&request);
    1047            0 : 
    1048            0 :     if !tenant_shard_id.is_shard_zero() {
    1049            0 :         return Err(ApiError::BadRequest(anyhow!(
    1050            0 :             "Size calculations are only available on shard zero"
    1051            0 :         )));
    1052            0 :     }
    1053            0 : 
    1054            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1055            0 :     let tenant = state
    1056            0 :         .tenant_manager
    1057            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1058            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1059              : 
    1060              :     // this can be a long operation
    1061            0 :     let inputs = tenant
    1062            0 :         .gather_size_inputs(
    1063            0 :             retention_period,
    1064            0 :             LogicalSizeCalculationCause::TenantSizeHandler,
    1065            0 :             &cancel,
    1066            0 :             &ctx,
    1067            0 :         )
    1068            0 :         .await
    1069            0 :         .map_err(|e| match e {
    1070            0 :             crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown,
    1071            0 :             other => ApiError::InternalServerError(anyhow::anyhow!(other)),
    1072            0 :         })?;
    1073              : 
    1074            0 :     let mut sizes = None;
    1075            0 :     let accepts_html = headers
    1076            0 :         .get(header::ACCEPT)
    1077            0 :         .map(|v| v == "text/html")
    1078            0 :         .unwrap_or_default();
    1079            0 :     if !inputs_only.unwrap_or(false) {
    1080            0 :         let storage_model = inputs.calculate_model();
    1081            0 :         let size = storage_model.calculate();
    1082            0 : 
    1083            0 :         // If the Accept header requests HTML, return HTML
    1084            0 :         if accepts_html {
    1085            0 :             return synthetic_size_html_response(inputs, storage_model, size);
    1086            0 :         }
    1087            0 :         sizes = Some(size);
    1088            0 :     } else if accepts_html {
    1089            0 :         return Err(ApiError::BadRequest(anyhow!(
    1090            0 :             "inputs_only parameter is incompatible with html output request"
    1091            0 :         )));
    1092            0 :     }
    1093              : 
    1094              :     /// The type resides in the pageserver so as not to expose `ModelInputs` outside of it.
    1095              :     #[derive(serde::Serialize)]
    1096              :     struct TenantHistorySize {
    1097              :         id: TenantId,
    1098              :         /// Size is a mixture of WAL and logical size, so the unit is bytes.
    1099              :         ///
    1100              :         /// Will be null if `?inputs_only=true` was given.
    1101              :         size: Option<u64>,
    1102              :         /// Size of each segment used in the model.
    1103              :         /// Will be null if `?inputs_only=true` was given.
    1104              :         segment_sizes: Option<Vec<tenant_size_model::SegmentSizeResult>>,
    1105              :         inputs: crate::tenant::size::ModelInputs,
    1106              :     }
    1107              : 
    1108            0 :     json_response(
    1109            0 :         StatusCode::OK,
    1110            0 :         TenantHistorySize {
    1111            0 :             id: tenant_shard_id.tenant_id,
    1112            0 :             size: sizes.as_ref().map(|x| x.total_size),
    1113            0 :             segment_sizes: sizes.map(|x| x.segments),
    1114            0 :             inputs,
    1115            0 :         },
    1116            0 :     )
    1117            0 : }
    1118              : 
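                       : // Request body sketch for TenantShardSplitRequest; the field names match the
                       : // fields read below, while the route path and values are assumptions:
                       : //
                       : //   PUT /v1/tenant/:tenant_shard_id/shard_split
                       : //   { "new_shard_count": 4, "new_stripe_size": 32768 }
                       : //
                       : // `new_stripe_size` is optional.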
    1119            0 : async fn tenant_shard_split_handler(
    1120            0 :     mut request: Request<Body>,
    1121            0 :     _cancel: CancellationToken,
    1122            0 : ) -> Result<Response<Body>, ApiError> {
    1123            0 :     let req: TenantShardSplitRequest = json_request(&mut request).await?;
    1124              : 
    1125            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1126            0 :     let state = get_state(&request);
    1127            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    1128              : 
    1129            0 :     let tenant = state
    1130            0 :         .tenant_manager
    1131            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1132            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1133              : 
    1134            0 :     let new_shards = state
    1135            0 :         .tenant_manager
    1136            0 :         .shard_split(
    1137            0 :             tenant,
    1138            0 :             ShardCount::new(req.new_shard_count),
    1139            0 :             req.new_stripe_size,
    1140            0 :             &ctx,
    1141            0 :         )
    1142            0 :         .await
    1143            0 :         .map_err(ApiError::InternalServerError)?;
    1144              : 
    1145            0 :     json_response(StatusCode::OK, TenantShardSplitResponse { new_shards })
    1146            0 : }
    1147              : 
    1148            0 : async fn layer_map_info_handler(
    1149            0 :     request: Request<Body>,
    1150            0 :     _cancel: CancellationToken,
    1151            0 : ) -> Result<Response<Body>, ApiError> {
    1152            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1153            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1154            0 :     let reset: LayerAccessStatsReset =
    1155            0 :         parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
    1156            0 :     let state = get_state(&request);
    1157            0 : 
    1158            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1159              : 
    1160            0 :     let timeline =
    1161            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1162            0 :             .await?;
    1163            0 :     let layer_map_info = timeline.layer_map_info(reset).await;
    1164              : 
    1165            0 :     json_response(StatusCode::OK, layer_map_info)
    1166            0 : }
    1167              : 
    1168            0 : async fn layer_download_handler(
    1169            0 :     request: Request<Body>,
    1170            0 :     _cancel: CancellationToken,
    1171            0 : ) -> Result<Response<Body>, ApiError> {
    1172            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1173            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1174            0 :     let layer_file_name = get_request_param(&request, "layer_file_name")?;
    1175            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1176            0 :     let layer_name = LayerName::from_str(layer_file_name)
    1177            0 :         .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
    1178            0 :     let state = get_state(&request);
    1179              : 
    1180            0 :     let timeline =
    1181            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1182            0 :             .await?;
    1183            0 :     let downloaded = timeline
    1184            0 :         .download_layer(&layer_name)
    1185            0 :         .await
    1186            0 :         .map_err(ApiError::InternalServerError)?;
    1187              : 
    1188            0 :     match downloaded {
    1189            0 :         Some(true) => json_response(StatusCode::OK, ()),
    1190            0 :         Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
    1191            0 :         None => json_response(
    1192            0 :             StatusCode::BAD_REQUEST,
    1193            0 :             format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
    1194            0 :         ),
    1195              :     }
    1196            0 : }
    1197              : 
    1198            0 : async fn evict_timeline_layer_handler(
    1199            0 :     request: Request<Body>,
    1200            0 :     _cancel: CancellationToken,
    1201            0 : ) -> Result<Response<Body>, ApiError> {
    1202            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1203            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1204            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1205            0 :     let layer_file_name = get_request_param(&request, "layer_file_name")?;
    1206            0 :     let state = get_state(&request);
    1207              : 
    1208            0 :     let layer_name = LayerName::from_str(layer_file_name)
    1209            0 :         .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
    1210              : 
    1211            0 :     let timeline =
    1212            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1213            0 :             .await?;
    1214            0 :     let evicted = timeline
    1215            0 :         .evict_layer(&layer_name)
    1216            0 :         .await
    1217            0 :         .map_err(ApiError::InternalServerError)?;
    1218              : 
    1219            0 :     match evicted {
    1220            0 :         Some(true) => json_response(StatusCode::OK, ()),
    1221            0 :         Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
    1222            0 :         None => json_response(
    1223            0 :             StatusCode::BAD_REQUEST,
    1224            0 :             format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
    1225            0 :         ),
    1226              :     }
    1227            0 : }
    1228              : 
    1229              : /// Get tenant_size SVG graph along with the JSON data.
    1230            0 : fn synthetic_size_html_response(
    1231            0 :     inputs: ModelInputs,
    1232            0 :     storage_model: StorageModel,
    1233            0 :     sizes: SizeResult,
    1234            0 : ) -> Result<Response<Body>, ApiError> {
    1235            0 :     let mut timeline_ids: Vec<String> = Vec::new();
    1236            0 :     let mut timeline_map: HashMap<TimelineId, usize> = HashMap::new();
    1237            0 :     for (index, ti) in inputs.timeline_inputs.iter().enumerate() {
    1238            0 :         timeline_map.insert(ti.timeline_id, index);
    1239            0 :         timeline_ids.push(ti.timeline_id.to_string());
    1240            0 :     }
    1241            0 :     let seg_to_branch: Vec<(usize, SvgBranchKind)> = inputs
    1242            0 :         .segments
    1243            0 :         .iter()
    1244            0 :         .map(|seg| {
    1245            0 :             (
    1246            0 :                 *timeline_map.get(&seg.timeline_id).unwrap(),
    1247            0 :                 seg.kind.into(),
    1248            0 :             )
    1249            0 :         })
    1250            0 :         .collect();
    1251              : 
    1252            0 :     let svg =
    1253            0 :         tenant_size_model::svg::draw_svg(&storage_model, &timeline_ids, &seg_to_branch, &sizes)
    1254            0 :             .map_err(ApiError::InternalServerError)?;
    1255              : 
    1256            0 :     let mut response = String::new();
    1257            0 : 
    1258            0 :     use std::fmt::Write;
    1259            0 :     write!(response, "<html>\n<body>\n").unwrap();
    1260            0 :     write!(response, "<div>\n{svg}\n</div>").unwrap();
    1261            0 :     writeln!(response, "Project size: {}", sizes.total_size).unwrap();
    1262            0 :     writeln!(response, "<pre>").unwrap();
    1263            0 :     writeln!(
    1264            0 :         response,
    1265            0 :         "{}",
    1266            0 :         serde_json::to_string_pretty(&inputs).unwrap()
    1267            0 :     )
    1268            0 :     .unwrap();
    1269            0 :     writeln!(
    1270            0 :         response,
    1271            0 :         "{}",
    1272            0 :         serde_json::to_string_pretty(&sizes.segments).unwrap()
    1273            0 :     )
    1274            0 :     .unwrap();
    1275            0 :     writeln!(response, "</pre>").unwrap();
    1276            0 :     write!(response, "</body>\n</html>\n").unwrap();
    1277            0 : 
    1278            0 :     html_response(StatusCode::OK, response)
    1279            0 : }
    1280              : 
    1281            0 : pub fn html_response(status: StatusCode, data: String) -> Result<Response<Body>, ApiError> {
    1282            0 :     let response = Response::builder()
    1283            0 :         .status(status)
    1284            0 :         .header(header::CONTENT_TYPE, "text/html")
    1285            0 :         .body(Body::from(data.as_bytes().to_vec()))
    1286            0 :         .map_err(|e| ApiError::InternalServerError(e.into()))?;
    1287            0 :     Ok(response)
    1288            0 : }
    1289              : 
    1290            0 : async fn get_tenant_config_handler(
    1291            0 :     request: Request<Body>,
    1292            0 :     _cancel: CancellationToken,
    1293            0 : ) -> Result<Response<Body>, ApiError> {
    1294            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1295            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1296            0 :     let state = get_state(&request);
    1297              : 
    1298            0 :     let tenant = state
    1299            0 :         .tenant_manager
    1300            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1301              : 
    1302            0 :     let response = HashMap::from([
    1303              :         (
    1304              :             "tenant_specific_overrides",
    1305            0 :             serde_json::to_value(tenant.tenant_specific_overrides())
    1306            0 :                 .context("serializing tenant specific overrides")
    1307            0 :                 .map_err(ApiError::InternalServerError)?,
    1308              :         ),
    1309              :         (
    1310            0 :             "effective_config",
    1311            0 :             serde_json::to_value(tenant.effective_config())
    1312            0 :                 .context("serializing effective config")
    1313            0 :                 .map_err(ApiError::InternalServerError)?,
    1314              :         ),
    1315              :     ]);
    1316              : 
    1317            0 :     json_response(StatusCode::OK, response)
    1318            0 : }
    1319              : 
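                       : // Body sketch for TenantConfigRequest (route path and the example config key
                       : // are assumptions; the set of valid keys is defined by TenantConfOpt):
                       : //
                       : //   PUT /v1/tenant/config
                       : //   { "tenant_id": "<hex tenant id>", "gc_horizon": 67108864 }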
    1320            0 : async fn update_tenant_config_handler(
    1321            0 :     mut request: Request<Body>,
    1322            0 :     _cancel: CancellationToken,
    1323            0 : ) -> Result<Response<Body>, ApiError> {
    1324            0 :     let request_data: TenantConfigRequest = json_request(&mut request).await?;
    1325            0 :     let tenant_id = request_data.tenant_id;
    1326            0 :     check_permission(&request, Some(tenant_id))?;
    1327              : 
    1328            0 :     let new_tenant_conf =
    1329            0 :         TenantConfOpt::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
    1330              : 
    1331            0 :     let state = get_state(&request);
    1332            0 : 
    1333            0 :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    1334              : 
    1335            0 :     let tenant = state
    1336            0 :         .tenant_manager
    1337            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1338            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1339              : 
    1340              :     // This is a legacy API that only operates on attached tenants: the preferred
    1341              :     // API to use is the location_config/ endpoint, which lets the caller provide
    1342              :     // the full LocationConf.
    1343            0 :     let location_conf = LocationConf::attached_single(
    1344            0 :         new_tenant_conf.clone(),
    1345            0 :         tenant.get_generation(),
    1346            0 :         &ShardParameters::default(),
    1347            0 :     );
    1348            0 : 
    1349            0 :     crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
    1350            0 :         .await
    1351            0 :         .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    1352            0 :     tenant.set_new_tenant_config(new_tenant_conf);
    1353            0 : 
    1354            0 :     json_response(StatusCode::OK, ())
    1355            0 : }
    1356              : 
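                       : // Upsert sketch for this endpoint (route path, mode spelling, and values are
                       : // assumptions). When `flush_ms` is given, a heatmap upload is also attempted;
                       : // `lazy=true` defers activation as described below:
                       : //
                       : //   PUT ".../v1/tenant/$TENANT_SHARD_ID/location_config?flush_ms=5000&lazy=true"
                       : //   { "mode": "AttachedSingle", "generation": 1, "tenant_conf": {} }
                       : //
                       : // Sending "mode": "Detached" instead removes the shard's local state.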
    1357            0 : async fn put_tenant_location_config_handler(
    1358            0 :     mut request: Request<Body>,
    1359            0 :     _cancel: CancellationToken,
    1360            0 : ) -> Result<Response<Body>, ApiError> {
    1361            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1362              : 
    1363            0 :     let request_data: TenantLocationConfigRequest = json_request(&mut request).await?;
    1364            0 :     let flush = parse_query_param(&request, "flush_ms")?.map(Duration::from_millis);
    1365            0 :     let lazy = parse_query_param(&request, "lazy")?.unwrap_or(false);
    1366            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1367              : 
    1368            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    1369            0 :     let state = get_state(&request);
    1370            0 :     let conf = state.conf;
    1371            0 : 
    1372            0 :     // The `Detached` state is special: rather than upserting a tenant, it removes
    1373            0 :     // the tenant's local disk content and drops it from memory.
    1374            0 :     if let LocationConfigMode::Detached = request_data.config.mode {
    1375            0 :         if let Err(e) = state
    1376            0 :             .tenant_manager
    1377            0 :             .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client)
    1378            0 :             .instrument(info_span!("tenant_detach",
    1379              :                 tenant_id = %tenant_shard_id.tenant_id,
    1380            0 :                 shard_id = %tenant_shard_id.shard_slug()
    1381              :             ))
    1382            0 :             .await
    1383              :         {
    1384            0 :             match e {
    1385            0 :                 TenantStateError::SlotError(TenantSlotError::NotFound(_)) => {
    1386            0 :                     // This API is idempotent: a NotFound on a detach is fine.
    1387            0 :                 }
    1388            0 :                 _ => return Err(e.into()),
    1389              :             }
    1390            0 :         }
    1391            0 :         return json_response(StatusCode::OK, ());
    1392            0 :     }
    1393              : 
    1394            0 :     let location_conf =
    1395            0 :         LocationConf::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
    1396              : 
    1397              :     // lazy==true queues the tenant up for activation, or lets it jump the queue when a compute
    1398              :     // connects, similar to the ordering used at startup.
    1399            0 :     let spawn_mode = if lazy {
    1400            0 :         tenant::SpawnMode::Lazy
    1401              :     } else {
    1402            0 :         tenant::SpawnMode::Eager
    1403              :     };
    1404              : 
    1405            0 :     let tenant = state
    1406            0 :         .tenant_manager
    1407            0 :         .upsert_location(tenant_shard_id, location_conf, flush, spawn_mode, &ctx)
    1408            0 :         .await?;
    1409            0 :     let stripe_size = tenant.as_ref().map(|t| t.get_shard_stripe_size());
    1410            0 :     let attached = tenant.is_some();
    1411              : 
    1412            0 :     if let Some(_flush_ms) = flush {
    1413            0 :         match state
    1414            0 :             .secondary_controller
    1415            0 :             .upload_tenant(tenant_shard_id)
    1416            0 :             .await
    1417              :         {
    1418              :             Ok(()) => {
    1419            0 :                 tracing::info!("Uploaded heatmap during flush");
    1420              :             }
    1421            0 :             Err(e) => {
    1422            0 :                 tracing::warn!("Failed to flush heatmap: {e}");
    1423              :             }
    1424              :         }
    1425              :     } else {
    1426            0 :         tracing::info!("No flush requested when configuring");
    1427              :     }
    1428              : 
    1429              :     // This API returns a vector of pageservers where the tenant is attached: this is
    1430              :     // primarily for use in the sharding service.  For compatibility, we also return this
    1431              :     // when called directly on a pageserver, but the payload always contains zero or one shards.
    1432            0 :     let mut response = TenantLocationConfigResponse {
    1433            0 :         shards: Vec::new(),
    1434            0 :         stripe_size: None,
    1435            0 :     };
    1436            0 :     if attached {
    1437            0 :         response.shards.push(TenantShardLocation {
    1438            0 :             shard_id: tenant_shard_id,
    1439            0 :             node_id: state.conf.id,
    1440            0 :         });
    1441            0 :         if tenant_shard_id.shard_count.count() > 1 {
    1442              :             // Stripe size should be set if we are attached
    1443            0 :             debug_assert!(stripe_size.is_some());
    1444            0 :             response.stripe_size = stripe_size;
    1445            0 :         }
    1446            0 :     }
    1447              : 
    1448            0 :     json_response(StatusCode::OK, response)
    1449            0 : }
    1450              : 
    1451            0 : async fn list_location_config_handler(
    1452            0 :     request: Request<Body>,
    1453            0 :     _cancel: CancellationToken,
    1454            0 : ) -> Result<Response<Body>, ApiError> {
    1455            0 :     let state = get_state(&request);
    1456            0 :     let slots = state.tenant_manager.list();
    1457            0 :     let result = LocationConfigListResponse {
    1458            0 :         tenant_shards: slots
    1459            0 :             .into_iter()
    1460            0 :             .map(|(tenant_shard_id, slot)| {
    1461            0 :                 let v = match slot {
    1462            0 :                     TenantSlot::Attached(t) => Some(t.get_location_conf()),
    1463            0 :                     TenantSlot::Secondary(s) => Some(s.get_location_conf()),
    1464            0 :                     TenantSlot::InProgress(_) => None,
    1465              :                 };
    1466            0 :                 (tenant_shard_id, v)
    1467            0 :             })
    1468            0 :             .collect(),
    1469            0 :     };
    1470            0 :     json_response(StatusCode::OK, result)
    1471            0 : }
    1472              : 
    1473            0 : async fn get_location_config_handler(
    1474            0 :     request: Request<Body>,
    1475            0 :     _cancel: CancellationToken,
    1476            0 : ) -> Result<Response<Body>, ApiError> {
    1477            0 :     let state = get_state(&request);
    1478            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1479            0 :     let slot = state.tenant_manager.get(tenant_shard_id);
    1480              : 
    1481            0 :     let Some(slot) = slot else {
    1482            0 :         return Err(ApiError::NotFound(
    1483            0 :             anyhow::anyhow!("Tenant shard not found").into(),
    1484            0 :         ));
    1485              :     };
    1486              : 
    1487            0 :     let result: Option<LocationConfig> = match slot {
    1488            0 :         TenantSlot::Attached(t) => Some(t.get_location_conf()),
    1489            0 :         TenantSlot::Secondary(s) => Some(s.get_location_conf()),
    1490            0 :         TenantSlot::InProgress(_) => None,
    1491              :     };
    1492              : 
    1493            0 :     json_response(StatusCode::OK, result)
    1494            0 : }
    1495              : 
    1496              : // Do a time travel recovery on the given tenant/tenant shard. The tenant needs to be detached
    1497              : // (from all pageservers), as time travel invalidates consistency assumptions.
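                       : //
                       : // Example invocation sketch (HTTP method, route path, and host are assumptions).
                       : // Both timestamps are RFC 3339, and `travel_to` must not be later than
                       : // `done_if_after`, as validated below:
                       : //
                       : //   curl -X PUT "http://localhost:9898/v1/tenant/$TENANT_SHARD_ID/time_travel_remote_storage?travel_to=2024-07-01T00:00:00Z&done_if_after=2024-07-02T00:00:00Z"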
    1498            0 : async fn tenant_time_travel_remote_storage_handler(
    1499            0 :     request: Request<Body>,
    1500            0 :     cancel: CancellationToken,
    1501            0 : ) -> Result<Response<Body>, ApiError> {
    1502            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1503              : 
    1504            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1505              : 
    1506            0 :     let timestamp_raw = must_get_query_param(&request, "travel_to")?;
    1507            0 :     let timestamp = humantime::parse_rfc3339(&timestamp_raw)
    1508            0 :         .with_context(|| format!("Invalid time for travel_to: {timestamp_raw:?}"))
    1509            0 :         .map_err(ApiError::BadRequest)?;
    1510              : 
    1511            0 :     let done_if_after_raw = must_get_query_param(&request, "done_if_after")?;
    1512            0 :     let done_if_after = humantime::parse_rfc3339(&done_if_after_raw)
    1513            0 :         .with_context(|| format!("Invalid time for done_if_after: {done_if_after_raw:?}"))
    1514            0 :         .map_err(ApiError::BadRequest)?;
    1515              : 
    1516              :     // This is just a sanity check to fend off naive misuse of the API:
    1517              :     // the tenant needs to be detached *everywhere*
    1518            0 :     let state = get_state(&request);
    1519            0 :     let we_manage_tenant = state.tenant_manager.manages_tenant_shard(tenant_shard_id);
    1520            0 :     if we_manage_tenant {
    1521            0 :         return Err(ApiError::BadRequest(anyhow!(
    1522            0 :             "Tenant {tenant_shard_id} is already attached at this pageserver"
    1523            0 :         )));
    1524            0 :     }
    1525            0 : 
    1526            0 :     if timestamp > done_if_after {
    1527            0 :         return Err(ApiError::BadRequest(anyhow!(
    1528            0 :             "The done_if_after timestamp comes before the timestamp to recover to"
    1529            0 :         )));
    1530            0 :     }
    1531            0 : 
    1532            0 :     tracing::info!("Issuing time travel request internally. timestamp={timestamp_raw}, done_if_after={done_if_after_raw}");
    1533              : 
    1534            0 :     remote_timeline_client::upload::time_travel_recover_tenant(
    1535            0 :         &state.remote_storage,
    1536            0 :         &tenant_shard_id,
    1537            0 :         timestamp,
    1538            0 :         done_if_after,
    1539            0 :         &cancel,
    1540            0 :     )
    1541            0 :     .await
    1542            0 :     .map_err(|e| match e {
    1543            0 :         TimeTravelError::BadInput(e) => {
    1544            0 :             warn!("bad input error: {e}");
    1545            0 :             ApiError::BadRequest(anyhow!("bad input error"))
    1546              :         }
    1547              :         TimeTravelError::Unimplemented => {
    1548            0 :             ApiError::BadRequest(anyhow!("unimplemented for the configured remote storage"))
    1549              :         }
    1550            0 :         TimeTravelError::Cancelled => ApiError::InternalServerError(anyhow!("cancelled")),
    1551              :         TimeTravelError::TooManyVersions => {
    1552            0 :             ApiError::InternalServerError(anyhow!("too many versions in remote storage"))
    1553              :         }
    1554            0 :         TimeTravelError::Other(e) => {
    1555            0 :             warn!("internal error: {e}");
    1556            0 :             ApiError::InternalServerError(anyhow!("internal error"))
    1557              :         }
    1558            0 :     })?;
    1559              : 
    1560            0 :     json_response(StatusCode::OK, ())
    1561            0 : }
    1562              : 
    1563              : /// Testing helper to transition a tenant to [`crate::tenant::TenantState::Broken`].
    1564            0 : async fn handle_tenant_break(
    1565            0 :     r: Request<Body>,
    1566            0 :     _cancel: CancellationToken,
    1567            0 : ) -> Result<Response<Body>, ApiError> {
    1568            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
    1569              : 
    1570            0 :     let state = get_state(&r);
    1571            0 :     state
    1572            0 :         .tenant_manager
    1573            0 :         .get_attached_tenant_shard(tenant_shard_id)?
    1574            0 :         .set_broken("broken from test".to_owned())
    1575            0 :         .await;
    1576              : 
    1577            0 :     json_response(StatusCode::OK, ())
    1578            0 : }
    1579              : 
    1580              : // Obtains an LSN lease on the given timeline.
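                       : //
                       : // Body sketch for LsnLeaseRequest (route path and the LSN value are placeholders):
                       : //
                       : //   POST .../v1/tenant/$TENANT_SHARD_ID/timeline/$TIMELINE_ID/lsn_lease
                       : //   { "lsn": "0/169F070" }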
    1581            0 : async fn lsn_lease_handler(
    1582            0 :     mut request: Request<Body>,
    1583            0 :     _cancel: CancellationToken,
    1584            0 : ) -> Result<Response<Body>, ApiError> {
    1585            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1586            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1587            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1588            0 :     let lsn = json_request::<LsnLeaseRequest>(&mut request).await?.lsn;
    1589              : 
    1590            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1591            0 : 
    1592            0 :     let state = get_state(&request);
    1593              : 
    1594            0 :     let timeline =
    1595            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1596            0 :             .await?;
    1597            0 :     let result = timeline
    1598            0 :         .make_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx)
    1599            0 :         .map_err(|e| ApiError::InternalServerError(e.context("lsn lease http handler")))?;
    1600              : 
    1601            0 :     json_response(StatusCode::OK, result)
    1602            0 : }
    1603              : 
    1604              : // Run GC immediately on the given timeline.
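                       : //
                       : // Body sketch for TimelineGcRequest (route path and the optional `gc_horizon`
                       : // value are assumptions):
                       : //
                       : //   PUT .../v1/tenant/$TENANT_SHARD_ID/timeline/$TIMELINE_ID/do_gc
                       : //   { "gc_horizon": 67108864 }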
    1605            0 : async fn timeline_gc_handler(
    1606            0 :     mut request: Request<Body>,
    1607            0 :     cancel: CancellationToken,
    1608            0 : ) -> Result<Response<Body>, ApiError> {
    1609            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1610            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1611            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1612              : 
    1613            0 :     let gc_req: TimelineGcRequest = json_request(&mut request).await?;
    1614              : 
    1615            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1616            0 :     let gc_result = mgr::immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, &ctx).await?;
    1617              : 
    1618            0 :     json_response(StatusCode::OK, gc_result)
    1619            0 : }
    1620              : 
    1621              : // Run compaction immediately on the given timeline.
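                       : //
                       : // Query sketch (route path is an assumption); each query parameter corresponds
                       : // one-to-one to a flag parsed in the handler body:
                       : //
                       : //   PUT ".../timeline/$TIMELINE_ID/compact?force_repartition=true&wait_until_uploaded=true"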
    1622            0 : async fn timeline_compact_handler(
    1623            0 :     request: Request<Body>,
    1624            0 :     cancel: CancellationToken,
    1625            0 : ) -> Result<Response<Body>, ApiError> {
    1626            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1627            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1628            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1629              : 
    1630            0 :     let state = get_state(&request);
    1631            0 : 
    1632            0 :     let mut flags = EnumSet::empty();
    1633            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
    1634            0 :         flags |= CompactFlags::ForceRepartition;
    1635            0 :     }
    1636            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
    1637            0 :         flags |= CompactFlags::ForceImageLayerCreation;
    1638            0 :     }
    1639            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? {
    1640            0 :         if !cfg!(feature = "testing") {
    1641            0 :             return Err(ApiError::InternalServerError(anyhow!(
    1642            0 :                 "enhanced_gc_bottom_most_compaction is only available in testing mode"
    1643            0 :             )));
    1644            0 :         }
    1645            0 :         flags |= CompactFlags::EnhancedGcBottomMostCompaction;
    1646            0 :     }
    1647            0 :     let wait_until_uploaded =
    1648            0 :         parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
    1649              : 
    1650            0 :     async {
    1651            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1652            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    1653            0 :         timeline
    1654            0 :             .compact(&cancel, flags, &ctx)
    1655            0 :             .await
    1656            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))?;
    1657            0 :         if wait_until_uploaded {
    1658            0 :             timeline.remote_client.wait_completion().await
    1659              :             // XXX map to correct ApiError for the cases where it's due to shutdown
    1660            0 :             .context("wait completion").map_err(ApiError::InternalServerError)?;
    1661            0 :         }
    1662            0 :         json_response(StatusCode::OK, ())
    1663            0 :     }
    1664            0 :     .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    1665            0 :     .await
    1666            0 : }
    1667              : 
    1668              : // Run a checkpoint immediately on the given timeline.
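                       : //
                       : // Query sketch (route path is an assumption): flush and upload without the
                       : // default compaction pass:
                       : //
                       : //   PUT ".../timeline/$TIMELINE_ID/checkpoint?compact=false&wait_until_uploaded=true"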
    1669            0 : async fn timeline_checkpoint_handler(
    1670            0 :     request: Request<Body>,
    1671            0 :     cancel: CancellationToken,
    1672            0 : ) -> Result<Response<Body>, ApiError> {
    1673            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1674            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1675            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1676              : 
    1677            0 :     let state = get_state(&request);
    1678            0 : 
    1679            0 :     let mut flags = EnumSet::empty();
    1680            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
    1681            0 :         flags |= CompactFlags::ForceRepartition;
    1682            0 :     }
    1683            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
    1684            0 :         flags |= CompactFlags::ForceImageLayerCreation;
    1685            0 :     }
    1686              : 
    1687              :     // By default, checkpoints come with a compaction, but tests that just want to flush + upload may disable it.
    1688            0 :     let compact = parse_query_param::<_, bool>(&request, "compact")?.unwrap_or(true);
    1689              : 
    1690            0 :     let wait_until_uploaded =
    1691            0 :         parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
    1692              : 
    1693            0 :     async {
    1694            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1695            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    1696            0 :         timeline
    1697            0 :             .freeze_and_flush()
    1698            0 :             .await
    1699            0 :             .map_err(|e| {
    1700            0 :                 match e {
    1701            0 :                     tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
    1702            0 :                     other => ApiError::InternalServerError(other.into()),
    1703              : 
    1704              :                 }
    1705            0 :             })?;
    1706            0 :         if compact {
    1707            0 :             timeline
    1708            0 :                 .compact(&cancel, flags, &ctx)
    1709            0 :                 .await
    1710            0 :                 .map_err(|e|
    1711            0 :                     match e {
    1712            0 :                         CompactionError::ShuttingDown => ApiError::ShuttingDown,
    1713            0 :                         CompactionError::Other(e) => ApiError::InternalServerError(e)
    1714            0 :                     }
    1715            0 :                 )?;
    1716            0 :         }
    1717              : 
    1718            0 :         if wait_until_uploaded {
    1719            0 :             timeline.remote_client.wait_completion().await
    1720              :             // XXX map to correct ApiError for the cases where it's due to shutdown
    1721            0 :             .context("wait completion").map_err(ApiError::InternalServerError)?;
    1722            0 :         }
    1723              : 
    1724            0 :         json_response(StatusCode::OK, ())
    1725            0 :     }
    1726            0 :     .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    1727            0 :     .await
    1728            0 : }
    1729              : 
    1730            0 : async fn timeline_download_remote_layers_handler_post(
    1731            0 :     mut request: Request<Body>,
    1732            0 :     _cancel: CancellationToken,
    1733            0 : ) -> Result<Response<Body>, ApiError> {
    1734            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1735            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1736            0 :     let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
    1737            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1738              : 
    1739            0 :     let state = get_state(&request);
    1740              : 
    1741            0 :     let timeline =
    1742            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1743            0 :             .await?;
    1744            0 :     match timeline.spawn_download_all_remote_layers(body).await {
    1745            0 :         Ok(st) => json_response(StatusCode::ACCEPTED, st),
    1746            0 :         Err(st) => json_response(StatusCode::CONFLICT, st),
    1747              :     }
    1748            0 : }
    1749              : 
    1750            0 : async fn timeline_download_remote_layers_handler_get(
    1751            0 :     request: Request<Body>,
    1752            0 :     _cancel: CancellationToken,
    1753            0 : ) -> Result<Response<Body>, ApiError> {
    1754            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1755            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1756            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1757            0 :     let state = get_state(&request);
    1758              : 
    1759            0 :     let timeline =
    1760            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1761            0 :             .await?;
    1762            0 :     let info = timeline
    1763            0 :         .get_download_all_remote_layers_task_info()
    1764            0 :         .context("task never started since last pageserver process start")
    1765            0 :         .map_err(|e| ApiError::NotFound(e.into()))?;
    1766            0 :     json_response(StatusCode::OK, info)
    1767            0 : }
    1768              : 
    1769            0 : async fn timeline_detach_ancestor_handler(
    1770            0 :     request: Request<Body>,
    1771            0 :     _cancel: CancellationToken,
    1772            0 : ) -> Result<Response<Body>, ApiError> {
    1773              :     use crate::tenant::timeline::detach_ancestor;
    1774              :     use pageserver_api::models::detach_ancestor::AncestorDetached;
    1775              : 
    1776            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1777            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1778            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1779              : 
    1780            0 :     let span = tracing::info_span!("detach_ancestor", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
    1781              : 
    1782            0 :     async move {
    1783            0 :         let mut options = detach_ancestor::Options::default();
    1784              : 
    1785            0 :         let rewrite_concurrency =
    1786            0 :             parse_query_param::<_, std::num::NonZeroUsize>(&request, "rewrite_concurrency")?;
    1787            0 :         let copy_concurrency =
    1788            0 :             parse_query_param::<_, std::num::NonZeroUsize>(&request, "copy_concurrency")?;
    1789              : 
    1790            0 :         [
    1791            0 :             (&mut options.rewrite_concurrency, rewrite_concurrency),
    1792            0 :             (&mut options.copy_concurrency, copy_concurrency),
    1793            0 :         ]
    1794            0 :         .into_iter()
    1795            0 :         .filter_map(|(target, val)| val.map(|val| (target, val)))
    1796            0 :         .for_each(|(target, val)| *target = val);
    1797            0 : 
    1798            0 :         let state = get_state(&request);
    1799              : 
    1800            0 :         let tenant = state
    1801            0 :             .tenant_manager
    1802            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    1803              : 
    1804            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1805              : 
    1806            0 :         let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download);
    1807            0 :         let ctx = &ctx;
    1808              : 
    1809            0 :         let timeline = tenant.get_timeline(timeline_id, true)?;
    1810              : 
    1811            0 :         let progress = timeline
    1812            0 :             .prepare_to_detach_from_ancestor(&tenant, options, ctx)
    1813            0 :             .await?;
    1814              : 
    1815              :         // uncomment to allow the earliest possible Tenant::drop
    1816              :         // drop(tenant);
    1817              : 
    1818            0 :         let resp = match progress {
    1819            0 :             detach_ancestor::Progress::Prepared(_guard, prepared) => {
    1820              :                 // it would be great to attach the guard to the tenant activation future
    1821            0 :                 let reparented_timelines = state
    1822            0 :                     .tenant_manager
    1823            0 :                     .complete_detaching_timeline_ancestor(
    1824            0 :                         tenant_shard_id,
    1825            0 :                         timeline_id,
    1826            0 :                         prepared,
    1827            0 :                         ctx,
    1828            0 :                     )
    1829            0 :                     .await
    1830            0 :                     .context("timeline detach ancestor completion")
    1831            0 :                     .map_err(ApiError::InternalServerError)?;
    1832              : 
    1833            0 :                 AncestorDetached {
    1834            0 :                     reparented_timelines,
    1835            0 :                 }
    1836              :             }
    1837            0 :             detach_ancestor::Progress::Done(resp) => resp,
    1838              :         };
    1839              : 
    1840            0 :         json_response(StatusCode::OK, resp)
    1841            0 :     }
    1842            0 :     .instrument(span)
    1843            0 :     .await
    1844            0 : }
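
The concurrency-override idiom above pairs mutable references to default option fields with parsed Option values, so only the query parameters actually supplied overwrite the defaults. A minimal standalone sketch of the same idiom, using only the standard library (the Options struct and its default values here are illustrative, not the real detach_ancestor::Options):

    use std::num::NonZeroUsize;

    struct Options {
        rewrite_concurrency: NonZeroUsize,
        copy_concurrency: NonZeroUsize,
    }

    fn main() {
        let mut options = Options {
            // Hypothetical defaults; the real ones live in detach_ancestor::Options.
            rewrite_concurrency: NonZeroUsize::new(2).unwrap(),
            copy_concurrency: NonZeroUsize::new(10).unwrap(),
        };

        // Pretend only `rewrite_concurrency` was supplied as a query parameter.
        let rewrite: Option<NonZeroUsize> = NonZeroUsize::new(8);
        let copy: Option<NonZeroUsize> = None;

        // Same shape as the handler: overwrite a default only when a value was parsed.
        [
            (&mut options.rewrite_concurrency, rewrite),
            (&mut options.copy_concurrency, copy),
        ]
        .into_iter()
        .filter_map(|(target, val)| val.map(|val| (target, val)))
        .for_each(|(target, val)| *target = val);

        assert_eq!(options.rewrite_concurrency.get(), 8);
        assert_eq!(options.copy_concurrency.get(), 10);
    }
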
    1845              : 
    1846            0 : async fn deletion_queue_flush(
    1847            0 :     r: Request<Body>,
    1848            0 :     cancel: CancellationToken,
    1849            0 : ) -> Result<Response<Body>, ApiError> {
    1850            0 :     let state = get_state(&r);
    1851              : 
    1852            0 :     let execute = parse_query_param(&r, "execute")?.unwrap_or(false);
    1853            0 : 
    1854            0 :     let flush = async {
    1855            0 :         if execute {
    1856            0 :             state.deletion_queue_client.flush_execute().await
    1857              :         } else {
    1858            0 :             state.deletion_queue_client.flush().await
    1859              :         }
    1860            0 :     }
    1861              :     // DeletionQueueError's only case is shutting down.
    1862            0 :     .map_err(|_| ApiError::ShuttingDown);
    1863              : 
    1864              :     tokio::select! {
    1865              :         res = flush => {
    1866            0 :             res.map(|()| json_response(StatusCode::OK, ()))?
    1867              :         }
    1868              :         _ = cancel.cancelled() => {
    1869              :             Err(ApiError::ShuttingDown)
    1870              :         }
    1871              :     }
    1872            0 : }
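
The flush handler races the flush future against the cancellation token, so a shutdown turns into an orderly error response rather than a hung request. A minimal sketch of that select-with-cancellation shape (assumes the tokio crate with timer and macro features, plus tokio-util; the "flush" future is a stand-in):

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();

        // Stand-in for the deletion queue flush future.
        let flush = async {
            tokio::time::sleep(Duration::from_millis(10)).await;
            "flushed"
        };

        // Same shape as the handler: whichever completes first wins.
        tokio::select! {
            res = flush => println!("completed: {res}"),
            _ = cancel.cancelled() => println!("shutting down"),
        }
    }
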
    1873              : 
    1874              : /// Test whether a `GetPage@Lsn` request succeeds; useful for manual debugging.
    1875            0 : async fn getpage_at_lsn_handler(
    1876            0 :     request: Request<Body>,
    1877            0 :     _cancel: CancellationToken,
    1878            0 : ) -> Result<Response<Body>, ApiError> {
    1879            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1880            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1881            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1882            0 :     let state = get_state(&request);
    1883              : 
    1884              :     struct Key(crate::repository::Key);
    1885              : 
    1886              :     impl std::str::FromStr for Key {
    1887              :         type Err = anyhow::Error;
    1888              : 
    1889            0 :         fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
    1890            0 :             crate::repository::Key::from_hex(s).map(Key)
    1891            0 :         }
    1892              :     }
    1893              : 
    1894            0 :     let key: Key = parse_query_param(&request, "key")?
    1895            0 :         .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'key' query parameter")))?;
    1896            0 :     let lsn: Lsn = parse_query_param(&request, "lsn")?
    1897            0 :         .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'lsn' query parameter")))?;
    1898              : 
    1899            0 :     async {
    1900            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1901            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    1902              : 
    1903            0 :         let page = timeline.get(key.0, lsn, &ctx).await?;
    1904              : 
    1905            0 :         Result::<_, ApiError>::Ok(
    1906            0 :             Response::builder()
    1907            0 :                 .status(StatusCode::OK)
    1908            0 :                 .header(header::CONTENT_TYPE, "application/octet-stream")
    1909            0 :                 .body(hyper::Body::from(page))
    1910            0 :                 .unwrap(),
    1911            0 :         )
    1912            0 :     }
    1913            0 :     .instrument(info_span!("timeline_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    1914            0 :     .await
    1915            0 : }
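
The local Key newtype above exists only to give crate::repository::Key a FromStr impl, which is what the generic parse_query_param helper needs to parse the hex-encoded key. The same trick in miniature (HexByte is a made-up stand-in type):

    use std::str::FromStr;

    // A newtype whose FromStr impl lets a generic "parse this query parameter"
    // helper handle a custom hex format.
    struct HexByte(u8);

    impl FromStr for HexByte {
        type Err = std::num::ParseIntError;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            u8::from_str_radix(s, 16).map(HexByte)
        }
    }

    fn main() {
        // A generic helper can now call `.parse::<HexByte>()` without knowing about hex.
        let v: HexByte = "ff".parse().expect("valid hex");
        assert_eq!(v.0, 255);
    }
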
    1916              : 
    1917            0 : async fn timeline_collect_keyspace(
    1918            0 :     request: Request<Body>,
    1919            0 :     _cancel: CancellationToken,
    1920            0 : ) -> Result<Response<Body>, ApiError> {
    1921            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1922            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1923            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1924            0 :     let state = get_state(&request);
    1925              : 
    1926            0 :     let at_lsn: Option<Lsn> = parse_query_param(&request, "at_lsn")?;
    1927              : 
    1928            0 :     async {
    1929            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1930            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    1931            0 :         let at_lsn = at_lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
    1932            0 :         let (dense_ks, sparse_ks) = timeline
    1933            0 :             .collect_keyspace(at_lsn, &ctx)
    1934            0 :             .await
    1935            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))?;
    1936              : 
    1937              :         // This API is currently used by pagebench. Pagebench will iterate all keys within the keyspace.
    1938              :         // Therefore, we split dense/sparse keys in this API.
    1939            0 :         let res = pageserver_api::models::partitioning::Partitioning { keys: dense_ks, sparse_keys: sparse_ks, at_lsn };
    1940            0 : 
    1941            0 :         json_response(StatusCode::OK, res)
    1942            0 :     }
    1943            0 :     .instrument(info_span!("timeline_collect_keyspace", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    1944            0 :     .await
    1945            0 : }
    1946              : 
    1947            0 : async fn active_timeline_of_active_tenant(
    1948            0 :     tenant_manager: &TenantManager,
    1949            0 :     tenant_shard_id: TenantShardId,
    1950            0 :     timeline_id: TimelineId,
    1951            0 : ) -> Result<Arc<Timeline>, ApiError> {
    1952            0 :     let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
    1953              : 
    1954            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1955              : 
    1956            0 :     Ok(tenant.get_timeline(timeline_id, true)?)
    1957            0 : }
    1958              : 
    1959            0 : async fn always_panic_handler(
    1960            0 :     req: Request<Body>,
    1961            0 :     _cancel: CancellationToken,
    1962            0 : ) -> Result<Response<Body>, ApiError> {
    1963            0 :     // Deliberately cause a panic to exercise the panic hook registered via std::panic::set_hook().
    1964            0 :     // For pageserver, the relevant panic hook is `tracing_panic_hook`, and the `sentry` crate's wrapper around it.
    1965            0 :     // Use catch_unwind to ensure that neither tokio nor hyper is distracted by our panic.
    1966            0 :     let query = req.uri().query();
    1967            0 :     let _ = std::panic::catch_unwind(|| {
    1968            0 :         panic!("unconditional panic for testing panic hook integration; request query: {query:?}")
    1969            0 :     });
    1970            0 :     json_response(StatusCode::NO_CONTENT, ())
    1971            0 : }
    1972              : 
    1973            0 : async fn disk_usage_eviction_run(
    1974            0 :     mut r: Request<Body>,
    1975            0 :     cancel: CancellationToken,
    1976            0 : ) -> Result<Response<Body>, ApiError> {
    1977            0 :     check_permission(&r, None)?;
    1978              : 
    1979            0 :     #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
    1980              :     struct Config {
    1981              :         /// How many bytes to evict before reporting that pressure is relieved.
    1982              :         evict_bytes: u64,
    1983              : 
    1984              :         #[serde(default)]
    1985              :         eviction_order: crate::disk_usage_eviction_task::EvictionOrder,
    1986              :     }
    1987              : 
    1988              :     #[derive(Debug, Clone, Copy, serde::Serialize)]
    1989              :     struct Usage {
    1990              :         // remains unchanged after instantiation of the struct
    1991              :         evict_bytes: u64,
    1992              :         // updated by `add_available_bytes`
    1993              :         freed_bytes: u64,
    1994              :     }
    1995              : 
    1996              :     impl crate::disk_usage_eviction_task::Usage for Usage {
    1997            0 :         fn has_pressure(&self) -> bool {
    1998            0 :             self.evict_bytes > self.freed_bytes
    1999            0 :         }
    2000              : 
    2001            0 :         fn add_available_bytes(&mut self, bytes: u64) {
    2002            0 :             self.freed_bytes += bytes;
    2003            0 :         }
    2004              :     }
    2005              : 
    2006            0 :     let config = json_request::<Config>(&mut r).await?;
    2007              : 
    2008            0 :     let usage = Usage {
    2009            0 :         evict_bytes: config.evict_bytes,
    2010            0 :         freed_bytes: 0,
    2011            0 :     };
    2012            0 : 
    2013            0 :     let state = get_state(&r);
    2014            0 :     let eviction_state = state.disk_usage_eviction_state.clone();
    2015              : 
    2016            0 :     let res = crate::disk_usage_eviction_task::disk_usage_eviction_task_iteration_impl(
    2017            0 :         &eviction_state,
    2018            0 :         &state.remote_storage,
    2019            0 :         usage,
    2020            0 :         &state.tenant_manager,
    2021            0 :         config.eviction_order,
    2022            0 :         &cancel,
    2023            0 :     )
    2024            0 :     .await;
    2025              : 
    2026            0 :     info!(?res, "disk_usage_eviction_task_iteration_impl finished");
    2027              : 
    2028            0 :     let res = res.map_err(ApiError::InternalServerError)?;
    2029              : 
    2030            0 :     json_response(StatusCode::OK, res)
    2031            0 : }
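
The Usage impl above defines when eviction pressure counts as relieved: has_pressure() stays true until add_available_bytes() has accounted for at least evict_bytes. A simplified sketch of that contract driving an eviction loop (the trait and loop here are stand-ins, not the real crate::disk_usage_eviction_task API):

    trait Usage {
        fn has_pressure(&self) -> bool;
        fn add_available_bytes(&mut self, bytes: u64);
    }

    struct EvictBudget {
        evict_bytes: u64,
        freed_bytes: u64,
    }

    impl Usage for EvictBudget {
        fn has_pressure(&self) -> bool {
            self.evict_bytes > self.freed_bytes
        }
        fn add_available_bytes(&mut self, bytes: u64) {
            self.freed_bytes += bytes;
        }
    }

    fn main() {
        let mut usage = EvictBudget { evict_bytes: 1024, freed_bytes: 0 };
        // Evict fixed-size "layers" until enough bytes have been freed.
        while usage.has_pressure() {
            usage.add_available_bytes(300);
        }
        assert_eq!(usage.freed_bytes, 1200);
    }
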
    2032              : 
    2033            0 : async fn secondary_upload_handler(
    2034            0 :     request: Request<Body>,
    2035            0 :     _cancel: CancellationToken,
    2036            0 : ) -> Result<Response<Body>, ApiError> {
    2037            0 :     let state = get_state(&request);
    2038            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2039            0 :     state
    2040            0 :         .secondary_controller
    2041            0 :         .upload_tenant(tenant_shard_id)
    2042            0 :         .await
    2043            0 :         .map_err(ApiError::InternalServerError)?;
    2044              : 
    2045            0 :     json_response(StatusCode::OK, ())
    2046            0 : }
    2047              : 
    2048            0 : async fn tenant_scan_remote_handler(
    2049            0 :     request: Request<Body>,
    2050            0 :     cancel: CancellationToken,
    2051            0 : ) -> Result<Response<Body>, ApiError> {
    2052            0 :     let state = get_state(&request);
    2053            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    2054              : 
    2055            0 :     let mut response = TenantScanRemoteStorageResponse::default();
    2056              : 
    2057            0 :     let (shards, _other_keys) =
    2058            0 :         list_remote_tenant_shards(&state.remote_storage, tenant_id, cancel.clone())
    2059            0 :             .await
    2060            0 :             .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    2061              : 
    2062            0 :     for tenant_shard_id in shards {
    2063            0 :         let (timeline_ids, _other_keys) =
    2064            0 :             list_remote_timelines(&state.remote_storage, tenant_shard_id, cancel.clone())
    2065            0 :                 .await
    2066            0 :                 .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    2067              : 
    2068            0 :         let mut generation = Generation::none();
    2069            0 :         for timeline_id in timeline_ids {
    2070            0 :             match download_index_part(
    2071            0 :                 &state.remote_storage,
    2072            0 :                 &tenant_shard_id,
    2073            0 :                 &timeline_id,
    2074            0 :                 Generation::MAX,
    2075            0 :                 &cancel,
    2076            0 :             )
    2077            0 :             .instrument(info_span!("download_index_part",
    2078              :                          tenant_id=%tenant_shard_id.tenant_id,
    2079            0 :                          shard_id=%tenant_shard_id.shard_slug(),
    2080              :                          %timeline_id))
    2081            0 :             .await
    2082              :             {
    2083            0 :                 Ok((index_part, index_generation)) => {
    2084            0 :                     tracing::info!("Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, {} consistent LSN)",
    2085            0 :                         index_part.layer_metadata.len(), index_part.metadata.disk_consistent_lsn());
    2086            0 :                     generation = std::cmp::max(generation, index_generation);
    2087              :                 }
    2088              :                 Err(DownloadError::NotFound) => {
    2089              :                     // This is normal for tenants that were created with multiple shards: they have an unsharded path
    2090              :                     // containing the timeline's initdb tarball but no index.  Otherwise it is a bit strange.
    2091            0 :                     tracing::info!("Timeline path {tenant_shard_id}/{timeline_id} exists in remote storage but has no index, skipping");
    2092            0 :                     continue;
    2093              :                 }
    2094            0 :                 Err(e) => {
    2095            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    2096              :                 }
    2097              :             };
    2098              :         }
    2099              : 
    2100            0 :         response.shards.push(TenantScanRemoteStorageShard {
    2101            0 :             tenant_shard_id,
    2102            0 :             generation: generation.into(),
    2103            0 :         });
    2104              :     }
    2105              : 
    2106            0 :     if response.shards.is_empty() {
    2107            0 :         return Err(ApiError::NotFound(
    2108            0 :             anyhow::anyhow!("No shards found for tenant ID {tenant_id}").into(),
    2109            0 :         ));
    2110            0 :     }
    2111            0 : 
    2112            0 :     json_response(StatusCode::OK, response)
    2113            0 : }
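
The scan above folds a max over every timeline's index generation, starting from Generation::none(), to recover the shard's latest generation from remote storage alone. A simplified sketch of that fold (this Generation enum is a stand-in, not the real pageserver type; derived Ord makes None sort below any Valid(n)):

    use std::cmp;

    #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    enum Generation {
        None,
        Valid(u32),
    }

    fn main() {
        let observed = [Generation::Valid(3), Generation::None, Generation::Valid(7)];
        let mut generation = Generation::None;
        for g in observed {
            generation = cmp::max(generation, g);
        }
        assert_eq!(generation, Generation::Valid(7));
    }
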
    2114              : 
    2115            0 : async fn secondary_download_handler(
    2116            0 :     request: Request<Body>,
    2117            0 :     _cancel: CancellationToken,
    2118            0 : ) -> Result<Response<Body>, ApiError> {
    2119            0 :     let state = get_state(&request);
    2120            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2121            0 :     let wait = parse_query_param(&request, "wait_ms")?.map(Duration::from_millis);
    2122              : 
    2123              :     // We don't need this to issue the download request, but:
    2124              :     // - it enables us to cleanly return 404 if we get a request for an absent shard
    2125              :     // - we will use this to provide status feedback in the response
    2126            0 :     let Some(secondary_tenant) = state
    2127            0 :         .tenant_manager
    2128            0 :         .get_secondary_tenant_shard(tenant_shard_id)
    2129              :     else {
    2130            0 :         return Err(ApiError::NotFound(
    2131            0 :             anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
    2132            0 :         ));
    2133              :     };
    2134              : 
    2135            0 :     let timeout = wait.unwrap_or(Duration::MAX);
    2136              : 
    2137            0 :     let result = tokio::time::timeout(
    2138            0 :         timeout,
    2139            0 :         state.secondary_controller.download_tenant(tenant_shard_id),
    2140            0 :     )
    2141            0 :     .await;
    2142              : 
    2143            0 :     let progress = secondary_tenant.progress.lock().unwrap().clone();
    2144              : 
    2145            0 :     let status = match result {
    2146              :         Ok(Ok(())) => {
    2147            0 :             if progress.layers_downloaded >= progress.layers_total {
    2148              :                 // Download job ran to completion
    2149            0 :                 StatusCode::OK
    2150              :             } else {
    2151              :                 // Download dropped out without errors because it ran out of time budget
    2152            0 :                 StatusCode::ACCEPTED
    2153              :             }
    2154              :         }
    2155              :         // Edge case: downloads aren't usually fallible; things like a missing heatmap are considered
    2156              :         // okay.  We could get an error here in the unlikely edge case that the tenant
    2157              :         // was detached between our check above and executing the download job.
    2158            0 :         Ok(Err(e)) => return Err(ApiError::InternalServerError(e)),
    2159              :         // A timeout is not an error: we have started the download, we're just not done
    2160              :         // yet.  The caller will get a response body indicating status.
    2161            0 :         Err(_) => StatusCode::ACCEPTED,
    2162              :     };
    2163              : 
    2164            0 :     json_response(status, progress)
    2165            0 : }
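
The status-code mapping above treats a timeout as progress rather than failure: 200 when the download completed within the wait budget, 202 when it is still running. A sketch of the timeout half of that decision (assumes the tokio crate; status codes shown as bare u16 for brevity, and the download is a stand-in sleep):

    use std::time::Duration;

    #[tokio::main]
    async fn main() {
        // Stand-in for a download that outlives the caller's wait budget.
        let slow_download = tokio::time::sleep(Duration::from_millis(100));

        let status: u16 = match tokio::time::timeout(Duration::from_millis(10), slow_download).await {
            Ok(()) => 200, // ran to completion within the budget
            Err(_) => 202, // timed out: accepted, still running in the background
        };
        assert_eq!(status, 202);
    }
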
    2166              : 
    2167            0 : async fn secondary_status_handler(
    2168            0 :     request: Request<Body>,
    2169            0 :     _cancel: CancellationToken,
    2170            0 : ) -> Result<Response<Body>, ApiError> {
    2171            0 :     let state = get_state(&request);
    2172            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2173              : 
    2174            0 :     let Some(secondary_tenant) = state
    2175            0 :         .tenant_manager
    2176            0 :         .get_secondary_tenant_shard(tenant_shard_id)
    2177              :     else {
    2178            0 :         return Err(ApiError::NotFound(
    2179            0 :             anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
    2180            0 :         ));
    2181              :     };
    2182              : 
    2183            0 :     let progress = secondary_tenant.progress.lock().unwrap().clone();
    2184            0 : 
    2185            0 :     json_response(StatusCode::OK, progress)
    2186            0 : }
    2187              : 
    2188            0 : async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
    2189            0 :     json_response(
    2190            0 :         StatusCode::NOT_FOUND,
    2191            0 :         HttpErrorBody::from_msg("page not found".to_owned()),
    2192            0 :     )
    2193            0 : }
    2194              : 
    2195            0 : async fn post_tracing_event_handler(
    2196            0 :     mut r: Request<Body>,
    2197            0 :     _cancel: CancellationToken,
    2198            0 : ) -> Result<Response<Body>, ApiError> {
    2199            0 :     #[derive(Debug, serde::Deserialize)]
    2200              :     #[serde(rename_all = "lowercase")]
    2201              :     enum Level {
    2202              :         Error,
    2203              :         Warn,
    2204              :         Info,
    2205              :         Debug,
    2206              :         Trace,
    2207              :     }
    2208            0 :     #[derive(Debug, serde::Deserialize)]
    2209              :     struct Request {
    2210              :         level: Level,
    2211              :         message: String,
    2212              :     }
    2213            0 :     let body: Request = json_request(&mut r)
    2214            0 :         .await
    2215            0 :         .map_err(|_| ApiError::BadRequest(anyhow::anyhow!("invalid JSON body")))?;
    2216              : 
    2217            0 :     match body.level {
    2218            0 :         Level::Error => tracing::error!(?body.message),
    2219            0 :         Level::Warn => tracing::warn!(?body.message),
    2220            0 :         Level::Info => tracing::info!(?body.message),
    2221            0 :         Level::Debug => tracing::debug!(?body.message),
    2222            0 :         Level::Trace => tracing::trace!(?body.message),
    2223              :     }
    2224              : 
    2225            0 :     json_response(StatusCode::OK, ())
    2226            0 : }
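
Because of #[serde(rename_all = "lowercase")], the wire format for this endpoint uses lowercase level names, e.g. {"level": "info", "message": "..."}. A sketch of deserializing that shape (assumes the serde derive and serde_json crates; mirrors the request types above):

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "lowercase")]
    enum Level {
        Error,
        Warn,
        Info,
        Debug,
        Trace,
    }

    #[derive(Debug, Deserialize)]
    struct Request {
        level: Level,
        message: String,
    }

    fn main() {
        let body: Request =
            serde_json::from_str(r#"{"level": "info", "message": "hello"}"#).unwrap();
        assert!(matches!(body.level, Level::Info));
        assert_eq!(body.message, "hello");
    }
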
    2227              : 
    2228            0 : async fn force_aux_policy_switch_handler(
    2229            0 :     mut r: Request<Body>,
    2230            0 :     _cancel: CancellationToken,
    2231            0 : ) -> Result<Response<Body>, ApiError> {
    2232            0 :     check_permission(&r, None)?;
    2233            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
    2234            0 :     let timeline_id: TimelineId = parse_request_param(&r, "timeline_id")?;
    2235            0 :     let policy: AuxFilePolicy = json_request(&mut r).await?;
    2236              : 
    2237            0 :     let state = get_state(&r);
    2238              : 
    2239            0 :     let tenant = state
    2240            0 :         .tenant_manager
    2241            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    2242            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    2243            0 :     let timeline =
    2244            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2245            0 :             .await?;
    2246            0 :     timeline
    2247            0 :         .do_switch_aux_policy(policy)
    2248            0 :         .map_err(ApiError::InternalServerError)?;
    2249              : 
    2250            0 :     json_response(StatusCode::OK, ())
    2251            0 : }
    2252              : 
    2253            0 : async fn put_io_engine_handler(
    2254            0 :     mut r: Request<Body>,
    2255            0 :     _cancel: CancellationToken,
    2256            0 : ) -> Result<Response<Body>, ApiError> {
    2257            0 :     check_permission(&r, None)?;
    2258            0 :     let kind: crate::virtual_file::IoEngineKind = json_request(&mut r).await?;
    2259            0 :     crate::virtual_file::io_engine::set(kind);
    2260            0 :     json_response(StatusCode::OK, ())
    2261            0 : }
    2262              : 
    2263              : /// Polled by control plane.
    2264              : ///
    2265              : /// See [`crate::utilization`].
    2266            0 : async fn get_utilization(
    2267            0 :     r: Request<Body>,
    2268            0 :     _cancel: CancellationToken,
    2269            0 : ) -> Result<Response<Body>, ApiError> {
    2270            0 :     fail::fail_point!("get-utilization-http-handler", |_| {
    2271            0 :         Err(ApiError::ResourceUnavailable("failpoint".into()))
    2272            0 :     });
    2273              : 
    2274              :     // This could probably be completely public, but let's make that change later.
    2275            0 :     check_permission(&r, None)?;
    2276              : 
    2277            0 :     let state = get_state(&r);
    2278            0 :     let mut g = state.latest_utilization.lock().await;
    2279              : 
    2280            0 :     let regenerate_every = Duration::from_secs(1);
    2281            0 :     let still_valid = g
    2282            0 :         .as_ref()
    2283            0 :         .is_some_and(|(captured_at, _)| captured_at.elapsed() < regenerate_every);
    2284            0 : 
    2285            0 :     // Avoid needless statvfs calls, even though they should be non-blocking and fast.
    2286            0 :     // Regenerate at most at 1 Hz to allow polling at any rate.
    2287            0 :     if !still_valid {
    2288            0 :         let path = state.conf.tenants_path();
    2289            0 :         let doc = crate::utilization::regenerate(path.as_std_path())
    2290            0 :             .map_err(ApiError::InternalServerError)?;
    2291              : 
    2292            0 :         let mut buf = Vec::new();
    2293            0 :         serde_json::to_writer(&mut buf, &doc)
    2294            0 :             .context("serialize")
    2295            0 :             .map_err(ApiError::InternalServerError)?;
    2296              : 
    2297            0 :         let body = bytes::Bytes::from(buf);
    2298            0 : 
    2299            0 :         *g = Some((std::time::Instant::now(), body));
    2300            0 :     }
    2301              : 
    2302              :     // hyper 0.14 doesn't yet have Response::clone so this is a bit of extra legwork
    2303            0 :     let cached = g.as_ref().expect("just set").1.clone();
    2304            0 : 
    2305            0 :     Response::builder()
    2306            0 :         .header(hyper::http::header::CONTENT_TYPE, "application/json")
    2307            0 :         // We thought of using the HTTP Date header, but its one-second precision does
    2308            0 :         // not give any debugging aid.
    2309            0 :         .status(StatusCode::OK)
    2310            0 :         .body(hyper::Body::from(cached))
    2311            0 :         .context("build response")
    2312            0 :         .map_err(ApiError::InternalServerError)
    2313            0 : }
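
The handler caches the serialized utilization document and regenerates it at most once per second, keyed on Instant::elapsed. The same freshness check extracted into a standalone sketch (Cached is a made-up helper type, not part of the pageserver):

    use std::time::{Duration, Instant};

    struct Cached<T> {
        value: Option<(Instant, T)>,
        ttl: Duration,
    }

    impl<T: Clone> Cached<T> {
        fn get_or_regenerate(&mut self, regenerate: impl FnOnce() -> T) -> T {
            let still_valid = self
                .value
                .as_ref()
                .is_some_and(|(captured_at, _)| captured_at.elapsed() < self.ttl);
            if !still_valid {
                self.value = Some((Instant::now(), regenerate()));
            }
            self.value.as_ref().expect("just set").1.clone()
        }
    }

    fn main() {
        let mut cache = Cached { value: None, ttl: Duration::from_secs(1) };
        let a = cache.get_or_regenerate(|| "doc".to_string());
        // Within the TTL the closure is never called again.
        let b = cache.get_or_regenerate(|| unreachable!("cache still fresh"));
        assert_eq!(a, b);
    }
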
    2314              : 
    2315            0 : async fn list_aux_files(
    2316            0 :     mut request: Request<Body>,
    2317            0 :     _cancel: CancellationToken,
    2318            0 : ) -> Result<Response<Body>, ApiError> {
    2319            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2320            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2321            0 :     let body: ListAuxFilesRequest = json_request(&mut request).await?;
    2322            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2323              : 
    2324            0 :     let state = get_state(&request);
    2325              : 
    2326            0 :     let timeline =
    2327            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2328            0 :             .await?;
    2329              : 
    2330            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    2331            0 :     let files = timeline.list_aux_files(body.lsn, &ctx).await?;
    2332            0 :     json_response(StatusCode::OK, files)
    2333            0 : }
    2334              : 
    2335            0 : async fn perf_info(
    2336            0 :     request: Request<Body>,
    2337            0 :     _cancel: CancellationToken,
    2338            0 : ) -> Result<Response<Body>, ApiError> {
    2339            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2340            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2341            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2342              : 
    2343            0 :     let state = get_state(&request);
    2344              : 
    2345            0 :     let timeline =
    2346            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2347            0 :             .await?;
    2348              : 
    2349            0 :     let result = timeline.perf_info().await;
    2350              : 
    2351            0 :     json_response(StatusCode::OK, result)
    2352            0 : }
    2353              : 
    2354            0 : async fn ingest_aux_files(
    2355            0 :     mut request: Request<Body>,
    2356            0 :     _cancel: CancellationToken,
    2357            0 : ) -> Result<Response<Body>, ApiError> {
    2358            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2359            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2360            0 :     let body: IngestAuxFilesRequest = json_request(&mut request).await?;
    2361            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2362              : 
    2363            0 :     let state = get_state(&request);
    2364              : 
    2365            0 :     let timeline =
    2366            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2367            0 :             .await?;
    2368              : 
    2369            0 :     let mut modification = timeline.begin_modification(
    2370            0 :         Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */
    2371            0 :     );
    2372            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    2373            0 :     for (fname, content) in body.aux_files {
    2374            0 :         modification
    2375            0 :             .put_file(&fname, content.as_bytes(), &ctx)
    2376            0 :             .await
    2377            0 :             .map_err(ApiError::InternalServerError)?;
    2378              :     }
    2379            0 :     modification
    2380            0 :         .commit(&ctx)
    2381            0 :         .await
    2382            0 :         .map_err(ApiError::InternalServerError)?;
    2383              : 
    2384            0 :     json_response(StatusCode::OK, ())
    2385            0 : }
    2386              : 
    2387              : /// Report on the largest tenants on this pageserver, for the storage controller to identify
    2388              : /// candidates for splitting
    2389            0 : async fn post_top_tenants(
    2390            0 :     mut r: Request<Body>,
    2391            0 :     _cancel: CancellationToken,
    2392            0 : ) -> Result<Response<Body>, ApiError> {
    2393            0 :     check_permission(&r, None)?;
    2394            0 :     let request: TopTenantShardsRequest = json_request(&mut r).await?;
    2395            0 :     let state = get_state(&r);
    2396            0 : 
    2397            0 :     fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 {
    2398            0 :         match order_by {
    2399            0 :             TenantSorting::ResidentSize => sizes.resident_size,
    2400            0 :             TenantSorting::MaxLogicalSize => sizes.max_logical_size,
    2401            0 :         }
    2402            0 :     }
    2403            0 : 
    2404            0 :     #[derive(Eq, PartialEq)]
    2405            0 :     struct HeapItem {
    2406            0 :         metric: u64,
    2407            0 :         sizes: TopTenantShardItem,
    2408            0 :     }
    2409            0 : 
    2410            0 :     impl PartialOrd for HeapItem {
    2411            0 :         fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
    2412            0 :             Some(self.cmp(other))
    2413            0 :         }
    2414            0 :     }
    2415            0 : 
    2416            0 :     /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which
    2417            0 :     /// supports popping the greatest item but not the smallest.
    2418            0 :     impl Ord for HeapItem {
    2419            0 :         fn cmp(&self, other: &Self) -> std::cmp::Ordering {
    2420            0 :             Reverse(self.metric).cmp(&Reverse(other.metric))
    2421            0 :         }
    2422            0 :     }
    2423            0 : 
    2424            0 :     let mut top_n: BinaryHeap<HeapItem> = BinaryHeap::with_capacity(request.limit);
    2425              : 
    2426              :     // FIXME: this is a lot of clones to take this tenant list
    2427            0 :     for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() {
    2428            0 :         if let Some(shards_lt) = request.where_shards_lt {
    2429              :             // Ignore tenants which already have >= this many shards
    2430            0 :             if tenant_shard_id.shard_count >= shards_lt {
    2431            0 :                 continue;
    2432            0 :             }
    2433            0 :         }
    2434              : 
    2435            0 :         let sizes = match tenant_slot {
    2436            0 :             TenantSlot::Attached(tenant) => tenant.get_sizes(),
    2437              :             TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
    2438            0 :                 continue;
    2439              :             }
    2440              :         };
    2441            0 :         let metric = get_size_metric(&sizes, &request.order_by);
    2442              : 
    2443            0 :         if let Some(gt) = request.where_gt {
    2444              :             // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work
    2445            0 :             if metric <= gt {
    2446            0 :                 continue;
    2447            0 :             }
    2448            0 :         };
    2449              : 
    2450            0 :         match top_n.peek() {
    2451            0 :             None => {
    2452            0 :                 // Top N list is empty: candidate becomes first member
    2453            0 :                 top_n.push(HeapItem { metric, sizes });
    2454            0 :             }
    2455            0 :             Some(i) if i.metric > metric && top_n.len() < request.limit => {
    2456            0 :                 // Lowest item in list is greater than our candidate, but we aren't at limit yet: push to end
    2457            0 :                 top_n.push(HeapItem { metric, sizes });
    2458            0 :             }
    2459            0 :             Some(i) if i.metric > metric => {
    2460            0 :                 // List is at limit and lowest value is greater than our candidate, drop it.
    2461            0 :             }
    2462            0 :             Some(_) => top_n.push(HeapItem { metric, sizes }),
    2463              :         }
    2464              : 
    2465            0 :         while top_n.len() > request.limit {
    2466            0 :             top_n.pop();
    2467            0 :         }
    2468              :     }
    2469              : 
    2470            0 :     json_response(
    2471            0 :         StatusCode::OK,
    2472            0 :         TopTenantShardsResponse {
    2473            0 :             shards: top_n.into_iter().map(|i| i.sizes).collect(),
    2474            0 :         },
    2475            0 :     )
    2476            0 : }
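
The HeapItem ordering above is the standard min-heap-via-BinaryHeap trick: BinaryHeap pops its greatest element, so reversing the comparison makes the smallest retained metric the first to be evicted once the heap exceeds the limit. The same idea using Reverse directly, as a standalone sketch:

    use std::cmp::Reverse;
    use std::collections::BinaryHeap;

    fn top_n(values: impl IntoIterator<Item = u64>, limit: usize) -> Vec<u64> {
        let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(limit);
        for v in values {
            heap.push(Reverse(v));
            while heap.len() > limit {
                heap.pop(); // drops the smallest metric, keeping the N largest
            }
        }
        heap.into_iter().map(|Reverse(v)| v).collect()
    }

    fn main() {
        let mut top = top_n([5, 1, 9, 3, 7], 3);
        top.sort_unstable();
        assert_eq!(top, vec![5, 7, 9]);
    }
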
    2477              : 
    2478            0 : async fn put_tenant_timeline_import_basebackup(
    2479            0 :     request: Request<Body>,
    2480            0 :     _cancel: CancellationToken,
    2481            0 : ) -> Result<Response<Body>, ApiError> {
    2482            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    2483            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2484            0 :     let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
    2485            0 :     let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
    2486            0 :     let pg_version: u32 = must_parse_query_param(&request, "pg_version")?;
    2487              : 
    2488            0 :     check_permission(&request, Some(tenant_id))?;
    2489              : 
    2490            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    2491              : 
    2492            0 :     let span = info_span!("import_basebackup", tenant_id=%tenant_id, timeline_id=%timeline_id, base_lsn=%base_lsn, end_lsn=%end_lsn, pg_version=%pg_version);
    2493            0 :     async move {
    2494            0 :         let state = get_state(&request);
    2495            0 :         let tenant = state
    2496            0 :             .tenant_manager
    2497            0 :             .get_attached_tenant_shard(TenantShardId::unsharded(tenant_id))?;
    2498              : 
    2499            0 :         let broker_client = state.broker_client.clone();
    2500            0 : 
    2501            0 :         let mut body = StreamReader::new(request.into_body().map(|res| {
    2502            0 :             res.map_err(|error| {
    2503            0 :                 std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
    2504            0 :             })
    2505            0 :         }));
    2506            0 : 
    2507            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    2508              : 
    2509            0 :         let timeline = tenant
    2510            0 :             .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
    2511            0 :             .map_err(ApiError::InternalServerError)
    2512            0 :             .await?;
    2513              : 
    2514              :         // TODO mark timeline as not ready until it reaches end_lsn.
    2515              :         // We might have some wal to import as well, and we should prevent compute
    2516              :         // from connecting before that and writing conflicting wal.
    2517              :         //
    2518              :         // This is not relevant for pageserver->pageserver migrations, since there's
    2519              :         // no wal to import. But should be fixed if we want to import from postgres.
    2520              : 
    2521              :         // TODO leave clean state on error. For now you can use detach to clean
    2522              :         // up broken state from a failed import.
    2523              : 
    2524              :         // Import basebackup provided via CopyData
    2525            0 :         info!("importing basebackup");
    2526              : 
    2527            0 :         timeline
    2528            0 :             .import_basebackup_from_tar(tenant.clone(), &mut body, base_lsn, broker_client, &ctx)
    2529            0 :             .await
    2530            0 :             .map_err(ApiError::InternalServerError)?;
    2531              : 
    2532              :         // Read the end of the tar archive.
    2533            0 :         read_tar_eof(body)
    2534            0 :             .await
    2535            0 :             .map_err(ApiError::InternalServerError)?;
    2536              : 
    2537              :         // TODO check checksum
    2538              :         // Meanwhile you can verify client-side by taking fullbackup
    2539              :         // and checking that it matches in size with what was imported.
    2540              :         // It wouldn't work if base came from vanilla postgres though,
    2541              :         // since we discard some log files.
    2542              : 
    2543            0 :         info!("done");
    2544            0 :         json_response(StatusCode::OK, ())
    2545            0 :     }
    2546            0 :     .instrument(span)
    2547            0 :     .await
    2548            0 : }
    2549              : 
    2550            0 : async fn put_tenant_timeline_import_wal(
    2551            0 :     request: Request<Body>,
    2552            0 :     _cancel: CancellationToken,
    2553            0 : ) -> Result<Response<Body>, ApiError> {
    2554            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    2555            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2556            0 :     let start_lsn: Lsn = must_parse_query_param(&request, "start_lsn")?;
    2557            0 :     let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
    2558              : 
    2559            0 :     check_permission(&request, Some(tenant_id))?;
    2560              : 
    2561            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    2562              : 
    2563            0 :     let span = info_span!("import_wal", tenant_id=%tenant_id, timeline_id=%timeline_id, start_lsn=%start_lsn, end_lsn=%end_lsn);
    2564            0 :     async move {
    2565            0 :         let state = get_state(&request);
    2566              : 
    2567            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, TenantShardId::unsharded(tenant_id), timeline_id).await?;
    2568              : 
    2569            0 :         let mut body = StreamReader::new(request.into_body().map(|res| {
    2570            0 :             res.map_err(|error| {
    2571            0 :                 std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
    2572            0 :             })
    2573            0 :         }));
    2574            0 : 
    2575            0 :         let last_record_lsn = timeline.get_last_record_lsn();
    2576            0 :         if last_record_lsn != start_lsn {
    2577            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL from Lsn {start_lsn} because timeline does not start from the same lsn: {last_record_lsn}")));
    2578            0 :         }
    2579            0 : 
    2580            0 :         // TODO leave clean state on error. For now you can use detach to clean
    2581            0 :         // up broken state from a failed import.
    2582            0 : 
    2583            0 :         // Import wal provided via CopyData
    2584            0 :         info!("importing wal");
    2585            0 :         crate::import_datadir::import_wal_from_tar(&timeline, &mut body, start_lsn, end_lsn, &ctx).await.map_err(ApiError::InternalServerError)?;
    2586            0 :         info!("wal import complete");
    2587              : 
    2588              :         // Read the end of the tar archive.
    2589            0 :         read_tar_eof(body).await.map_err(ApiError::InternalServerError)?;
    2590              : 
    2591              :         // TODO Does it make sense to overshoot?
    2592            0 :         if timeline.get_last_record_lsn() < end_lsn {
    2593            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!("WAL import only reached {} but was expected to reach end_lsn {end_lsn}", timeline.get_last_record_lsn())));
    2594            0 :         }
    2595            0 : 
    2596            0 :         // Flush data to disk, then upload to s3. No need for a forced checkpoint.
    2597            0 :         // We only want to persist the data, and it doesn't matter if it's in the
    2598            0 :         // shape of deltas or images.
    2599            0 :         info!("flushing layers");
    2600            0 :         timeline.freeze_and_flush().await.map_err(|e| match e {
    2601            0 :             tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
    2602            0 :             other => ApiError::InternalServerError(anyhow::anyhow!(other)),
    2603            0 :         })?;
    2604              : 
    2605            0 :         info!("done");
    2606              : 
    2607            0 :         json_response(StatusCode::OK, ())
    2608            0 :     }.instrument(span).await
    2609            0 : }
    2610              : 
    2611              : /// Read the end of a tar archive.
    2612              : ///
    2613              : /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
    2614              : /// `tokio_tar` already read the first such block. Read the second all-zeros block,
    2615              : /// and check that there is no more data after the EOF marker.
    2616              : ///
    2617              : /// The 'tar' command can also write extra blocks of zeros, up to a full record
    2618              : /// size, controlled by the --record-size argument. Ignore those too.
    2619            0 : async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow::Result<()> {
    2620            0 :     use tokio::io::AsyncReadExt;
    2621            0 :     let mut buf = [0u8; 512];
    2622            0 : 
    2623            0 :     // Read the all-zeros block, and verify it
    2624            0 :     let mut total_bytes = 0;
    2625            0 :     while total_bytes < 512 {
    2626            0 :         let nbytes = reader.read(&mut buf[total_bytes..]).await?;
    2627            0 :         total_bytes += nbytes;
    2628            0 :         if nbytes == 0 {
    2629            0 :             break;
    2630            0 :         }
    2631              :     }
    2632            0 :     if total_bytes < 512 {
    2633            0 :         anyhow::bail!("incomplete or invalid tar EOF marker");
    2634            0 :     }
    2635            0 :     if !buf.iter().all(|&x| x == 0) {
    2636            0 :         anyhow::bail!("invalid tar EOF marker");
    2637            0 :     }
    2638            0 : 
    2639            0 :     // Drain any extra zero-blocks after the EOF marker
    2640            0 :     let mut trailing_bytes = 0;
    2641            0 :     let mut seen_nonzero_bytes = false;
    2642              :     loop {
    2643            0 :         let nbytes = reader.read(&mut buf).await?;
    2644            0 :         trailing_bytes += nbytes;
    2645            0 :         if !buf.iter().all(|&x| x == 0) {
    2646            0 :             seen_nonzero_bytes = true;
    2647            0 :         }
    2648            0 :         if nbytes == 0 {
    2649            0 :             break;
    2650            0 :         }
    2651              :     }
    2652            0 :     if seen_nonzero_bytes {
    2653            0 :         anyhow::bail!("unexpected non-zero bytes after the tar archive");
    2654            0 :     }
    2655            0 :     if trailing_bytes % 512 != 0 {
    2656            0 :         anyhow::bail!("unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive");
    2657            0 :     }
    2658            0 :     Ok(())
    2659            0 : }
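
Since tokio implements AsyncRead for byte slices, logic like read_tar_eof can be exercised against in-memory buffers. A sketch of checking a single trailing zero block that way (expect_zero_block is a simplified stand-in for the function above; assumes the tokio and anyhow crates):

    use tokio::io::AsyncReadExt;

    // Simplified stand-in: read one 512-byte block and verify it is all zeros.
    async fn expect_zero_block(mut reader: impl tokio::io::AsyncRead + Unpin) -> anyhow::Result<()> {
        let mut buf = [0u8; 512];
        reader.read_exact(&mut buf).await?;
        anyhow::ensure!(buf.iter().all(|&x| x == 0), "invalid tar EOF marker");
        Ok(())
    }

    #[tokio::main]
    async fn main() -> anyhow::Result<()> {
        // A valid trailer: one more all-zeros block.
        let trailer = vec![0u8; 512];
        expect_zero_block(trailer.as_slice()).await?;

        // A corrupt trailer fails the check.
        let mut bad = vec![0u8; 512];
        bad[100] = 1;
        assert!(expect_zero_block(bad.as_slice()).await.is_err());
        Ok(())
    }
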
    2660              : 
    2661              : /// Common functionality of all the HTTP API handlers.
    2662              : ///
    2663              : /// - Adds a tracing span to each request (by `request_span`)
    2664              : /// - Logs the request depending on the request method (by `request_span`)
    2665              : /// - Logs the response if it was not successful (by `request_span`)
    2666              : /// - Shields the handler function from async cancellations. Hyper can drop the handler
    2667              : ///   Future if the connection to the client is lost, but most of the pageserver code is
    2668              : ///   not async cancellation safe. This converts the dropped future into a graceful cancellation
    2669              : ///   request with a CancellationToken.
    2670            0 : async fn api_handler<R, H>(request: Request<Body>, handler: H) -> Result<Response<Body>, ApiError>
    2671            0 : where
    2672            0 :     R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
    2673            0 :     H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
    2674            0 : {
    2675            0 :     if request.uri() != &"/v1/failpoints".parse::<Uri>().unwrap() {
    2676            0 :         fail::fail_point!("api-503", |_| Err(ApiError::ResourceUnavailable(
    2677            0 :             "failpoint".into()
    2678            0 :         )));
    2679              : 
    2680            0 :         fail::fail_point!("api-500", |_| Err(ApiError::InternalServerError(
    2681            0 :             anyhow::anyhow!("failpoint")
    2682            0 :         )));
    2683            0 :     }
    2684              : 
    2685              :     // Spawn a new task to handle the request, to protect the handler from unexpected
    2686              :     // async cancellations. Most pageserver functions are not async cancellation safe.
    2687              :     // We arm a drop-guard, so that if Hyper drops the Future, we signal the task
    2688              :     // with the cancellation token.
    2689            0 :     let token = CancellationToken::new();
    2690            0 :     let cancel_guard = token.clone().drop_guard();
    2691            0 :     let result = request_span(request, move |r| async {
    2692            0 :         let handle = tokio::spawn(
    2693            0 :             async {
    2694            0 :                 let token_cloned = token.clone();
    2695            0 :                 let result = handler(r, token).await;
    2696            0 :                 if token_cloned.is_cancelled() {
    2697            0 :                     // The drop guard has executed: we will never turn this result into a response.
    2698            0 :                     //
    2699            0 :                     // At least temporarily, log with {:?}; these failures are rare, but
    2700            0 :                     // could hide difficult errors.
    2701            0 :                     match &result {
    2702            0 :                         Ok(response) => {
    2703            0 :                             let status = response.status();
    2704            0 :                             info!(%status, "Cancelled request finished successfully")
    2705            0 :                         }
    2706            0 :                         Err(e) => error!("Cancelled request finished with an error: {e:?}"),
    2707            0 :                     }
    2708            0 :                 }
    2709            0 :                 // The only logging for cancelled, panicked request handlers is the tracing_panic_hook,
    2710            0 :                 // which should suffice.
    2711            0 :                 //
    2712            0 :                 // There is still a chance of losing the result due to a race between returning
    2713            0 :                 // from here and the connection actually closing before the outer task gets to
    2714            0 :                 // execute; leaving that up for #5815.
    2715            0 :                 result
    2716            0 :             }
    2717            0 :             .in_current_span(),
    2718            0 :         );
    2719            0 : 
    2720            0 :         match handle.await {
    2721            0 :             // TODO: never actually return Err from here, always Ok(...) so that we can log
    2722            0 :             // spanned errors. Call api_error_handler instead and return appropriate Body.
    2723            0 :             Ok(result) => result,
    2724            0 :             Err(e) => {
    2725            0 :                 // The handler task panicked. We have a global panic handler that logs the
    2726            0 :                 // panic with its backtrace, so no need to log that here. Only log a brief
    2727            0 :                 // message to make it clear that we returned the error to the client.
    2728            0 :                 error!("HTTP request handler task panicked: {e:#}");
    2729            0 : 
    2730            0 :                 // Don't return an Error here, because then fallback error handler that was
    2731            0 :                 // installed in make_router() will print the error. Instead, construct the
    2732            0 :                 // HTTP error response and return that.
    2733            0 :                 Ok(
    2734            0 :                     ApiError::InternalServerError(anyhow!("HTTP request handler task panicked"))
    2735            0 :                         .into_response(),
    2736            0 :                 )
    2737            0 :             }
    2738            0 :         }
    2739            0 :     })
    2740            0 :     .await;
    2741              : 
    2742            0 :     cancel_guard.disarm();
    2743            0 : 
    2744            0 :     result
    2745            0 : }
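
The drop-guard idiom is what converts Hyper dropping the request future into a cooperative cancellation signal for the spawned task; disarming the guard on the normal path prevents a spurious cancel. A minimal sketch (assumes the tokio and tokio-util crates):

    use tokio_util::sync::CancellationToken;

    #[tokio::main]
    async fn main() {
        let token = CancellationToken::new();
        // Arm the guard: dropping it cancels the token (clones share state).
        let guard = token.clone().drop_guard();

        let task = tokio::spawn(async move {
            token.cancelled().await;
            "saw cancellation"
        });

        // Simulate Hyper dropping the request future mid-flight. On the success
        // path the handler calls guard.disarm() instead, so no cancel is sent.
        drop(guard);
        assert_eq!(task.await.unwrap(), "saw cancellation");
    }
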
    2746              : 
    2747              : /// Like api_handler, but returns an error response if the server is built without
    2748              : /// the 'testing' feature.
    2749            0 : async fn testing_api_handler<R, H>(
    2750            0 :     desc: &str,
    2751            0 :     request: Request<Body>,
    2752            0 :     handler: H,
    2753            0 : ) -> Result<Response<Body>, ApiError>
    2754            0 : where
    2755            0 :     R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
    2756            0 :     H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
    2757            0 : {
    2758            0 :     if cfg!(feature = "testing") {
    2759            0 :         api_handler(request, handler).await
    2760              :     } else {
    2761            0 :         std::future::ready(Err(ApiError::BadRequest(anyhow!(
    2762            0 :             "Cannot {desc} because pageserver was compiled without testing APIs",
    2763            0 :         ))))
    2764            0 :         .await
    2765              :     }
    2766            0 : }
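
Aside: cfg!(feature = "testing") expands to a compile-time bool, so both branches
above are type-checked but the disabled branch costs nothing at runtime. A minimal
standalone sketch of the same gate (the feature name must be declared under
[features] in Cargo.toml; testing_enabled is a hypothetical name):

    // Hypothetical illustration of the compile-time feature gate used above.
    fn testing_enabled() -> Result<(), String> {
        if cfg!(feature = "testing") {
            // Compiled with --features testing: the test-only path is reachable.
            Ok(())
        } else {
            // Production build: callers get an error instead of the testing API.
            Err("compiled without testing APIs".to_string())
        }
    }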
    2767              : 
    2768            0 : pub fn make_router(
    2769            0 :     state: Arc<State>,
    2770            0 :     launch_ts: &'static LaunchTimestamp,
    2771            0 :     auth: Option<Arc<SwappableJwtAuth>>,
    2772            0 : ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
    2773            0 :     let spec = include_bytes!("openapi_spec.yml");
    2774            0 :     let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
    2775            0 :     if auth.is_some() {
    2776            0 :         router = router.middleware(auth_middleware(|request| {
    2777            0 :             let state = get_state(request);
    2778            0 :             if state.allowlist_routes.contains(request.uri()) {
    2779            0 :                 None
    2780              :             } else {
    2781            0 :                 state.auth.as_deref()
    2782              :             }
    2783            0 :         }))
    2784            0 :     }
    2785              : 
    2786            0 :     router = router.middleware(
    2787            0 :         endpoint::add_response_header_middleware(
    2788            0 :             "PAGESERVER_LAUNCH_TIMESTAMP",
    2789            0 :             &launch_ts.to_string(),
    2790            0 :         )
    2791            0 :         .expect("construct launch timestamp header middleware"),
    2792            0 :     );
    2793            0 : 
    2794            0 :     Ok(router
    2795            0 :         .data(state)
    2796            0 :         .get("/metrics", |r| request_span(r, prometheus_metrics_handler))
    2797            0 :         .get("/v1/status", |r| api_handler(r, status_handler))
    2798            0 :         .put("/v1/failpoints", |r| {
    2799            0 :             testing_api_handler("manage failpoints", r, failpoints_handler)
    2800            0 :         })
    2801            0 :         .post("/v1/reload_auth_validation_keys", |r| {
    2802            0 :             api_handler(r, reload_auth_validation_keys_handler)
    2803            0 :         })
    2804            0 :         .get("/v1/tenant", |r| api_handler(r, tenant_list_handler))
    2805            0 :         .get("/v1/tenant/:tenant_shard_id", |r| {
    2806            0 :             api_handler(r, tenant_status)
    2807            0 :         })
    2808            0 :         .delete("/v1/tenant/:tenant_shard_id", |r| {
    2809            0 :             api_handler(r, tenant_delete_handler)
    2810            0 :         })
    2811            0 :         .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
    2812            0 :             api_handler(r, tenant_size_handler)
    2813            0 :         })
    2814            0 :         .put("/v1/tenant/config", |r| {
    2815            0 :             api_handler(r, update_tenant_config_handler)
    2816            0 :         })
    2817            0 :         .put("/v1/tenant/:tenant_shard_id/shard_split", |r| {
    2818            0 :             api_handler(r, tenant_shard_split_handler)
    2819            0 :         })
    2820            0 :         .get("/v1/tenant/:tenant_shard_id/config", |r| {
    2821            0 :             api_handler(r, get_tenant_config_handler)
    2822            0 :         })
    2823            0 :         .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
    2824            0 :             api_handler(r, put_tenant_location_config_handler)
    2825            0 :         })
    2826            0 :         .get("/v1/location_config", |r| {
    2827            0 :             api_handler(r, list_location_config_handler)
    2828            0 :         })
    2829            0 :         .get("/v1/location_config/:tenant_shard_id", |r| {
    2830            0 :             api_handler(r, get_location_config_handler)
    2831            0 :         })
    2832            0 :         .put(
    2833            0 :             "/v1/tenant/:tenant_shard_id/time_travel_remote_storage",
    2834            0 :             |r| api_handler(r, tenant_time_travel_remote_storage_handler),
    2835            0 :         )
    2836            0 :         .get("/v1/tenant/:tenant_shard_id/timeline", |r| {
    2837            0 :             api_handler(r, timeline_list_handler)
    2838            0 :         })
    2839            0 :         .post("/v1/tenant/:tenant_shard_id/timeline", |r| {
    2840            0 :             api_handler(r, timeline_create_handler)
    2841            0 :         })
    2842            0 :         .post("/v1/tenant/:tenant_shard_id/reset", |r| {
    2843            0 :             api_handler(r, tenant_reset_handler)
    2844            0 :         })
    2845            0 :         .post(
    2846            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive",
    2847            0 :             |r| api_handler(r, timeline_preserve_initdb_handler),
    2848            0 :         )
    2849            0 :         .post(
    2850            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config",
    2851            0 :             |r| api_handler(r, timeline_archival_config_handler),
    2852            0 :         )
    2853            0 :         .get("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
    2854            0 :             api_handler(r, timeline_detail_handler)
    2855            0 :         })
    2856            0 :         .get(
    2857            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_lsn_by_timestamp",
    2858            0 :             |r| api_handler(r, get_lsn_by_timestamp_handler),
    2859            0 :         )
    2860            0 :         .get(
    2861            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn",
    2862            0 :             |r| api_handler(r, get_timestamp_of_lsn_handler),
    2863            0 :         )
    2864            0 :         .post(
    2865            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease",
    2866            0 :             |r| api_handler(r, lsn_lease_handler),
    2867            0 :         )
    2868            0 :         .put(
    2869            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc",
    2870            0 :             |r| api_handler(r, timeline_gc_handler),
    2871            0 :         )
    2872            0 :         .put(
    2873            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
    2874            0 :             |r| testing_api_handler("run timeline compaction", r, timeline_compact_handler),
    2875            0 :         )
    2876            0 :         .put(
    2877            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/checkpoint",
    2878            0 :             |r| testing_api_handler("run timeline checkpoint", r, timeline_checkpoint_handler),
    2879            0 :         )
    2880            0 :         .post(
    2881            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
    2882            0 :             |r| api_handler(r, timeline_download_remote_layers_handler_post),
    2883            0 :         )
    2884            0 :         .get(
    2885            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
    2886            0 :             |r| api_handler(r, timeline_download_remote_layers_handler_get),
    2887            0 :         )
    2888            0 :         .put(
    2889            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/detach_ancestor",
    2890            0 :             |r| api_handler(r, timeline_detach_ancestor_handler),
    2891            0 :         )
    2892            0 :         .delete("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
    2893            0 :             api_handler(r, timeline_delete_handler)
    2894            0 :         })
    2895            0 :         .get(
    2896            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer",
    2897            0 :             |r| api_handler(r, layer_map_info_handler),
    2898            0 :         )
    2899            0 :         .get(
    2900            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
    2901            0 :             |r| api_handler(r, layer_download_handler),
    2902            0 :         )
    2903            0 :         .delete(
    2904            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
    2905            0 :             |r| api_handler(r, evict_timeline_layer_handler),
    2906            0 :         )
    2907            0 :         .post("/v1/tenant/:tenant_shard_id/heatmap_upload", |r| {
    2908            0 :             api_handler(r, secondary_upload_handler)
    2909            0 :         })
    2910            0 :         .get("/v1/tenant/:tenant_id/scan_remote_storage", |r| {
    2911            0 :             api_handler(r, tenant_scan_remote_handler)
    2912            0 :         })
    2913            0 :         .put("/v1/disk_usage_eviction/run", |r| {
    2914            0 :             api_handler(r, disk_usage_eviction_run)
    2915            0 :         })
    2916            0 :         .put("/v1/deletion_queue/flush", |r| {
    2917            0 :             api_handler(r, deletion_queue_flush)
    2918            0 :         })
    2919            0 :         .get("/v1/tenant/:tenant_shard_id/secondary/status", |r| {
    2920            0 :             api_handler(r, secondary_status_handler)
    2921            0 :         })
    2922            0 :         .post("/v1/tenant/:tenant_shard_id/secondary/download", |r| {
    2923            0 :             api_handler(r, secondary_download_handler)
    2924            0 :         })
    2925            0 :         .put("/v1/tenant/:tenant_shard_id/break", |r| {
    2926            0 :             testing_api_handler("set tenant state to broken", r, handle_tenant_break)
    2927            0 :         })
    2928            0 :         .get("/v1/panic", |r| api_handler(r, always_panic_handler))
    2929            0 :         .post("/v1/tracing/event", |r| {
    2930            0 :             testing_api_handler("emit a tracing event", r, post_tracing_event_handler)
    2931            0 :         })
    2932            0 :         .get(
    2933            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage",
    2934            0 :             |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler),
    2935            0 :         )
    2936            0 :         .get(
    2937            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/keyspace",
    2938            0 :             |r| api_handler(r, timeline_collect_keyspace),
    2939            0 :         )
    2940            0 :         .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler))
    2941            0 :         .put(
    2942            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/force_aux_policy_switch",
    2943            0 :             |r| api_handler(r, force_aux_policy_switch_handler),
    2944            0 :         )
    2945            0 :         .get("/v1/utilization", |r| api_handler(r, get_utilization))
    2946            0 :         .post(
    2947            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files",
    2948            0 :             |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files),
    2949            0 :         )
    2950            0 :         .post(
    2951            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files",
    2952            0 :             |r| testing_api_handler("list_aux_files", r, list_aux_files),
    2953            0 :         )
    2954            0 :         .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
    2955            0 :         .post(
    2956            0 :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
    2957            0 :             |r| testing_api_handler("perf_info", r, perf_info),
    2958            0 :         )
    2959            0 :         .put(
    2960            0 :             "/v1/tenant/:tenant_id/timeline/:timeline_id/import_basebackup",
    2961            0 :             |r| api_handler(r, put_tenant_timeline_import_basebackup),
    2962            0 :         )
    2963            0 :         .put(
    2964            0 :             "/v1/tenant/:tenant_id/timeline/:timeline_id/import_wal",
    2965            0 :             |r| api_handler(r, put_tenant_timeline_import_wal),
    2966            0 :         )
    2967            0 :         .any(handler_404))
    2968            0 : }
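
Aside: the auth middleware installed above skips JWT validation for requests whose
URI is on the allowlist and enforces it everywhere else. A minimal sketch of that
decision, with hypothetical types (the real check matches the request's hyper::Uri
against state.allowlist_routes):

    use std::collections::HashSet;

    // Hypothetical helper: true when the request must carry a valid JWT.
    fn requires_auth(allowlist_routes: &HashSet<String>, uri: &str) -> bool {
        // Allowlisted routes (e.g. "/v1/status") are served without a token;
        // everything else goes through JWT validation.
        !allowlist_routes.contains(uri)
    }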
        

Generated by: LCOV version 2.1-beta