LCOV - code coverage report
Current view: top level - pageserver/src/http - routes.rs (source / functions)
Test: ac1e0b9bf1b4ead74961174b01ba016322d3f9a6.info
Test Date: 2025-07-08 09:16:10
Coverage: Lines: 0.0 % (0 of 2749 hit) | Functions: 0.0 % (0 of 901 hit)

            Line data    Source code
       1              : //!
       2              : //! Management HTTP API
       3              : //!
       4              : use std::cmp::Reverse;
       5              : use std::collections::{BinaryHeap, HashMap};
       6              : use std::str::FromStr;
       7              : use std::sync::Arc;
       8              : use std::time::Duration;
       9              : 
      10              : use anyhow::{Context, Result, anyhow};
      11              : use enumset::EnumSet;
      12              : use futures::future::join_all;
      13              : use futures::{StreamExt, TryFutureExt};
      14              : use http_utils::endpoint::{
      15              :     self, attach_openapi_ui, auth_middleware, check_permission_with, profile_cpu_handler,
      16              :     profile_heap_handler, prometheus_metrics_handler, request_span,
      17              : };
      18              : use http_utils::error::{ApiError, HttpErrorBody};
      19              : use http_utils::failpoints::failpoints_handler;
      20              : use http_utils::json::{json_request, json_request_maybe, json_response};
      21              : use http_utils::request::{
      22              :     get_request_param, must_get_query_param, must_parse_query_param, parse_query_param,
      23              :     parse_request_param,
      24              : };
      25              : use http_utils::{RequestExt, RouterBuilder};
      26              : use humantime::format_rfc3339;
      27              : use hyper::{Body, Request, Response, StatusCode, Uri, header};
      28              : use metrics::launch_timestamp::LaunchTimestamp;
      29              : use pageserver_api::models::virtual_file::IoMode;
      30              : use pageserver_api::models::{
      31              :     DetachBehavior, DownloadRemoteLayersTaskSpawnRequest, IngestAuxFilesRequest,
      32              :     ListAuxFilesRequest, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      33              :     LsnLeaseRequest, OffloadedTimelineInfo, PageTraceEvent, ShardParameters, StatusResponse,
      34              :     TenantConfigPatchRequest, TenantConfigRequest, TenantDetails, TenantInfo,
      35              :     TenantLocationConfigRequest, TenantLocationConfigResponse, TenantScanRemoteStorageResponse,
      36              :     TenantScanRemoteStorageShard, TenantShardLocation, TenantShardSplitRequest,
      37              :     TenantShardSplitResponse, TenantSorting, TenantState, TenantWaitLsnRequest,
      38              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateRequestMode,
      39              :     TimelineCreateRequestModeImportPgdata, TimelineGcRequest, TimelineInfo,
      40              :     TimelinePatchIndexPartRequest, TimelineVisibilityState, TimelinesInfoAndOffloaded,
      41              :     TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
      42              : };
      43              : use pageserver_api::shard::{ShardCount, TenantShardId};
      44              : use postgres_ffi::PgMajorVersion;
      45              : use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
      46              : use scopeguard::defer;
      47              : use serde_json::json;
      48              : use tenant_size_model::svg::SvgBranchKind;
      49              : use tenant_size_model::{SizeResult, StorageModel};
      50              : use tokio::time::Instant;
      51              : use tokio_util::io::StreamReader;
      52              : use tokio_util::sync::CancellationToken;
      53              : use tracing::*;
      54              : use utils::auth::SwappableJwtAuth;
      55              : use utils::generation::Generation;
      56              : use utils::id::{TenantId, TimelineId};
      57              : use utils::lsn::Lsn;
      58              : 
      59              : use crate::config::PageServerConf;
      60              : use crate::context;
      61              : use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder};
      62              : use crate::deletion_queue::DeletionQueueClient;
      63              : use crate::feature_resolver::FeatureResolver;
      64              : use crate::pgdatadir_mapping::LsnForTimestamp;
      65              : use crate::task_mgr::TaskKind;
      66              : use crate::tenant::config::LocationConf;
      67              : use crate::tenant::mgr::{
      68              :     GetActiveTenantError, GetTenantError, TenantManager, TenantMapError, TenantMapInsertError,
      69              :     TenantSlot, TenantSlotError, TenantSlotUpsertError, TenantStateError, UpsertLocationError,
      70              : };
      71              : use crate::tenant::remote_timeline_client::index::GcCompactionState;
      72              : use crate::tenant::remote_timeline_client::{
      73              :     download_index_part, download_tenant_manifest, list_remote_tenant_shards, list_remote_timelines,
      74              : };
      75              : use crate::tenant::secondary::SecondaryController;
      76              : use crate::tenant::size::ModelInputs;
      77              : use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName};
      78              : use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
      79              : use crate::tenant::timeline::offload::{OffloadError, offload_timeline};
      80              : use crate::tenant::timeline::{
      81              :     CompactFlags, CompactOptions, CompactRequest, CompactionError, MarkInvisibleRequest, Timeline,
      82              :     WaitLsnTimeout, WaitLsnWaiter, import_pgdata,
      83              : };
      84              : use crate::tenant::{
      85              :     GetTimelineError, LogicalSizeCalculationCause, OffloadedTimeline, PageReconstructError,
      86              :     remote_timeline_client,
      87              : };
      88              : use crate::{DEFAULT_PG_VERSION, disk_usage_eviction_task, tenant};
      89              : 
      90              : // For APIs that require an Active tenant, how long should we block waiting for that state?
      91              : // This is not functionally necessary (clients will retry), but avoids generating a lot of
      92              : // failed API calls while tenants are activating.
      93              : #[cfg(not(feature = "testing"))]
      94              : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);
      95              : 
      96              : // Tests run on slow/oversubscribed nodes, and may need to wait much longer for tenants to
      97              : // finish attaching, if calls to remote storage are slow.
      98              : #[cfg(feature = "testing")]
      99              : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
     100              : 
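
The two ACTIVE_TENANT_TIMEOUT values above bound how long a management handler blocks waiting for a tenant to activate before returning a retryable error. Below is a minimal, self-contained sketch of that wait-with-deadline pattern; it assumes only the tokio crate and uses invented names (wait_until_active, is_active) rather than the pageserver's real wait_to_become_active.

    use std::time::Duration;

    const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);

    /// Poll a readiness check until it succeeds or the deadline passes.
    /// A handler would map the Err case to a 503-style response so clients retry.
    async fn wait_until_active(mut is_active: impl FnMut() -> bool) -> Result<(), &'static str> {
        let deadline = tokio::time::Instant::now() + ACTIVE_TENANT_TIMEOUT;
        while !is_active() {
            if tokio::time::Instant::now() >= deadline {
                return Err("tenant did not become active within the timeout");
            }
            tokio::time::sleep(Duration::from_millis(100)).await;
        }
        Ok(())
    }

    #[tokio::main]
    async fn main() {
        // Toy readiness check that flips to true after a few polls.
        let mut polls = 0;
        let result = wait_until_active(|| {
            polls += 1;
            polls > 3
        })
        .await;
        println!("{result:?}");
    }
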
     101              : pub struct State {
     102              :     conf: &'static PageServerConf,
     103              :     tenant_manager: Arc<TenantManager>,
     104              :     auth: Option<Arc<SwappableJwtAuth>>,
     105              :     allowlist_routes: &'static [&'static str],
     106              :     remote_storage: GenericRemoteStorage,
     107              :     broker_client: storage_broker::BrokerClientChannel,
     108              :     disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
     109              :     deletion_queue_client: DeletionQueueClient,
     110              :     secondary_controller: SecondaryController,
     111              :     latest_utilization: tokio::sync::Mutex<Option<(std::time::Instant, bytes::Bytes)>>,
     112              :     feature_resolver: FeatureResolver,
     113              : }
     114              : 
     115              : impl State {
     116              :     #[allow(clippy::too_many_arguments)]
     117            0 :     pub fn new(
     118            0 :         conf: &'static PageServerConf,
     119            0 :         tenant_manager: Arc<TenantManager>,
     120            0 :         auth: Option<Arc<SwappableJwtAuth>>,
     121            0 :         remote_storage: GenericRemoteStorage,
     122            0 :         broker_client: storage_broker::BrokerClientChannel,
     123            0 :         disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
     124            0 :         deletion_queue_client: DeletionQueueClient,
     125            0 :         secondary_controller: SecondaryController,
     126            0 :         feature_resolver: FeatureResolver,
     127            0 :     ) -> anyhow::Result<Self> {
     128            0 :         let allowlist_routes = &[
     129            0 :             "/v1/status",
     130            0 :             "/v1/doc",
     131            0 :             "/swagger.yml",
     132            0 :             "/metrics",
     133            0 :             "/profile/cpu",
     134            0 :             "/profile/heap",
     135            0 :         ];
     136            0 :         Ok(Self {
     137            0 :             conf,
     138            0 :             tenant_manager,
     139            0 :             auth,
     140            0 :             allowlist_routes,
     141            0 :             remote_storage,
     142            0 :             broker_client,
     143            0 :             disk_usage_eviction_state,
     144            0 :             deletion_queue_client,
     145            0 :             secondary_controller,
     146            0 :             latest_utilization: Default::default(),
     147            0 :             feature_resolver,
     148            0 :         })
     149            0 :     }
     150              : }
     151              : 
     152              : #[inline(always)]
     153            0 : fn get_state(request: &Request<Body>) -> &State {
     154            0 :     request
     155            0 :         .data::<Arc<State>>()
     156            0 :         .expect("unknown state type")
     157            0 :         .as_ref()
     158            0 : }
     159              : 
     160              : #[inline(always)]
     161            0 : fn get_config(request: &Request<Body>) -> &'static PageServerConf {
     162            0 :     get_state(request).conf
     163            0 : }
     164              : 
     165              : /// Check that the requester is authorized to operate on given tenant
     166            0 : fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
     167            0 :     check_permission_with(request, |claims| {
     168            0 :         crate::auth::check_permission(claims, tenant_id)
     169            0 :     })
     170            0 : }
     171              : 
     172              : impl From<PageReconstructError> for ApiError {
     173            0 :     fn from(pre: PageReconstructError) -> ApiError {
     174            0 :         match pre {
     175            0 :             PageReconstructError::Other(other) => ApiError::InternalServerError(other),
     176            0 :             PageReconstructError::MissingKey(e) => ApiError::InternalServerError(e.into()),
     177            0 :             PageReconstructError::Cancelled => ApiError::Cancelled,
     178            0 :             PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()),
     179            0 :             PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre),
     180              :         }
     181            0 :     }
     182              : }
     183              : 
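
Each internal error type in this module gets a dedicated From<...> for ApiError impl like the one above, which is what lets the HTTP handlers further down propagate failures with `?` and still produce the right status code. A small self-contained sketch of that conversion pattern follows; DomainError and HttpError are invented, illustration-only types, not the pageserver's actual error taxonomy.

    #[derive(Debug)]
    enum DomainError {
        NotFound(String),
        ShuttingDown,
        Other(String),
    }

    #[derive(Debug)]
    enum HttpError {
        NotFound(String),
        ServiceUnavailable,
        Internal(String),
    }

    // One conversion impl per domain error keeps the mapping in a single place.
    impl From<DomainError> for HttpError {
        fn from(e: DomainError) -> Self {
            match e {
                DomainError::NotFound(what) => HttpError::NotFound(what),
                DomainError::ShuttingDown => HttpError::ServiceUnavailable,
                DomainError::Other(msg) => HttpError::Internal(msg),
            }
        }
    }

    fn lookup_tenant(exists: bool) -> Result<(), DomainError> {
        if exists {
            Ok(())
        } else {
            Err(DomainError::NotFound("tenant".to_string()))
        }
    }

    // A handler can now use `?`: the From impl converts the error automatically.
    fn handler() -> Result<(), HttpError> {
        lookup_tenant(false)?;
        Ok(())
    }

    fn main() {
        println!("{:?}", handler());
    }
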
     184              : impl From<TenantMapInsertError> for ApiError {
     185            0 :     fn from(tmie: TenantMapInsertError) -> ApiError {
     186            0 :         match tmie {
     187            0 :             TenantMapInsertError::SlotError(e) => e.into(),
     188            0 :             TenantMapInsertError::SlotUpsertError(e) => e.into(),
     189            0 :             TenantMapInsertError::Other(e) => ApiError::InternalServerError(e),
     190              :         }
     191            0 :     }
     192              : }
     193              : 
     194              : impl From<TenantSlotError> for ApiError {
     195            0 :     fn from(e: TenantSlotError) -> ApiError {
     196              :         use TenantSlotError::*;
     197            0 :         match e {
     198            0 :             NotFound(tenant_id) => {
     199            0 :                 ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into())
     200              :             }
     201              :             InProgress => {
     202            0 :                 ApiError::ResourceUnavailable("Tenant is being modified concurrently".into())
     203              :             }
     204            0 :             MapState(e) => e.into(),
     205              :         }
     206            0 :     }
     207              : }
     208              : 
     209              : impl From<TenantSlotUpsertError> for ApiError {
     210            0 :     fn from(e: TenantSlotUpsertError) -> ApiError {
     211              :         use TenantSlotUpsertError::*;
     212            0 :         match e {
     213            0 :             InternalError(e) => ApiError::InternalServerError(anyhow::anyhow!("{e}")),
     214            0 :             MapState(e) => e.into(),
     215            0 :             ShuttingDown(_) => ApiError::ShuttingDown,
     216              :         }
     217            0 :     }
     218              : }
     219              : 
     220              : impl From<UpsertLocationError> for ApiError {
     221            0 :     fn from(e: UpsertLocationError) -> ApiError {
     222              :         use UpsertLocationError::*;
     223            0 :         match e {
     224            0 :             BadRequest(e) => ApiError::BadRequest(e),
     225            0 :             Unavailable(_) => ApiError::ShuttingDown,
     226            0 :             e @ InProgress => ApiError::Conflict(format!("{e}")),
     227            0 :             Flush(e) | InternalError(e) => ApiError::InternalServerError(e),
     228              :         }
     229            0 :     }
     230              : }
     231              : 
     232              : impl From<TenantMapError> for ApiError {
     233            0 :     fn from(e: TenantMapError) -> ApiError {
     234              :         use TenantMapError::*;
     235            0 :         match e {
     236              :             StillInitializing | ShuttingDown => {
     237            0 :                 ApiError::ResourceUnavailable(format!("{e}").into())
     238              :             }
     239              :         }
     240            0 :     }
     241              : }
     242              : 
     243              : impl From<TenantStateError> for ApiError {
     244            0 :     fn from(tse: TenantStateError) -> ApiError {
     245            0 :         match tse {
     246              :             TenantStateError::IsStopping(_) => {
     247            0 :                 ApiError::ResourceUnavailable("Tenant is stopping".into())
     248              :             }
     249            0 :             TenantStateError::SlotError(e) => e.into(),
     250            0 :             TenantStateError::SlotUpsertError(e) => e.into(),
     251            0 :             TenantStateError::Other(e) => ApiError::InternalServerError(anyhow!(e)),
     252              :         }
     253            0 :     }
     254              : }
     255              : 
     256              : impl From<GetTenantError> for ApiError {
     257            0 :     fn from(tse: GetTenantError) -> ApiError {
     258            0 :         match tse {
     259            0 :             GetTenantError::NotFound(tid) => ApiError::NotFound(anyhow!("tenant {tid}").into()),
     260            0 :             GetTenantError::ShardNotFound(tid) => {
     261            0 :                 ApiError::NotFound(anyhow!("tenant {tid}").into())
     262              :             }
     263              :             GetTenantError::NotActive(_) => {
     264              :                 // Why is this not `ApiError::NotFound`?
     265              :                 // Because we must be careful to never return 404 for a tenant if it does
     266              :                 // in fact exist locally. If we did, the caller could draw the conclusion
     267              :                 // that it can attach the tenant to another PS and we'd be in split-brain.
     268            0 :                 ApiError::ResourceUnavailable("Tenant not yet active".into())
     269              :             }
     270            0 :             GetTenantError::MapState(e) => ApiError::ResourceUnavailable(format!("{e}").into()),
     271              :         }
     272            0 :     }
     273              : }
     274              : 
     275              : impl From<GetTimelineError> for ApiError {
     276            0 :     fn from(gte: GetTimelineError) -> Self {
      277              :         // Rationale: tenant is activated only after eligible timelines activate
     278            0 :         ApiError::NotFound(gte.into())
     279            0 :     }
     280              : }
     281              : 
     282              : impl From<GetActiveTenantError> for ApiError {
     283            0 :     fn from(e: GetActiveTenantError) -> ApiError {
     284            0 :         match e {
     285            0 :             GetActiveTenantError::Broken(reason) => {
     286            0 :                 ApiError::InternalServerError(anyhow!("tenant is broken: {}", reason))
     287              :             }
     288              :             GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
     289            0 :                 ApiError::ShuttingDown
     290              :             }
     291            0 :             GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")),
     292            0 :             GetActiveTenantError::Cancelled => ApiError::ShuttingDown,
     293            0 :             GetActiveTenantError::NotFound(gte) => gte.into(),
     294              :             GetActiveTenantError::WaitForActiveTimeout { .. } => {
     295            0 :                 ApiError::ResourceUnavailable(format!("{e}").into())
     296              :             }
     297              :             GetActiveTenantError::SwitchedTenant => {
     298              :                 // in our HTTP handlers, this error doesn't happen
     299              :                 // TODO: separate error types
     300            0 :                 ApiError::ResourceUnavailable("switched tenant".into())
     301              :             }
     302              :         }
     303            0 :     }
     304              : }
     305              : 
     306              : impl From<crate::tenant::DeleteTimelineError> for ApiError {
     307            0 :     fn from(value: crate::tenant::DeleteTimelineError) -> Self {
     308              :         use crate::tenant::DeleteTimelineError::*;
     309            0 :         match value {
     310            0 :             NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
     311            0 :             HasChildren(children) => ApiError::PreconditionFailed(
     312            0 :                 format!("Cannot delete timeline which has child timelines: {children:?}")
     313            0 :                     .into_boxed_str(),
     314            0 :             ),
     315            0 :             a @ AlreadyInProgress(_) => ApiError::Conflict(a.to_string()),
     316            0 :             Cancelled => ApiError::ResourceUnavailable("shutting down".into()),
     317            0 :             Other(e) => ApiError::InternalServerError(e),
     318              :         }
     319            0 :     }
     320              : }
     321              : 
     322              : impl From<crate::tenant::TimelineArchivalError> for ApiError {
     323            0 :     fn from(value: crate::tenant::TimelineArchivalError) -> Self {
     324              :         use crate::tenant::TimelineArchivalError::*;
     325            0 :         match value {
     326            0 :             NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
     327            0 :             Timeout => ApiError::Timeout("hit pageserver internal timeout".into()),
     328            0 :             Cancelled => ApiError::ShuttingDown,
     329            0 :             e @ HasArchivedParent(_) => {
     330            0 :                 ApiError::PreconditionFailed(e.to_string().into_boxed_str())
     331              :             }
     332            0 :             HasUnarchivedChildren(children) => ApiError::PreconditionFailed(
     333            0 :                 format!(
     334            0 :                     "Cannot archive timeline which has non-archived child timelines: {children:?}"
     335            0 :                 )
     336            0 :                 .into_boxed_str(),
     337            0 :             ),
     338            0 :             a @ AlreadyInProgress => ApiError::Conflict(a.to_string()),
     339            0 :             Other(e) => ApiError::InternalServerError(e),
     340              :         }
     341            0 :     }
     342              : }
     343              : 
     344              : impl From<crate::tenant::mgr::DeleteTimelineError> for ApiError {
     345            0 :     fn from(value: crate::tenant::mgr::DeleteTimelineError) -> Self {
     346              :         use crate::tenant::mgr::DeleteTimelineError::*;
     347            0 :         match value {
      348              :             // Report Precondition Failed so the client can distinguish the
      349              :             // "tenant is missing" case from the "timeline is missing" case
     350            0 :             Tenant(GetTenantError::NotFound(..)) => ApiError::PreconditionFailed(
     351            0 :                 "Requested tenant is missing".to_owned().into_boxed_str(),
     352            0 :             ),
     353            0 :             Tenant(t) => ApiError::from(t),
     354            0 :             Timeline(t) => ApiError::from(t),
     355              :         }
     356            0 :     }
     357              : }
     358              : 
     359              : impl From<crate::tenant::mgr::DeleteTenantError> for ApiError {
     360            0 :     fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self {
     361              :         use crate::tenant::mgr::DeleteTenantError::*;
     362            0 :         match value {
     363            0 :             SlotError(e) => e.into(),
     364            0 :             Other(o) => ApiError::InternalServerError(o),
     365            0 :             Cancelled => ApiError::ShuttingDown,
     366              :         }
     367            0 :     }
     368              : }
     369              : 
     370              : impl From<crate::tenant::secondary::SecondaryTenantError> for ApiError {
     371            0 :     fn from(ste: crate::tenant::secondary::SecondaryTenantError) -> ApiError {
     372              :         use crate::tenant::secondary::SecondaryTenantError;
     373            0 :         match ste {
     374            0 :             SecondaryTenantError::GetTenant(gte) => gte.into(),
     375            0 :             SecondaryTenantError::ShuttingDown => ApiError::ShuttingDown,
     376              :         }
     377            0 :     }
     378              : }
     379              : 
     380              : impl From<crate::tenant::FinalizeTimelineImportError> for ApiError {
     381            0 :     fn from(err: crate::tenant::FinalizeTimelineImportError) -> ApiError {
     382              :         use crate::tenant::FinalizeTimelineImportError::*;
     383            0 :         match err {
     384              :             ImportTaskStillRunning => {
     385            0 :                 ApiError::ResourceUnavailable("Import task still running".into())
     386              :             }
     387            0 :             ShuttingDown => ApiError::ShuttingDown,
     388              :         }
     389            0 :     }
     390              : }
     391              : 
     392              : // Helper function to construct a TimelineInfo struct for a timeline
     393            0 : async fn build_timeline_info(
     394            0 :     timeline: &Arc<Timeline>,
     395            0 :     include_non_incremental_logical_size: bool,
     396            0 :     force_await_initial_logical_size: bool,
     397            0 :     ctx: &RequestContext,
     398            0 : ) -> anyhow::Result<TimelineInfo> {
     399            0 :     crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
     400              : 
     401            0 :     if force_await_initial_logical_size {
     402            0 :         timeline.clone().await_initial_logical_size().await
     403            0 :     }
     404              : 
     405            0 :     let mut info = build_timeline_info_common(
     406            0 :         timeline,
     407            0 :         ctx,
     408            0 :         tenant::timeline::GetLogicalSizePriority::Background,
     409            0 :     )
     410            0 :     .await?;
     411            0 :     if include_non_incremental_logical_size {
     412              :         // XXX we should be using spawn_ondemand_logical_size_calculation here.
     413              :         // Otherwise, if someone deletes the timeline / detaches the tenant while
     414              :         // we're executing this function, we will outlive the timeline on-disk state.
     415              :         info.current_logical_size_non_incremental = Some(
     416            0 :             timeline
     417            0 :                 .get_current_logical_size_non_incremental(info.last_record_lsn, ctx)
     418            0 :                 .await?,
     419              :         );
     420            0 :     }
     421            0 :     Ok(info)
     422            0 : }
     423              : 
     424            0 : async fn build_timeline_info_common(
     425            0 :     timeline: &Arc<Timeline>,
     426            0 :     ctx: &RequestContext,
     427            0 :     logical_size_task_priority: tenant::timeline::GetLogicalSizePriority,
     428            0 : ) -> anyhow::Result<TimelineInfo> {
     429            0 :     crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
     430            0 :     let initdb_lsn = timeline.initdb_lsn;
     431            0 :     let last_record_lsn = timeline.get_last_record_lsn();
     432            0 :     let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
     433            0 :         let guard = timeline.last_received_wal.lock().unwrap();
     434            0 :         if let Some(info) = guard.as_ref() {
     435            0 :             (
     436            0 :                 Some(format!("{}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only.
     437            0 :                 Some(info.last_received_msg_lsn),
     438            0 :                 Some(info.last_received_msg_ts),
     439            0 :             )
     440              :         } else {
     441            0 :             (None, None, None)
     442              :         }
     443              :     };
     444              : 
     445            0 :     let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
     446            0 :     let ancestor_lsn = match timeline.get_ancestor_lsn() {
     447            0 :         Lsn(0) => None,
     448            0 :         lsn @ Lsn(_) => Some(lsn),
     449              :     };
     450            0 :     let current_logical_size = timeline.get_current_logical_size(logical_size_task_priority, ctx);
     451            0 :     let current_physical_size = Some(timeline.layer_size_sum().await);
     452            0 :     let state = timeline.current_state();
     453              :     // Report is_archived = false if the timeline is still loading
     454            0 :     let is_archived = timeline.is_archived().unwrap_or(false);
     455            0 :     let remote_consistent_lsn_projected = timeline
     456            0 :         .get_remote_consistent_lsn_projected()
     457            0 :         .unwrap_or(Lsn(0));
     458            0 :     let remote_consistent_lsn_visible = timeline
     459            0 :         .get_remote_consistent_lsn_visible()
     460            0 :         .unwrap_or(Lsn(0));
     461            0 :     let is_invisible = timeline.remote_client.is_invisible().unwrap_or(false);
     462              : 
     463            0 :     let walreceiver_status = timeline.walreceiver_status();
     464              : 
     465            0 :     let (pitr_history_size, within_ancestor_pitr) = timeline.get_pitr_history_stats();
     466              : 
     467              :     // Externally, expose the lowest LSN that can be used to create a branch.
     468              :     // Internally we distinguish between the planned GC cutoff (PITR point) and the "applied" GC cutoff (where we
     469              :     // actually trimmed data to), which can pass each other when PITR is changed.
     470            0 :     let min_readable_lsn = std::cmp::max(
     471            0 :         timeline.get_gc_cutoff_lsn().unwrap_or_default(),
     472            0 :         *timeline.get_applied_gc_cutoff_lsn(),
     473              :     );
     474              : 
     475            0 :     let info = TimelineInfo {
     476            0 :         tenant_id: timeline.tenant_shard_id,
     477            0 :         timeline_id: timeline.timeline_id,
     478            0 :         ancestor_timeline_id,
     479            0 :         ancestor_lsn,
     480            0 :         disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
     481            0 :         remote_consistent_lsn: remote_consistent_lsn_projected,
     482            0 :         remote_consistent_lsn_visible,
     483            0 :         initdb_lsn,
     484            0 :         last_record_lsn,
     485            0 :         prev_record_lsn: Some(timeline.get_prev_record_lsn()),
     486            0 :         min_readable_lsn,
     487            0 :         applied_gc_cutoff_lsn: *timeline.get_applied_gc_cutoff_lsn(),
     488            0 :         current_logical_size: current_logical_size.size_dont_care_about_accuracy(),
     489            0 :         current_logical_size_is_accurate: match current_logical_size.accuracy() {
     490            0 :             tenant::timeline::logical_size::Accuracy::Approximate => false,
     491            0 :             tenant::timeline::logical_size::Accuracy::Exact => true,
     492              :         },
     493            0 :         directory_entries_counts: timeline.get_directory_metrics().to_vec(),
     494            0 :         current_physical_size,
     495            0 :         current_logical_size_non_incremental: None,
     496            0 :         pitr_history_size,
     497            0 :         within_ancestor_pitr,
     498            0 :         timeline_dir_layer_file_size_sum: None,
     499            0 :         wal_source_connstr,
     500            0 :         last_received_msg_lsn,
     501            0 :         last_received_msg_ts,
     502            0 :         pg_version: timeline.pg_version,
     503              : 
     504            0 :         state,
     505            0 :         is_archived: Some(is_archived),
     506            0 :         rel_size_migration: Some(timeline.get_rel_size_v2_status()),
     507            0 :         is_invisible: Some(is_invisible),
     508              : 
     509            0 :         walreceiver_status,
     510              :     };
     511            0 :     Ok(info)
     512            0 : }
     513              : 
     514            0 : fn build_timeline_offloaded_info(offloaded: &Arc<OffloadedTimeline>) -> OffloadedTimelineInfo {
     515              :     let &OffloadedTimeline {
     516            0 :         tenant_shard_id,
     517            0 :         timeline_id,
     518            0 :         ancestor_retain_lsn,
     519            0 :         ancestor_timeline_id,
     520            0 :         archived_at,
     521              :         ..
     522            0 :     } = offloaded.as_ref();
     523            0 :     OffloadedTimelineInfo {
     524            0 :         tenant_id: tenant_shard_id,
     525            0 :         timeline_id,
     526            0 :         ancestor_retain_lsn,
     527            0 :         ancestor_timeline_id,
     528            0 :         archived_at: archived_at.and_utc(),
     529            0 :     }
     530            0 : }
     531              : 
     532              : // healthcheck handler
     533            0 : async fn status_handler(
     534            0 :     request: Request<Body>,
     535            0 :     _cancel: CancellationToken,
     536            0 : ) -> Result<Response<Body>, ApiError> {
     537            0 :     check_permission(&request, None)?;
     538            0 :     let config = get_config(&request);
     539            0 :     json_response(StatusCode::OK, StatusResponse { id: config.id })
     540            0 : }
     541              : 
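
status_handler above serves /v1/status, one of the unauthenticated allowlist routes registered in State::new. For illustration, here is a hedged client-side sketch that probes it; the listen address/port and the shape of the JSON body are assumptions, and it relies on the reqwest and tokio crates rather than anything from this file.

    // Illustrative only: probe the management API healthcheck once.
    #[tokio::main]
    async fn main() -> Result<(), Box<dyn std::error::Error>> {
        // Assumed address; substitute the pageserver's real management listen address.
        let url = "http://127.0.0.1:9898/v1/status";
        let resp = reqwest::get(url).await?;
        if resp.status().is_success() {
            // status_handler responds with a small JSON document identifying the node.
            println!("pageserver is up: {}", resp.text().await?);
        } else {
            eprintln!("healthcheck failed with HTTP {}", resp.status());
        }
        Ok(())
    }
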
     542            0 : async fn reload_auth_validation_keys_handler(
     543            0 :     request: Request<Body>,
     544            0 :     _cancel: CancellationToken,
     545            0 : ) -> Result<Response<Body>, ApiError> {
     546            0 :     check_permission(&request, None)?;
     547            0 :     let config = get_config(&request);
     548            0 :     let state = get_state(&request);
     549            0 :     let Some(shared_auth) = &state.auth else {
     550            0 :         return json_response(StatusCode::BAD_REQUEST, ());
     551              :     };
     552              :     // unwrap is ok because check is performed when creating config, so path is set and exists
     553            0 :     let key_path = config.auth_validation_public_key_path.as_ref().unwrap();
     554            0 :     info!("Reloading public key(s) for verifying JWT tokens from {key_path:?}");
     555              : 
     556            0 :     match utils::auth::JwtAuth::from_key_path(key_path) {
     557            0 :         Ok(new_auth) => {
     558            0 :             shared_auth.swap(new_auth);
     559            0 :             json_response(StatusCode::OK, ())
     560              :         }
     561            0 :         Err(e) => {
     562            0 :             let err_msg = "Error reloading public keys";
     563            0 :             warn!("Error reloading public keys from {key_path:?}: {e:}");
     564            0 :             json_response(
     565              :                 StatusCode::INTERNAL_SERVER_ERROR,
     566            0 :                 HttpErrorBody::from_msg(err_msg.to_string()),
     567              :             )
     568              :         }
     569              :     }
     570            0 : }
     571              : 
     572            0 : async fn timeline_create_handler(
     573            0 :     mut request: Request<Body>,
     574            0 :     _cancel: CancellationToken,
     575            0 : ) -> Result<Response<Body>, ApiError> {
     576            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     577            0 :     let request_data: TimelineCreateRequest = json_request(&mut request).await?;
     578            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     579              : 
     580            0 :     let new_timeline_id = request_data.new_timeline_id;
     581              :     // fill in the default pg_version if not provided & convert request into domain model
     582            0 :     let params: tenant::CreateTimelineParams = match request_data.mode {
     583              :         TimelineCreateRequestMode::Bootstrap {
     584            0 :             existing_initdb_timeline_id,
     585            0 :             pg_version,
     586            0 :         } => tenant::CreateTimelineParams::Bootstrap(tenant::CreateTimelineParamsBootstrap {
     587            0 :             new_timeline_id,
     588            0 :             existing_initdb_timeline_id,
     589            0 :             pg_version: pg_version.unwrap_or(DEFAULT_PG_VERSION),
     590            0 :         }),
     591              :         TimelineCreateRequestMode::Branch {
     592            0 :             ancestor_timeline_id,
     593            0 :             ancestor_start_lsn,
     594              :             read_only: _,
     595              :             pg_version: _,
     596            0 :         } => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
     597            0 :             new_timeline_id,
     598            0 :             ancestor_timeline_id,
     599            0 :             ancestor_start_lsn,
     600            0 :         }),
     601              :         TimelineCreateRequestMode::ImportPgdata {
     602              :             import_pgdata:
     603              :                 TimelineCreateRequestModeImportPgdata {
     604            0 :                     location,
     605            0 :                     idempotency_key,
     606              :                 },
     607              :         } => tenant::CreateTimelineParams::ImportPgdata(tenant::CreateTimelineParamsImportPgdata {
     608            0 :             idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new(
     609            0 :                 idempotency_key.0,
     610              :             ),
     611            0 :             new_timeline_id,
     612              :             location: {
     613              :                 use import_pgdata::index_part_format::Location;
     614              :                 use pageserver_api::models::ImportPgdataLocation;
     615            0 :                 match location {
     616              :                     #[cfg(feature = "testing")]
     617            0 :                     ImportPgdataLocation::LocalFs { path } => Location::LocalFs { path },
     618              :                     ImportPgdataLocation::AwsS3 {
     619            0 :                         region,
     620            0 :                         bucket,
     621            0 :                         key,
     622            0 :                     } => Location::AwsS3 {
     623            0 :                         region,
     624            0 :                         bucket,
     625            0 :                         key,
     626            0 :                     },
     627              :                 }
     628              :             },
     629              :         }),
     630              :     };
     631              : 
     632            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
     633              : 
     634            0 :     let state = get_state(&request);
     635              : 
     636            0 :     async {
     637            0 :         let tenant = state
     638            0 :             .tenant_manager
     639            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     640              : 
     641            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     642              : 
     643              :         // earlier versions of the code had pg_version and ancestor_lsn in the span
      644              :         // => continue to provide that information, but through a log message that doesn't require us to destructure
     645            0 :         tracing::info!(?params, "creating timeline");
     646              : 
     647            0 :         match tenant
     648            0 :             .create_timeline(params, state.broker_client.clone(), &ctx)
     649            0 :             .await
     650              :         {
     651            0 :             Ok(new_timeline) => {
     652              :                 // Created. Construct a TimelineInfo for it.
     653            0 :                 let timeline_info = build_timeline_info_common(
     654            0 :                     &new_timeline,
     655            0 :                     &ctx,
     656            0 :                     tenant::timeline::GetLogicalSizePriority::User,
     657            0 :                 )
     658            0 :                 .await
     659            0 :                 .map_err(ApiError::InternalServerError)?;
     660            0 :                 json_response(StatusCode::CREATED, timeline_info)
     661              :             }
     662            0 :             Err(_) if tenant.cancel.is_cancelled() => {
     663              :                 // In case we get some ugly error type during shutdown, cast it into a clean 503.
     664            0 :                 json_response(
     665              :                     StatusCode::SERVICE_UNAVAILABLE,
     666            0 :                     HttpErrorBody::from_msg("Tenant shutting down".to_string()),
     667              :                 )
     668              :             }
     669            0 :             Err(e @ tenant::CreateTimelineError::Conflict) => {
     670            0 :                 json_response(StatusCode::CONFLICT, HttpErrorBody::from_msg(e.to_string()))
     671              :             }
     672            0 :             Err(e @ tenant::CreateTimelineError::AlreadyCreating) => json_response(
     673              :                 StatusCode::TOO_MANY_REQUESTS,
     674            0 :                 HttpErrorBody::from_msg(e.to_string()),
     675              :             ),
     676            0 :             Err(tenant::CreateTimelineError::AncestorLsn(err)) => json_response(
     677              :                 StatusCode::NOT_ACCEPTABLE,
     678            0 :                 HttpErrorBody::from_msg(format!("{err:#}")),
     679              :             ),
     680            0 :             Err(e @ tenant::CreateTimelineError::AncestorNotActive) => json_response(
     681              :                 StatusCode::SERVICE_UNAVAILABLE,
     682            0 :                 HttpErrorBody::from_msg(e.to_string()),
     683              :             ),
     684            0 :             Err(e @ tenant::CreateTimelineError::AncestorArchived) => json_response(
     685              :                 StatusCode::NOT_ACCEPTABLE,
     686            0 :                 HttpErrorBody::from_msg(e.to_string()),
     687              :             ),
     688            0 :             Err(tenant::CreateTimelineError::ShuttingDown) => json_response(
     689              :                 StatusCode::SERVICE_UNAVAILABLE,
     690            0 :                 HttpErrorBody::from_msg("tenant shutting down".to_string()),
     691              :             ),
     692            0 :             Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
     693              :         }
     694            0 :     }
     695            0 :     .instrument(info_span!("timeline_create",
     696              :         tenant_id = %tenant_shard_id.tenant_id,
     697            0 :         shard_id = %tenant_shard_id.shard_slug(),
     698              :         timeline_id = %new_timeline_id,
     699              :     ))
     700            0 :     .await
     701            0 : }
     702              : 
     703            0 : async fn timeline_list_handler(
     704            0 :     request: Request<Body>,
     705            0 :     _cancel: CancellationToken,
     706            0 : ) -> Result<Response<Body>, ApiError> {
     707            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     708            0 :     let include_non_incremental_logical_size: Option<bool> =
     709            0 :         parse_query_param(&request, "include-non-incremental-logical-size")?;
     710            0 :     let force_await_initial_logical_size: Option<bool> =
     711            0 :         parse_query_param(&request, "force-await-initial-logical-size")?;
     712            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     713              : 
     714            0 :     let state = get_state(&request);
     715            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
     716              : 
     717            0 :     let response_data = async {
     718            0 :         let tenant = state
     719            0 :             .tenant_manager
     720            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     721              : 
     722            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     723              : 
     724            0 :         let timelines = tenant.list_timelines();
     725              : 
     726            0 :         let mut response_data = Vec::with_capacity(timelines.len());
     727            0 :         for timeline in timelines {
     728            0 :             let timeline_info = build_timeline_info(
     729            0 :                 &timeline,
     730            0 :                 include_non_incremental_logical_size.unwrap_or(false),
     731            0 :                 force_await_initial_logical_size.unwrap_or(false),
     732            0 :                 &ctx,
     733              :             )
     734            0 :             .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
     735            0 :             .await
     736            0 :             .context("Failed to build timeline info")
     737            0 :             .map_err(ApiError::InternalServerError)?;
     738              : 
     739            0 :             response_data.push(timeline_info);
     740              :         }
     741            0 :         Ok::<Vec<TimelineInfo>, ApiError>(response_data)
     742            0 :     }
     743            0 :     .instrument(info_span!("timeline_list",
     744              :                 tenant_id = %tenant_shard_id.tenant_id,
     745            0 :                 shard_id = %tenant_shard_id.shard_slug()))
     746            0 :     .await?;
     747              : 
     748            0 :     json_response(StatusCode::OK, response_data)
     749            0 : }
     750              : 
     751            0 : async fn timeline_and_offloaded_list_handler(
     752            0 :     request: Request<Body>,
     753            0 :     _cancel: CancellationToken,
     754            0 : ) -> Result<Response<Body>, ApiError> {
     755            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     756            0 :     let include_non_incremental_logical_size: Option<bool> =
     757            0 :         parse_query_param(&request, "include-non-incremental-logical-size")?;
     758            0 :     let force_await_initial_logical_size: Option<bool> =
     759            0 :         parse_query_param(&request, "force-await-initial-logical-size")?;
     760            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     761              : 
     762            0 :     let state = get_state(&request);
     763            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
     764              : 
     765            0 :     let response_data = async {
     766            0 :         let tenant = state
     767            0 :             .tenant_manager
     768            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     769              : 
     770            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     771              : 
     772            0 :         let (timelines, offloadeds) = tenant.list_timelines_and_offloaded();
     773              : 
     774            0 :         let mut timeline_infos = Vec::with_capacity(timelines.len());
     775            0 :         for timeline in timelines {
     776            0 :             let timeline_info = build_timeline_info(
     777            0 :                 &timeline,
     778            0 :                 include_non_incremental_logical_size.unwrap_or(false),
     779            0 :                 force_await_initial_logical_size.unwrap_or(false),
     780            0 :                 &ctx,
     781              :             )
     782            0 :             .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
     783            0 :             .await
     784            0 :             .context("Failed to build timeline info")
     785            0 :             .map_err(ApiError::InternalServerError)?;
     786              : 
     787            0 :             timeline_infos.push(timeline_info);
     788              :         }
     789            0 :         let offloaded_infos = offloadeds
     790            0 :             .into_iter()
     791            0 :             .map(|offloaded| build_timeline_offloaded_info(&offloaded))
     792            0 :             .collect::<Vec<_>>();
     793            0 :         let res = TimelinesInfoAndOffloaded {
     794            0 :             timelines: timeline_infos,
     795            0 :             offloaded: offloaded_infos,
     796            0 :         };
     797            0 :         Ok::<TimelinesInfoAndOffloaded, ApiError>(res)
     798            0 :     }
     799            0 :     .instrument(info_span!("timeline_and_offloaded_list",
     800              :                 tenant_id = %tenant_shard_id.tenant_id,
     801            0 :                 shard_id = %tenant_shard_id.shard_slug()))
     802            0 :     .await?;
     803              : 
     804            0 :     json_response(StatusCode::OK, response_data)
     805            0 : }
     806              : 
     807            0 : async fn timeline_preserve_initdb_handler(
     808            0 :     request: Request<Body>,
     809            0 :     _cancel: CancellationToken,
     810            0 : ) -> Result<Response<Body>, ApiError> {
     811            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     812            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     813            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     814            0 :     let state = get_state(&request);
     815              : 
     816              :     // Part of the process for disaster recovery from safekeeper-stored WAL:
     817              :     // If we don't recover into a new timeline but want to keep the timeline ID,
     818              :     // then the initdb archive is deleted. This endpoint copies it to a different
      819              :     // location where timeline recreation can find it.
     820              : 
     821            0 :     async {
     822            0 :         let tenant = state
     823            0 :             .tenant_manager
     824            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     825              : 
     826            0 :         let timeline = tenant.get_timeline(timeline_id, false)?;
     827              : 
     828            0 :         timeline
     829            0 :             .preserve_initdb_archive()
     830            0 :             .await
     831            0 :             .context("preserving initdb archive")
     832            0 :             .map_err(ApiError::InternalServerError)?;
     833              : 
     834            0 :         Ok::<_, ApiError>(())
     835            0 :     }
     836            0 :     .instrument(info_span!("timeline_preserve_initdb_archive",
     837              :                 tenant_id = %tenant_shard_id.tenant_id,
     838            0 :                 shard_id = %tenant_shard_id.shard_slug(),
     839              :                 %timeline_id))
     840            0 :     .await?;
     841              : 
     842            0 :     json_response(StatusCode::OK, ())
     843            0 : }
     844              : 
     845            0 : async fn timeline_archival_config_handler(
     846            0 :     mut request: Request<Body>,
     847            0 :     _cancel: CancellationToken,
     848            0 : ) -> Result<Response<Body>, ApiError> {
     849            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     850            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     851              : 
     852            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
     853              : 
     854            0 :     let request_data: TimelineArchivalConfigRequest = json_request(&mut request).await?;
     855            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     856            0 :     let state = get_state(&request);
     857              : 
     858            0 :     async {
     859            0 :         let tenant = state
     860            0 :             .tenant_manager
     861            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     862              : 
     863            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     864              : 
     865            0 :         tenant
     866            0 :             .apply_timeline_archival_config(
     867            0 :                 timeline_id,
     868            0 :                 request_data.state,
     869            0 :                 state.broker_client.clone(),
     870            0 :                 ctx,
     871            0 :             )
     872            0 :             .await?;
     873            0 :         Ok::<_, ApiError>(())
     874            0 :     }
     875            0 :     .instrument(info_span!("timeline_archival_config",
     876              :                 tenant_id = %tenant_shard_id.tenant_id,
     877            0 :                 shard_id = %tenant_shard_id.shard_slug(),
     878              :                 state = ?request_data.state,
     879              :                 %timeline_id))
     880            0 :     .await?;
     881              : 
     882            0 :     json_response(StatusCode::OK, ())
     883            0 : }
     884              : 
     885              : /// This API is used to patch the index part of a timeline. You must ensure such patches are safe to apply. Use this API as an emergency
     886              : /// measure only.
     887              : ///
     888              : /// Some examples of safe patches:
      889              : /// - Increase the gc_cutoff and gc_compaction_cutoff to a larger value in case of a bug that didn't bump the cutoff and caused read errors.
     890              : /// - Force set the index part to use reldir v2 (migrating/migrated).
     891              : ///
     892              : /// Some examples of unsafe patches:
     893              : /// - Force set the index part from v2 to v1 (legacy). This will cause the code path to ignore anything written to the new keyspace and cause
     894              : ///   errors.
      895              : /// - Decrease the gc_cutoff without validating that the data really exists. This will cause read errors in the background.
     896            0 : async fn timeline_patch_index_part_handler(
     897            0 :     mut request: Request<Body>,
     898            0 :     _cancel: CancellationToken,
     899            0 : ) -> Result<Response<Body>, ApiError> {
     900            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     901            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     902              : 
     903            0 :     let request_data: TimelinePatchIndexPartRequest = json_request(&mut request).await?;
     904            0 :     check_permission(&request, None)?; // require global permission for this request
     905            0 :     let state = get_state(&request);
     906              : 
     907            0 :     async {
     908            0 :         let timeline =
     909            0 :             active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
     910            0 :                 .await?;
     911              : 
     912            0 :         if let Some(rel_size_migration) = request_data.rel_size_migration {
     913            0 :             timeline
     914            0 :                 .update_rel_size_v2_status(rel_size_migration)
     915            0 :                 .map_err(ApiError::InternalServerError)?;
     916            0 :         }
     917              : 
     918            0 :         if let Some(gc_compaction_last_completed_lsn) =
     919            0 :             request_data.gc_compaction_last_completed_lsn
     920              :         {
     921            0 :             timeline
     922            0 :                 .update_gc_compaction_state(GcCompactionState {
     923            0 :                     last_completed_lsn: gc_compaction_last_completed_lsn,
     924            0 :                 })
     925            0 :                 .map_err(ApiError::InternalServerError)?;
     926            0 :         }
     927              : 
     928            0 :         if let Some(applied_gc_cutoff_lsn) = request_data.applied_gc_cutoff_lsn {
     929            0 :             {
     930            0 :                 let guard = timeline.applied_gc_cutoff_lsn.lock_for_write();
     931            0 :                 guard.store_and_unlock(applied_gc_cutoff_lsn);
     932            0 :             }
     933            0 :         }
     934              : 
     935            0 :         if request_data.force_index_update {
     936            0 :             timeline
     937            0 :                 .remote_client
     938            0 :                 .force_schedule_index_upload()
     939            0 :                 .context("force schedule index upload")
     940            0 :                 .map_err(ApiError::InternalServerError)?;
     941            0 :         }
     942              : 
     943            0 :         Ok::<_, ApiError>(())
     944            0 :     }
     945            0 :     .instrument(info_span!("timeline_patch_index_part",
     946              :                 tenant_id = %tenant_shard_id.tenant_id,
     947            0 :                 shard_id = %tenant_shard_id.shard_slug(),
     948              :                 %timeline_id))
     949            0 :     .await?;
     950              : 
     951            0 :     json_response(StatusCode::OK, ())
     952            0 : }
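// NOTE: illustrative addition, not part of the original routes.rs. Based on the fields the
// handler reads above (`rel_size_migration`, `gc_compaction_last_completed_lsn`,
// `applied_gc_cutoff_lsn`, `force_index_update`), a hypothetical patch body could look like
// this; the concrete values and the serialized LSN form are assumptions for illustration:
//
//     {
//         "rel_size_migration": "migrating",
//         "applied_gc_cutoff_lsn": "0/169AD58",
//         "force_index_update": true
//     }
//
// Each optional field is applied only when present, per the `if let Some(..)` branches above.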
     953              : 
     954            0 : async fn timeline_detail_handler(
     955            0 :     request: Request<Body>,
     956            0 :     _cancel: CancellationToken,
     957            0 : ) -> Result<Response<Body>, ApiError> {
     958            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
     959            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
     960            0 :     let include_non_incremental_logical_size: Option<bool> =
     961            0 :         parse_query_param(&request, "include-non-incremental-logical-size")?;
     962            0 :     let force_await_initial_logical_size: Option<bool> =
     963            0 :         parse_query_param(&request, "force-await-initial-logical-size")?;
     964            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
     965              : 
     966              :     // Logical size calculation needs downloading.
     967            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
     968            0 :     let state = get_state(&request);
     969              : 
     970            0 :     let timeline_info = async {
     971            0 :         let tenant = state
     972            0 :             .tenant_manager
     973            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
     974              : 
     975            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
     976              : 
     977            0 :         let timeline = tenant.get_timeline(timeline_id, false)?;
     978            0 :         let ctx = &ctx.with_scope_timeline(&timeline);
     979              : 
     980            0 :         let timeline_info = build_timeline_info(
     981            0 :             &timeline,
     982            0 :             include_non_incremental_logical_size.unwrap_or(false),
     983            0 :             force_await_initial_logical_size.unwrap_or(false),
     984            0 :             ctx,
     985            0 :         )
     986            0 :         .await
     987            0 :         .context("get local timeline info")
     988            0 :         .map_err(ApiError::InternalServerError)?;
     989              : 
     990            0 :         Ok::<_, ApiError>(timeline_info)
     991            0 :     }
     992            0 :     .instrument(info_span!("timeline_detail",
     993              :                 tenant_id = %tenant_shard_id.tenant_id,
     994            0 :                 shard_id = %tenant_shard_id.shard_slug(),
     995              :                 %timeline_id))
     996            0 :     .await?;
     997              : 
     998            0 :     json_response(StatusCode::OK, timeline_info)
     999            0 : }
    1000              : 
    1001            0 : async fn get_lsn_by_timestamp_handler(
    1002            0 :     request: Request<Body>,
    1003            0 :     cancel: CancellationToken,
    1004            0 : ) -> Result<Response<Body>, ApiError> {
    1005            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1006            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1007            0 :     let state = get_state(&request);
    1008              : 
    1009            0 :     if !tenant_shard_id.is_shard_zero() {
    1010              :         // Requires SLRU contents, which are only stored on shard zero
    1011            0 :         return Err(ApiError::BadRequest(anyhow!(
    1012            0 :             "Lsn calculations by timestamp are only available on shard zero"
    1013            0 :         )));
    1014            0 :     }
    1015              : 
    1016            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1017            0 :     let timestamp_raw = must_get_query_param(&request, "timestamp")?;
    1018            0 :     let timestamp = humantime::parse_rfc3339(&timestamp_raw)
    1019            0 :         .with_context(|| format!("Invalid time: {timestamp_raw:?}"))
    1020            0 :         .map_err(ApiError::BadRequest)?;
    1021            0 :     let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);
    1022              : 
    1023            0 :     let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false);
    1024              : 
    1025            0 :     let timeline =
    1026            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1027            0 :             .await?;
    1028            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    1029            0 :         .with_scope_timeline(&timeline);
    1030            0 :     let result = timeline
    1031            0 :         .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx)
    1032            0 :         .await?;
    1033              : 
    1034              :     #[derive(serde::Serialize, Debug)]
    1035              :     struct Result {
    1036              :         lsn: Lsn,
    1037              :         kind: &'static str,
    1038              :         #[serde(default)]
    1039              :         #[serde(skip_serializing_if = "Option::is_none")]
    1040              :         #[serde(flatten)]
    1041              :         lease: Option<LsnLease>,
    1042              :     }
    1043            0 :     let (lsn, kind) = match result {
    1044            0 :         LsnForTimestamp::Present(lsn) => (lsn, "present"),
    1045            0 :         LsnForTimestamp::Future(lsn) => (lsn, "future"),
    1046            0 :         LsnForTimestamp::Past(lsn) => (lsn, "past"),
    1047            0 :         LsnForTimestamp::NoData(lsn) => (lsn, "nodata"),
    1048              :     };
    1049              : 
    1050            0 :     let lease = if with_lease {
    1051            0 :         timeline
    1052            0 :             .init_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx)
    1053            0 :             .inspect_err(|_| {
     1054            0 :                 warn!("failed to grant a lease to {}", lsn);
    1055            0 :             })
    1056            0 :             .ok()
    1057              :     } else {
    1058            0 :         None
    1059              :     };
    1060              : 
    1061            0 :     let result = Result { lsn, kind, lease };
    1062            0 :     let valid_until = result
    1063            0 :         .lease
    1064            0 :         .as_ref()
    1065            0 :         .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string());
    1066            0 :     tracing::info!(
    1067              :         lsn=?result.lsn,
    1068              :         kind=%result.kind,
    1069              :         timestamp=%timestamp_raw,
    1070              :         valid_until=?valid_until,
    1071            0 :         "lsn_by_timestamp finished"
    1072              :     );
    1073            0 :     json_response(StatusCode::OK, result)
    1074            0 : }
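// NOTE: illustrative addition, not part of the original routes.rs. The `timestamp` query
// parameter must be RFC 3339, since it is parsed with `humantime::parse_rfc3339` above, and
// `with_lease=true` additionally requests an LSN lease. A hypothetical invocation (the URL
// path is an assumption; the query parameter names come from the handler):
//
//     GET .../timeline/<timeline_id>/get_lsn_by_timestamp?timestamp=2025-01-01T12:00:00Z&with_lease=true
//
// The JSON response carries the resolved `lsn`, a `kind` of "present", "future", "past" or
// "nodata", and, when a lease was granted, the lease fields (e.g. `valid_until`) flattened
// into the same object.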
    1075              : 
    1076            0 : async fn get_timestamp_of_lsn_handler(
    1077            0 :     request: Request<Body>,
    1078            0 :     _cancel: CancellationToken,
    1079            0 : ) -> Result<Response<Body>, ApiError> {
    1080            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1081            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1082            0 :     let state = get_state(&request);
    1083              : 
    1084            0 :     if !tenant_shard_id.is_shard_zero() {
    1085              :         // Requires SLRU contents, which are only stored on shard zero
    1086            0 :         return Err(ApiError::BadRequest(anyhow!(
    1087            0 :             "Timestamp calculations by lsn are only available on shard zero"
    1088            0 :         )));
    1089            0 :     }
    1090              : 
    1091            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1092              : 
    1093            0 :     let lsn_str = must_get_query_param(&request, "lsn")?;
    1094            0 :     let lsn = Lsn::from_str(&lsn_str)
    1095            0 :         .with_context(|| format!("Invalid LSN: {lsn_str:?}"))
    1096            0 :         .map_err(ApiError::BadRequest)?;
    1097              : 
    1098            0 :     let timeline =
    1099            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1100            0 :             .await?;
    1101            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    1102            0 :         .with_scope_timeline(&timeline);
    1103            0 :     let result = timeline.get_timestamp_for_lsn(lsn, &ctx).await?;
    1104              : 
    1105            0 :     match result {
    1106            0 :         Some(time) => {
    1107            0 :             let time = format_rfc3339(
    1108            0 :                 postgres_ffi::try_from_pg_timestamp(time).map_err(ApiError::InternalServerError)?,
    1109              :             )
    1110            0 :             .to_string();
    1111            0 :             json_response(StatusCode::OK, time)
    1112              :         }
    1113            0 :         None => Err(ApiError::PreconditionFailed(
    1114            0 :             format!("Timestamp for lsn {lsn} not found").into(),
    1115            0 :         )),
    1116              :     }
    1117            0 : }
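// NOTE: illustrative addition, not part of the original routes.rs. The `lsn` query parameter
// is parsed with `Lsn::from_str` above; LSNs are conventionally written in the "<hi>/<lo>"
// hexadecimal form. A hypothetical query string (parameter name taken from the handler,
// value invented for illustration):
//
//     ...?lsn=0/16B5A50
//
// On success the handler returns the RFC 3339 timestamp as a JSON string; if no timestamp is
// known for that LSN it fails with `ApiError::PreconditionFailed`.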
    1118              : 
    1119            0 : async fn timeline_delete_handler(
    1120            0 :     request: Request<Body>,
    1121            0 :     _cancel: CancellationToken,
    1122            0 : ) -> Result<Response<Body>, ApiError> {
    1123            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1124            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1125            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1126              : 
    1127            0 :     let state = get_state(&request);
    1128              : 
    1129            0 :     let tenant = state
    1130            0 :         .tenant_manager
    1131            0 :         .get_attached_tenant_shard(tenant_shard_id)
    1132            0 :         .map_err(|e| {
    1133            0 :             match e {
    1134              :                 // GetTenantError has a built-in conversion to ApiError, but in this context we don't
    1135              :                 // want to treat missing tenants as 404, to avoid ambiguity with successful deletions.
    1136              :                 GetTenantError::NotFound(_) | GetTenantError::ShardNotFound(_) => {
    1137            0 :                     ApiError::PreconditionFailed(
    1138            0 :                         "Requested tenant is missing".to_string().into_boxed_str(),
    1139            0 :                     )
    1140              :                 }
    1141            0 :                 e => e.into(),
    1142              :             }
    1143            0 :         })?;
    1144            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1145            0 :     tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))
    1146            0 :         .await?;
    1147              : 
    1148            0 :     json_response(StatusCode::ACCEPTED, ())
    1149            0 : }
    1150              : 
    1151            0 : async fn tenant_reset_handler(
    1152            0 :     request: Request<Body>,
    1153            0 :     _cancel: CancellationToken,
    1154            0 : ) -> Result<Response<Body>, ApiError> {
    1155            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1156            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1157              : 
    1158            0 :     let drop_cache: Option<bool> = parse_query_param(&request, "drop_cache")?;
    1159              : 
    1160            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    1161            0 :     let state = get_state(&request);
    1162            0 :     state
    1163            0 :         .tenant_manager
    1164            0 :         .reset_tenant(tenant_shard_id, drop_cache.unwrap_or(false), &ctx)
    1165            0 :         .await
    1166            0 :         .map_err(ApiError::InternalServerError)?;
    1167              : 
    1168            0 :     json_response(StatusCode::OK, ())
    1169            0 : }
    1170              : 
    1171            0 : async fn tenant_list_handler(
    1172            0 :     request: Request<Body>,
    1173            0 :     _cancel: CancellationToken,
    1174            0 : ) -> Result<Response<Body>, ApiError> {
    1175            0 :     check_permission(&request, None)?;
    1176            0 :     let state = get_state(&request);
    1177              : 
    1178            0 :     let response_data = state
    1179            0 :         .tenant_manager
    1180            0 :         .list_tenants()
    1181            0 :         .map_err(|_| {
    1182            0 :             ApiError::ResourceUnavailable("Tenant map is initializing or shutting down".into())
    1183            0 :         })?
    1184            0 :         .iter()
    1185            0 :         .map(|(id, state, gen_)| TenantInfo {
    1186            0 :             id: *id,
    1187            0 :             state: state.clone(),
    1188            0 :             current_physical_size: None,
    1189            0 :             attachment_status: state.attachment_status(),
    1190            0 :             generation: (*gen_)
    1191            0 :                 .into()
    1192            0 :                 .expect("Tenants are always attached with a generation"),
    1193            0 :             gc_blocking: None,
    1194            0 :         })
    1195            0 :         .collect::<Vec<TenantInfo>>();
    1196              : 
    1197            0 :     json_response(StatusCode::OK, response_data)
    1198            0 : }
    1199              : 
    1200            0 : async fn tenant_status(
    1201            0 :     request: Request<Body>,
    1202            0 :     _cancel: CancellationToken,
    1203            0 : ) -> Result<Response<Body>, ApiError> {
    1204            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1205            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1206            0 :     let state = get_state(&request);
    1207              : 
    1208              :     // In tests, sometimes we want to query the state of a tenant without auto-activating it if it's currently waiting.
    1209            0 :     let activate = true;
    1210              :     #[cfg(feature = "testing")]
    1211            0 :     let activate = parse_query_param(&request, "activate")?.unwrap_or(activate);
    1212              : 
    1213            0 :     let tenant_info = async {
    1214            0 :         let tenant = state
    1215            0 :             .tenant_manager
    1216            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    1217              : 
    1218            0 :         if activate {
    1219              :             // This is advisory: we prefer to let the tenant activate on-demand when this function is
    1220              :             // called, but it is still valid to return 200 and describe the current state of the tenant
    1221              :             // if it doesn't make it into an active state.
    1222            0 :             tenant
    1223            0 :                 .wait_to_become_active(ACTIVE_TENANT_TIMEOUT)
    1224            0 :                 .await
    1225            0 :                 .ok();
    1226            0 :         }
    1227              : 
    1228              :         // Calculate total physical size of all timelines
    1229            0 :         let mut current_physical_size = 0;
    1230            0 :         for timeline in tenant.list_timelines().iter() {
    1231            0 :             current_physical_size += timeline.layer_size_sum().await;
    1232              :         }
    1233              : 
    1234            0 :         let state = tenant.current_state();
    1235              :         Result::<_, ApiError>::Ok(TenantDetails {
    1236              :             tenant_info: TenantInfo {
    1237            0 :                 id: tenant_shard_id,
    1238            0 :                 state: state.clone(),
    1239            0 :                 current_physical_size: Some(current_physical_size),
    1240            0 :                 attachment_status: state.attachment_status(),
    1241            0 :                 generation: tenant
    1242            0 :                     .generation()
    1243            0 :                     .into()
    1244            0 :                     .expect("Tenants are always attached with a generation"),
    1245            0 :                 gc_blocking: tenant.gc_block.summary().map(|x| format!("{x:?}")),
    1246              :             },
    1247            0 :             walredo: tenant.wal_redo_manager_status(),
    1248            0 :             timelines: tenant.list_timeline_ids(),
    1249              :         })
    1250            0 :     }
    1251            0 :     .instrument(info_span!("tenant_status_handler",
    1252              :                 tenant_id = %tenant_shard_id.tenant_id,
    1253            0 :                 shard_id = %tenant_shard_id.shard_slug()))
    1254            0 :     .await?;
    1255              : 
    1256            0 :     json_response(StatusCode::OK, tenant_info)
    1257            0 : }
    1258              : 
    1259            0 : async fn tenant_delete_handler(
    1260            0 :     request: Request<Body>,
    1261            0 :     _cancel: CancellationToken,
    1262            0 : ) -> Result<Response<Body>, ApiError> {
    1263              :     // TODO openapi spec
    1264            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1265            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1266              : 
    1267            0 :     let state = get_state(&request);
    1268              : 
    1269            0 :     state
    1270            0 :         .tenant_manager
    1271            0 :         .delete_tenant(tenant_shard_id)
    1272            0 :         .instrument(info_span!("tenant_delete_handler",
    1273              :             tenant_id = %tenant_shard_id.tenant_id,
    1274            0 :             shard_id = %tenant_shard_id.shard_slug()
    1275              :         ))
    1276            0 :         .await?;
    1277              : 
    1278            0 :     json_response(StatusCode::OK, ())
    1279            0 : }
    1280              : 
    1281              : /// HTTP endpoint to query the current tenant_size of a tenant.
    1282              : ///
    1283              : /// This is not used by consumption metrics under [`crate::consumption_metrics`], but can be used
     1284              : /// to debug any of the calculations. Requires the `tenant_id` request parameter; supports
     1285              : /// `inputs_only=true|false` (default false), which helps debug failures to calculate model
     1286              : /// values.
    1287              : ///
    1288              : /// 'retention_period' query parameter overrides the cutoff that is used to calculate the size
    1289              : /// (only if it is shorter than the real cutoff).
    1290              : ///
    1291              : /// Note: we don't update the cached size and prometheus metric here.
    1292              : /// The retention period might be different, and it's nice to have a method to just calculate it
    1293              : /// without modifying anything anyway.
    1294            0 : async fn tenant_size_handler(
    1295            0 :     request: Request<Body>,
    1296            0 :     cancel: CancellationToken,
    1297            0 : ) -> Result<Response<Body>, ApiError> {
    1298            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1299            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1300            0 :     let inputs_only: Option<bool> = parse_query_param(&request, "inputs_only")?;
    1301            0 :     let retention_period: Option<u64> = parse_query_param(&request, "retention_period")?;
    1302            0 :     let headers = request.headers();
    1303            0 :     let state = get_state(&request);
    1304              : 
    1305            0 :     if !tenant_shard_id.is_shard_zero() {
    1306            0 :         return Err(ApiError::BadRequest(anyhow!(
    1307            0 :             "Size calculations are only available on shard zero"
    1308            0 :         )));
    1309            0 :     }
    1310              : 
    1311            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    1312            0 :     let tenant = state
    1313            0 :         .tenant_manager
    1314            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1315            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1316              : 
     1317              :     // this can be a long operation
    1318            0 :     let inputs = tenant
    1319            0 :         .gather_size_inputs(
    1320            0 :             retention_period,
    1321            0 :             LogicalSizeCalculationCause::TenantSizeHandler,
    1322            0 :             &cancel,
    1323            0 :             &ctx,
    1324            0 :         )
    1325            0 :         .await
    1326            0 :         .map_err(|e| match e {
    1327            0 :             crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown,
    1328            0 :             other => ApiError::InternalServerError(anyhow::anyhow!(other)),
    1329            0 :         })?;
    1330              : 
    1331            0 :     let mut sizes = None;
    1332            0 :     let accepts_html = headers
    1333            0 :         .get(header::ACCEPT)
    1334            0 :         .map(|v| v == "text/html")
    1335            0 :         .unwrap_or_default();
    1336            0 :     if !inputs_only.unwrap_or(false) {
    1337            0 :         let storage_model = inputs.calculate_model();
    1338            0 :         let size = storage_model.calculate();
    1339              : 
     1340              :         // If the Accept header requests HTML, return HTML
    1341            0 :         if accepts_html {
    1342            0 :             return synthetic_size_html_response(inputs, storage_model, size);
    1343            0 :         }
    1344            0 :         sizes = Some(size);
    1345            0 :     } else if accepts_html {
    1346            0 :         return Err(ApiError::BadRequest(anyhow!(
    1347            0 :             "inputs_only parameter is incompatible with html output request"
    1348            0 :         )));
    1349            0 :     }
    1350              : 
     1351              :     /// The type resides in the pageserver so as not to expose `ModelInputs`.
    1352              :     #[derive(serde::Serialize)]
    1353              :     struct TenantHistorySize {
    1354              :         id: TenantId,
    1355              :         /// Size is a mixture of WAL and logical size, so the unit is bytes.
    1356              :         ///
    1357              :         /// Will be none if `?inputs_only=true` was given.
    1358              :         size: Option<u64>,
    1359              :         /// Size of each segment used in the model.
    1360              :         /// Will be null if `?inputs_only=true` was given.
    1361              :         segment_sizes: Option<Vec<tenant_size_model::SegmentSizeResult>>,
    1362              :         inputs: crate::tenant::size::ModelInputs,
    1363              :     }
    1364              : 
    1365            0 :     json_response(
    1366              :         StatusCode::OK,
    1367              :         TenantHistorySize {
    1368            0 :             id: tenant_shard_id.tenant_id,
    1369            0 :             size: sizes.as_ref().map(|x| x.total_size),
    1370            0 :             segment_sizes: sizes.map(|x| x.segments),
    1371            0 :             inputs,
    1372              :         },
    1373              :     )
    1374            0 : }
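// NOTE: illustrative addition, not part of the original routes.rs. Combining the query
// parameters handled above, a hypothetical debugging call could look like this (the URL path
// is an assumption; `inputs_only` and `retention_period` are the parameters the handler
// parses, and an `Accept: text/html` header switches the successful response to the SVG/HTML
// report built by `synthetic_size_html_response`):
//
//     GET .../tenant/<tenant_id>/synthetic_size?inputs_only=false&retention_period=86400
//     Accept: text/html
//
// As enforced above, `inputs_only=true` is rejected when HTML output is requested.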
    1375              : 
    1376            0 : async fn tenant_shard_split_handler(
    1377            0 :     mut request: Request<Body>,
    1378            0 :     _cancel: CancellationToken,
    1379            0 : ) -> Result<Response<Body>, ApiError> {
    1380            0 :     let req: TenantShardSplitRequest = json_request(&mut request).await?;
    1381              : 
    1382            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1383            0 :     let state = get_state(&request);
    1384            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    1385              : 
    1386            0 :     let tenant = state
    1387            0 :         .tenant_manager
    1388            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1389            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1390              : 
    1391            0 :     let new_shards = state
    1392            0 :         .tenant_manager
    1393            0 :         .shard_split(
    1394            0 :             tenant,
    1395            0 :             ShardCount::new(req.new_shard_count),
    1396            0 :             req.new_stripe_size,
    1397            0 :             &ctx,
    1398            0 :         )
    1399            0 :         .await
    1400            0 :         .map_err(ApiError::InternalServerError)?;
    1401              : 
    1402            0 :     json_response(StatusCode::OK, TenantShardSplitResponse { new_shards })
    1403            0 : }
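// NOTE: illustrative addition, not part of the original routes.rs. The split request body
// carries the `new_shard_count` and `new_stripe_size` fields read from
// `TenantShardSplitRequest` above; a hypothetical body, with values chosen purely for
// illustration:
//
//     { "new_shard_count": 4, "new_stripe_size": 32768 }
//
// On success the handler responds with `TenantShardSplitResponse { new_shards }`, listing the
// shards created by the split.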
    1404              : 
    1405            0 : async fn layer_map_info_handler(
    1406            0 :     request: Request<Body>,
    1407            0 :     _cancel: CancellationToken,
    1408            0 : ) -> Result<Response<Body>, ApiError> {
    1409            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1410            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1411            0 :     let reset: LayerAccessStatsReset =
    1412            0 :         parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
    1413            0 :     let state = get_state(&request);
    1414              : 
    1415            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1416              : 
    1417            0 :     let timeline =
    1418            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1419            0 :             .await?;
    1420            0 :     let layer_map_info = timeline
    1421            0 :         .layer_map_info(reset)
    1422            0 :         .await
    1423            0 :         .map_err(|_shutdown| ApiError::ShuttingDown)?;
    1424              : 
    1425            0 :     json_response(StatusCode::OK, layer_map_info)
    1426            0 : }
    1427              : 
    1428              : #[instrument(skip_all, fields(tenant_id, shard_id, timeline_id, layer_name))]
    1429              : async fn timeline_layer_scan_disposable_keys(
    1430              :     request: Request<Body>,
    1431              :     cancel: CancellationToken,
    1432              : ) -> Result<Response<Body>, ApiError> {
    1433              :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1434              :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1435              :     let layer_name: LayerName = parse_request_param(&request, "layer_name")?;
    1436              : 
    1437              :     tracing::Span::current().record(
    1438              :         "tenant_id",
    1439              :         tracing::field::display(&tenant_shard_id.tenant_id),
    1440              :     );
    1441              :     tracing::Span::current().record(
    1442              :         "shard_id",
    1443              :         tracing::field::display(tenant_shard_id.shard_slug()),
    1444              :     );
    1445              :     tracing::Span::current().record("timeline_id", tracing::field::display(&timeline_id));
    1446              :     tracing::Span::current().record("layer_name", tracing::field::display(&layer_name));
    1447              : 
    1448              :     let state = get_state(&request);
    1449              : 
    1450              :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1451              : 
    1452              :     // technically the timeline need not be active for this scan to complete
    1453              :     let timeline =
    1454              :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1455              :             .await?;
    1456              : 
    1457              :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    1458              :         .with_scope_timeline(&timeline);
    1459              : 
    1460              :     let guard = timeline
    1461              :         .layers
    1462              :         .read(LayerManagerLockHolder::GetLayerMapInfo)
    1463              :         .await;
    1464              :     let Some(layer) = guard.try_get_from_key(&layer_name.clone().into()) else {
    1465              :         return Err(ApiError::NotFound(
    1466              :             anyhow::anyhow!("Layer {tenant_shard_id}/{timeline_id}/{layer_name} not found").into(),
    1467              :         ));
    1468              :     };
    1469              : 
    1470              :     let resident_layer = layer
    1471              :         .download_and_keep_resident(&ctx)
    1472              :         .await
    1473            0 :         .map_err(|err| match err {
    1474              :             tenant::storage_layer::layer::DownloadError::TimelineShutdown
    1475              :             | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
    1476            0 :                 ApiError::ShuttingDown
    1477              :             }
    1478              :             tenant::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
    1479              :             | tenant::storage_layer::layer::DownloadError::DownloadRequired
    1480              :             | tenant::storage_layer::layer::DownloadError::NotFile(_)
    1481              :             | tenant::storage_layer::layer::DownloadError::DownloadFailed
    1482              :             | tenant::storage_layer::layer::DownloadError::PreStatFailed(_) => {
    1483            0 :                 ApiError::InternalServerError(err.into())
    1484              :             }
    1485              :             #[cfg(test)]
    1486              :             tenant::storage_layer::layer::DownloadError::Failpoint(_) => {
    1487            0 :                 ApiError::InternalServerError(err.into())
    1488              :             }
    1489            0 :         })?;
    1490              : 
    1491              :     let keys = resident_layer
    1492              :         .load_keys(&ctx)
    1493              :         .await
    1494              :         .map_err(ApiError::InternalServerError)?;
    1495              : 
    1496              :     let shard_identity = timeline.get_shard_identity();
    1497              : 
    1498              :     let mut disposable_count = 0;
    1499              :     let mut not_disposable_count = 0;
    1500              :     let cancel = cancel.clone();
    1501              :     for (i, key) in keys.into_iter().enumerate() {
    1502              :         if shard_identity.is_key_disposable(&key) {
    1503              :             disposable_count += 1;
    1504              :             tracing::debug!(key = %key, key.dbg=?key, "disposable key");
    1505              :         } else {
    1506              :             not_disposable_count += 1;
    1507              :         }
    1508              :         #[allow(clippy::collapsible_if)]
    1509              :         if i % 10000 == 0 {
    1510              :             if cancel.is_cancelled() || timeline.cancel.is_cancelled() || timeline.is_stopping() {
    1511              :                 return Err(ApiError::ShuttingDown);
    1512              :             }
    1513              :         }
    1514              :     }
    1515              : 
    1516              :     json_response(
    1517              :         StatusCode::OK,
    1518              :         pageserver_api::models::ScanDisposableKeysResponse {
    1519              :             disposable_count,
    1520              :             not_disposable_count,
    1521              :         },
    1522              :     )
    1523              : }
    1524              : 
    1525            0 : async fn timeline_download_heatmap_layers_handler(
    1526            0 :     request: Request<Body>,
    1527            0 :     _cancel: CancellationToken,
    1528            0 : ) -> Result<Response<Body>, ApiError> {
    1529              :     // Only used in the case where remote storage is not configured.
    1530              :     const DEFAULT_MAX_CONCURRENCY: usize = 100;
    1531              :     // A conservative default.
    1532              :     const DEFAULT_CONCURRENCY: usize = 16;
    1533              : 
    1534            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1535            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1536              : 
    1537            0 :     let desired_concurrency =
    1538            0 :         parse_query_param(&request, "concurrency")?.unwrap_or(DEFAULT_CONCURRENCY);
    1539            0 :     let recurse = parse_query_param(&request, "recurse")?.unwrap_or(false);
    1540              : 
    1541            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1542              : 
    1543            0 :     let state = get_state(&request);
    1544            0 :     let timeline =
    1545            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1546            0 :             .await?;
    1547            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    1548            0 :         .with_scope_timeline(&timeline);
    1549              : 
    1550            0 :     let max_concurrency = get_config(&request)
    1551            0 :         .remote_storage_config
    1552            0 :         .as_ref()
    1553            0 :         .map(|c| c.concurrency_limit())
    1554            0 :         .unwrap_or(DEFAULT_MAX_CONCURRENCY);
    1555            0 :     let concurrency = std::cmp::min(max_concurrency, desired_concurrency);
    1556              : 
    1557            0 :     timeline.start_heatmap_layers_download(concurrency, recurse, &ctx)?;
    1558              : 
    1559            0 :     json_response(StatusCode::ACCEPTED, ())
    1560            0 : }
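// NOTE: illustrative addition, not part of the original routes.rs. The effective download
// concurrency is the `concurrency` query parameter clamped to the remote storage concurrency
// limit (or DEFAULT_MAX_CONCURRENCY = 100 when remote storage is not configured). For
// example, with a configured limit of 50, a request with `?concurrency=200&recurse=true`
// would run the heatmap layer downloads with a concurrency of 50.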
    1561              : 
    1562            0 : async fn timeline_shutdown_download_heatmap_layers_handler(
    1563            0 :     request: Request<Body>,
    1564            0 :     _cancel: CancellationToken,
    1565            0 : ) -> Result<Response<Body>, ApiError> {
    1566            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1567            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1568              : 
    1569            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1570              : 
    1571            0 :     let state = get_state(&request);
    1572            0 :     let timeline =
    1573            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1574            0 :             .await?;
    1575              : 
    1576            0 :     timeline.stop_and_drain_heatmap_layers_download().await;
    1577              : 
    1578            0 :     json_response(StatusCode::OK, ())
    1579            0 : }
    1580              : 
    1581            0 : async fn layer_download_handler(
    1582            0 :     request: Request<Body>,
    1583            0 :     _cancel: CancellationToken,
    1584            0 : ) -> Result<Response<Body>, ApiError> {
    1585            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1586            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1587            0 :     let layer_file_name = get_request_param(&request, "layer_file_name")?;
    1588            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1589            0 :     let layer_name = LayerName::from_str(layer_file_name)
    1590            0 :         .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
    1591            0 :     let state = get_state(&request);
    1592              : 
    1593            0 :     let timeline =
    1594            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1595            0 :             .await?;
    1596            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    1597            0 :         .with_scope_timeline(&timeline);
    1598            0 :     let downloaded = timeline
    1599            0 :         .download_layer(&layer_name, &ctx)
    1600            0 :         .await
    1601            0 :         .map_err(|e| match e {
    1602              :             tenant::storage_layer::layer::DownloadError::TimelineShutdown
    1603              :             | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
    1604            0 :                 ApiError::ShuttingDown
    1605              :             }
    1606            0 :             other => ApiError::InternalServerError(other.into()),
    1607            0 :         })?;
    1608              : 
    1609            0 :     match downloaded {
    1610            0 :         Some(true) => json_response(StatusCode::OK, ()),
    1611            0 :         Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
    1612            0 :         None => json_response(
    1613              :             StatusCode::BAD_REQUEST,
    1614            0 :             format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
    1615              :         ),
    1616              :     }
    1617            0 : }
    1618              : 
    1619            0 : async fn evict_timeline_layer_handler(
    1620            0 :     request: Request<Body>,
    1621            0 :     _cancel: CancellationToken,
    1622            0 : ) -> Result<Response<Body>, ApiError> {
    1623            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1624            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1625            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1626            0 :     let layer_file_name = get_request_param(&request, "layer_file_name")?;
    1627            0 :     let state = get_state(&request);
    1628              : 
    1629            0 :     let layer_name = LayerName::from_str(layer_file_name)
    1630            0 :         .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
    1631              : 
    1632            0 :     let timeline =
    1633            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1634            0 :             .await?;
    1635            0 :     let evicted = timeline
    1636            0 :         .evict_layer(&layer_name)
    1637            0 :         .await
    1638            0 :         .map_err(ApiError::InternalServerError)?;
    1639              : 
    1640            0 :     match evicted {
    1641            0 :         Some(true) => json_response(StatusCode::OK, ()),
    1642            0 :         Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
    1643            0 :         None => json_response(
    1644              :             StatusCode::BAD_REQUEST,
    1645            0 :             format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
    1646              :         ),
    1647              :     }
    1648            0 : }
    1649              : 
    1650            0 : async fn timeline_gc_blocking_handler(
    1651            0 :     request: Request<Body>,
    1652            0 :     _cancel: CancellationToken,
    1653            0 : ) -> Result<Response<Body>, ApiError> {
    1654            0 :     block_or_unblock_gc(request, true).await
    1655            0 : }
    1656              : 
    1657            0 : async fn timeline_gc_unblocking_handler(
    1658            0 :     request: Request<Body>,
    1659            0 :     _cancel: CancellationToken,
    1660            0 : ) -> Result<Response<Body>, ApiError> {
    1661            0 :     block_or_unblock_gc(request, false).await
    1662            0 : }
    1663              : 
    1664              : /// Traces GetPage@LSN requests for a timeline, and emits metadata in an efficient binary encoding.
    1665              : /// Use the `pagectl page-trace` command to decode and analyze the output.
    1666            0 : async fn timeline_page_trace_handler(
    1667            0 :     request: Request<Body>,
    1668            0 :     cancel: CancellationToken,
    1669            0 : ) -> Result<Response<Body>, ApiError> {
    1670            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1671            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1672            0 :     let state = get_state(&request);
    1673            0 :     check_permission(&request, None)?;
    1674              : 
    1675            0 :     let size_limit: usize = parse_query_param(&request, "size_limit_bytes")?.unwrap_or(1024 * 1024);
    1676            0 :     let time_limit_secs: u64 = parse_query_param(&request, "time_limit_secs")?.unwrap_or(5);
    1677              : 
    1678              :     // Convert size limit to event limit based on the serialized size of an event. The event size is
    1679              :     // fixed, as the default bincode serializer uses fixed-width integer encoding.
    1680            0 :     let event_size = bincode::serialize(&PageTraceEvent::default())
    1681            0 :         .map_err(|err| ApiError::InternalServerError(err.into()))?
    1682            0 :         .len();
    1683            0 :     let event_limit = size_limit / event_size;
    1684              : 
    1685            0 :     let timeline =
    1686            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    1687            0 :             .await?;
    1688              : 
    1689              :     // Install a page trace, unless one is already in progress. We just use a buffered channel,
    1690              :     // which may 2x the memory usage in the worst case, but it's still bounded.
    1691            0 :     let (trace_tx, mut trace_rx) = tokio::sync::mpsc::channel(event_limit);
    1692            0 :     let cur = timeline.page_trace.load();
    1693            0 :     let installed = cur.is_none()
    1694            0 :         && timeline
    1695            0 :             .page_trace
    1696            0 :             .compare_and_swap(cur, Some(Arc::new(trace_tx)))
    1697            0 :             .is_none();
    1698            0 :     if !installed {
    1699            0 :         return Err(ApiError::Conflict("page trace already active".to_string()));
    1700            0 :     }
    1701            0 :     defer!(timeline.page_trace.store(None)); // uninstall on return
    1702              : 
    1703              :     // Collect the trace and return it to the client. We could stream the response, but this is
    1704              :     // simple and fine.
    1705            0 :     let mut body = Vec::with_capacity(size_limit);
    1706            0 :     let deadline = Instant::now() + Duration::from_secs(time_limit_secs);
    1707              : 
    1708            0 :     while body.len() < size_limit {
    1709            0 :         tokio::select! {
    1710            0 :             event = trace_rx.recv() => {
    1711            0 :                 let Some(event) = event else {
    1712            0 :                     break; // shouldn't happen (sender doesn't close, unless timeline dropped)
    1713              :                 };
    1714            0 :                 bincode::serialize_into(&mut body, &event)
    1715            0 :                     .map_err(|err| ApiError::InternalServerError(err.into()))?;
    1716              :             }
    1717            0 :             _ = tokio::time::sleep_until(deadline) => break, // time limit reached
    1718            0 :             _ = cancel.cancelled() => return Err(ApiError::Cancelled),
    1719              :         }
    1720              :     }
    1721              : 
    1722            0 :     Ok(Response::builder()
    1723            0 :         .status(StatusCode::OK)
    1724            0 :         .header(header::CONTENT_TYPE, "application/octet-stream")
    1725            0 :         .body(hyper::Body::from(body))
    1726            0 :         .unwrap())
    1727            0 : }
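// NOTE: illustrative addition, not part of the original routes.rs. The response body is a
// dense stream of fixed-width, bincode-encoded `PageTraceEvent` records (see the event_size
// computation above). A minimal decoding sketch, assuming the same default bincode
// configuration and that `PageTraceEvent` also derives `Deserialize`:
//
//     let mut cursor = std::io::Cursor::new(&body_bytes[..]);
//     while (cursor.position() as usize) < body_bytes.len() {
//         let event: PageTraceEvent = bincode::deserialize_from(&mut cursor)?;
//         // ... inspect the event ...
//     }
//
// In practice the `pagectl page-trace` command mentioned in the doc comment performs this
// decoding.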
    1728              : 
    1729              : /// Adding a block is `POST ../block_gc`, removing a block is `POST ../unblock_gc`.
    1730              : ///
    1731              : /// Both are technically unsafe because they might fire off index uploads, thus they are POST.
    1732            0 : async fn block_or_unblock_gc(
    1733            0 :     request: Request<Body>,
    1734            0 :     block: bool,
    1735            0 : ) -> Result<Response<Body>, ApiError> {
    1736              :     use crate::tenant::remote_timeline_client::WaitCompletionError;
    1737              :     use crate::tenant::upload_queue::NotInitialized;
    1738            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1739            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1740            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    1741            0 :     let state = get_state(&request);
    1742              : 
    1743            0 :     let tenant = state
    1744            0 :         .tenant_manager
    1745            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1746              : 
    1747            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1748              : 
    1749            0 :     let timeline = tenant.get_timeline(timeline_id, true)?;
    1750              : 
    1751            0 :     let fut = async {
    1752            0 :         if block {
    1753            0 :             timeline.block_gc(&tenant).await.map(|_| ())
    1754              :         } else {
    1755            0 :             timeline.unblock_gc(&tenant).await
    1756              :         }
    1757            0 :     };
    1758              : 
    1759            0 :     let span = tracing::info_span!(
    1760              :         "block_or_unblock_gc",
    1761              :         tenant_id = %tenant_shard_id.tenant_id,
    1762            0 :         shard_id = %tenant_shard_id.shard_slug(),
    1763              :         timeline_id = %timeline_id,
    1764              :         block = block,
    1765              :     );
    1766              : 
    1767            0 :     let res = fut.instrument(span).await;
    1768              : 
    1769            0 :     res.map_err(|e| {
    1770            0 :         if e.is::<NotInitialized>() || e.is::<WaitCompletionError>() {
    1771            0 :             ApiError::ShuttingDown
    1772              :         } else {
    1773            0 :             ApiError::InternalServerError(e)
    1774              :         }
    1775            0 :     })?;
    1776              : 
    1777            0 :     json_response(StatusCode::OK, ())
    1778            0 : }
    1779              : 
    1780              : /// Get tenant_size SVG graph along with the JSON data.
    1781            0 : fn synthetic_size_html_response(
    1782            0 :     inputs: ModelInputs,
    1783            0 :     storage_model: StorageModel,
    1784            0 :     sizes: SizeResult,
    1785            0 : ) -> Result<Response<Body>, ApiError> {
    1786            0 :     let mut timeline_ids: Vec<String> = Vec::new();
    1787            0 :     let mut timeline_map: HashMap<TimelineId, usize> = HashMap::new();
    1788            0 :     for (index, ti) in inputs.timeline_inputs.iter().enumerate() {
    1789            0 :         timeline_map.insert(ti.timeline_id, index);
    1790            0 :         timeline_ids.push(ti.timeline_id.to_string());
    1791            0 :     }
    1792            0 :     let seg_to_branch: Vec<(usize, SvgBranchKind)> = inputs
    1793            0 :         .segments
    1794            0 :         .iter()
    1795            0 :         .map(|seg| {
    1796            0 :             (
    1797            0 :                 *timeline_map.get(&seg.timeline_id).unwrap(),
    1798            0 :                 seg.kind.into(),
    1799            0 :             )
    1800            0 :         })
    1801            0 :         .collect();
    1802              : 
    1803            0 :     let svg =
    1804            0 :         tenant_size_model::svg::draw_svg(&storage_model, &timeline_ids, &seg_to_branch, &sizes)
    1805            0 :             .map_err(ApiError::InternalServerError)?;
    1806              : 
    1807            0 :     let mut response = String::new();
    1808              : 
    1809              :     use std::fmt::Write;
    1810            0 :     write!(response, "<html>\n<body>\n").unwrap();
    1811            0 :     write!(response, "<div>\n{svg}\n</div>").unwrap();
    1812            0 :     writeln!(response, "Project size: {}", sizes.total_size).unwrap();
    1813            0 :     writeln!(response, "<pre>").unwrap();
    1814            0 :     writeln!(
    1815            0 :         response,
    1816            0 :         "{}",
    1817            0 :         serde_json::to_string_pretty(&inputs).unwrap()
    1818              :     )
    1819            0 :     .unwrap();
    1820            0 :     writeln!(
    1821            0 :         response,
    1822            0 :         "{}",
    1823            0 :         serde_json::to_string_pretty(&sizes.segments).unwrap()
    1824              :     )
    1825            0 :     .unwrap();
    1826            0 :     writeln!(response, "</pre>").unwrap();
    1827            0 :     write!(response, "</body>\n</html>\n").unwrap();
    1828              : 
    1829            0 :     html_response(StatusCode::OK, response)
    1830            0 : }
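
The handler above assembles the HTML page by writing into a String through std::fmt::Write and embedding pretty-printed JSON. A minimal, self-contained sketch of that pattern; the Inputs struct here is a hypothetical stand-in for the real size-model inputs:

    use std::fmt::Write as _;

    #[derive(serde::Serialize)]
    struct Inputs {
        total_size: u64,
    }

    fn render_report(inputs: &Inputs) -> String {
        let mut page = String::new();
        // write!/writeln! into a String cannot fail, so unwrap() is safe here.
        write!(page, "<html>\n<body>\n").unwrap();
        writeln!(page, "Project size: {}", inputs.total_size).unwrap();
        writeln!(page, "<pre>").unwrap();
        writeln!(page, "{}", serde_json::to_string_pretty(inputs).unwrap()).unwrap();
        writeln!(page, "</pre>").unwrap();
        write!(page, "</body>\n</html>\n").unwrap();
        page
    }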
    1831              : 
    1832            0 : pub fn html_response(status: StatusCode, data: String) -> Result<Response<Body>, ApiError> {
    1833            0 :     let response = Response::builder()
    1834            0 :         .status(status)
    1835            0 :         .header(header::CONTENT_TYPE, "text/html")
    1836            0 :         .body(Body::from(data.as_bytes().to_vec()))
    1837            0 :         .map_err(|e| ApiError::InternalServerError(e.into()))?;
    1838            0 :     Ok(response)
    1839            0 : }
    1840              : 
    1841            0 : async fn get_tenant_config_handler(
    1842            0 :     request: Request<Body>,
    1843            0 :     _cancel: CancellationToken,
    1844            0 : ) -> Result<Response<Body>, ApiError> {
    1845            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1846            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1847            0 :     let state = get_state(&request);
    1848              : 
    1849            0 :     let tenant = state
    1850            0 :         .tenant_manager
    1851            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1852              : 
    1853            0 :     let response = HashMap::from([
    1854              :         (
    1855              :             "tenant_specific_overrides",
    1856            0 :             serde_json::to_value(tenant.tenant_specific_overrides())
    1857            0 :                 .context("serializing tenant specific overrides")
    1858            0 :                 .map_err(ApiError::InternalServerError)?,
    1859              :         ),
    1860              :         (
    1861            0 :             "effective_config",
    1862            0 :             serde_json::to_value(tenant.effective_config())
    1863            0 :                 .context("serializing effective config")
    1864            0 :                 .map_err(ApiError::InternalServerError)?,
    1865              :         ),
    1866              :     ]);
    1867              : 
    1868            0 :     json_response(StatusCode::OK, response)
    1869            0 : }
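
The response above is a map of two independently serialized JSON values. A small sketch of that shape, assuming a hypothetical EffectiveConfig struct in place of the real tenant config types:

    use std::collections::HashMap;

    #[derive(serde::Serialize)]
    struct EffectiveConfig {
        gc_horizon: u64,
    }

    fn build_config_response(
        overrides: &EffectiveConfig,
        effective: &EffectiveConfig,
    ) -> Result<HashMap<&'static str, serde_json::Value>, serde_json::Error> {
        // Each value is serialized separately so a failure can be attributed to one field.
        Ok(HashMap::from([
            ("tenant_specific_overrides", serde_json::to_value(overrides)?),
            ("effective_config", serde_json::to_value(effective)?),
        ]))
    }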
    1870              : 
    1871            0 : async fn update_tenant_config_handler(
    1872            0 :     mut request: Request<Body>,
    1873            0 :     _cancel: CancellationToken,
    1874            0 : ) -> Result<Response<Body>, ApiError> {
    1875            0 :     let request_data: TenantConfigRequest = json_request(&mut request).await?;
    1876            0 :     let tenant_id = request_data.tenant_id;
    1877            0 :     check_permission(&request, Some(tenant_id))?;
    1878              : 
    1879            0 :     let new_tenant_conf = request_data.config;
    1880              : 
    1881            0 :     let state = get_state(&request);
    1882              : 
    1883            0 :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    1884              : 
    1885            0 :     let tenant = state
    1886            0 :         .tenant_manager
    1887            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1888            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1889              : 
    1890              :     // This is a legacy API that only operates on attached tenants: the preferred
    1891              :     // API to use is the location_config/ endpoint, which lets the caller provide
    1892              :     // the full LocationConf.
    1893            0 :     let location_conf = LocationConf::attached_single(
    1894            0 :         new_tenant_conf.clone(),
    1895            0 :         tenant.get_generation(),
    1896            0 :         ShardParameters::from(tenant.get_shard_identity()),
    1897              :     );
    1898              : 
    1899            0 :     tenant
    1900            0 :         .get_shard_identity()
    1901            0 :         .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
    1902              : 
    1903            0 :     crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
    1904            0 :         .await
    1905            0 :         .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    1906              : 
    1907            0 :     let _ = tenant
    1908            0 :         .update_tenant_config(|_crnt| Ok(new_tenant_conf.clone()))
    1909            0 :         .expect("Closure returns Ok()");
    1910              : 
    1911            0 :     json_response(StatusCode::OK, ())
    1912            0 : }
    1913              : 
    1914            0 : async fn patch_tenant_config_handler(
    1915            0 :     mut request: Request<Body>,
    1916            0 :     _cancel: CancellationToken,
    1917            0 : ) -> Result<Response<Body>, ApiError> {
    1918            0 :     let request_data: TenantConfigPatchRequest = json_request(&mut request).await?;
    1919            0 :     let tenant_id = request_data.tenant_id;
    1920            0 :     check_permission(&request, Some(tenant_id))?;
    1921              : 
    1922            0 :     let state = get_state(&request);
    1923              : 
    1924            0 :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    1925              : 
    1926            0 :     let tenant = state
    1927            0 :         .tenant_manager
    1928            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    1929            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    1930              : 
    1931            0 :     let updated = tenant
    1932            0 :         .update_tenant_config(|crnt| {
    1933            0 :             crnt.apply_patch(request_data.config.clone())
    1934            0 :                 .map_err(anyhow::Error::new)
    1935            0 :         })
    1936            0 :         .map_err(ApiError::BadRequest)?;
    1937              : 
    1938              :     // This is a legacy API that only operates on attached tenants: the preferred
    1939              :     // API to use is the location_config/ endpoint, which lets the caller provide
    1940              :     // the full LocationConf.
    1941            0 :     let location_conf = LocationConf::attached_single(
    1942            0 :         updated,
    1943            0 :         tenant.get_generation(),
    1944            0 :         ShardParameters::from(tenant.get_shard_identity()),
    1945              :     );
    1946              : 
    1947            0 :     tenant
    1948            0 :         .get_shard_identity()
    1949            0 :         .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
    1950              : 
    1951            0 :     crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
    1952            0 :         .await
    1953            0 :         .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    1954              : 
    1955            0 :     json_response(StatusCode::OK, ())
    1956            0 : }
    1957              : 
    1958            0 : async fn put_tenant_location_config_handler(
    1959            0 :     mut request: Request<Body>,
    1960            0 :     _cancel: CancellationToken,
    1961            0 : ) -> Result<Response<Body>, ApiError> {
    1962            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    1963              : 
    1964            0 :     let request_data: TenantLocationConfigRequest = json_request(&mut request).await?;
    1965            0 :     let flush = parse_query_param(&request, "flush_ms")?.map(Duration::from_millis);
    1966            0 :     let lazy = parse_query_param(&request, "lazy")?.unwrap_or(false);
    1967            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    1968              : 
    1969            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    1970            0 :     let state = get_state(&request);
    1971            0 :     let conf = state.conf;
    1972              : 
    1973              :     // The `Detached` state is special: it doesn't upsert a tenant, it removes
    1974              :     // its local disk content and drops it from memory.
    1975            0 :     if let LocationConfigMode::Detached = request_data.config.mode {
    1976            0 :         if let Err(e) = state
    1977            0 :             .tenant_manager
    1978            0 :             .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client)
    1979            0 :             .instrument(info_span!("tenant_detach",
    1980              :                 tenant_id = %tenant_shard_id.tenant_id,
    1981            0 :                 shard_id = %tenant_shard_id.shard_slug()
    1982              :             ))
    1983            0 :             .await
    1984              :         {
    1985            0 :             match e {
    1986            0 :                 TenantStateError::SlotError(TenantSlotError::NotFound(_)) => {
    1987            0 :                     // This API is idempotent: a NotFound on a detach is fine.
    1988            0 :                 }
    1989            0 :                 _ => return Err(e.into()),
    1990              :             }
    1991            0 :         }
    1992            0 :         return json_response(StatusCode::OK, ());
    1993            0 :     }
    1994              : 
    1995            0 :     let location_conf =
    1996            0 :         LocationConf::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
    1997              : 
    1998              :     // lazy==true queues the tenant up for activation, or jumps the queue as usual when a compute
    1999              :     // connects, similar to the ordering used at startup.
    2000            0 :     let spawn_mode = if lazy {
    2001            0 :         tenant::SpawnMode::Lazy
    2002              :     } else {
    2003            0 :         tenant::SpawnMode::Eager
    2004              :     };
    2005              : 
    2006            0 :     let tenant = state
    2007            0 :         .tenant_manager
    2008            0 :         .upsert_location(tenant_shard_id, location_conf, flush, spawn_mode, &ctx)
    2009            0 :         .await?;
    2010            0 :     let stripe_size = tenant.as_ref().map(|t| t.get_shard_stripe_size());
    2011            0 :     let attached = tenant.is_some();
    2012              : 
    2013            0 :     if let Some(_flush_ms) = flush {
    2014            0 :         match state
    2015            0 :             .secondary_controller
    2016            0 :             .upload_tenant(tenant_shard_id)
    2017            0 :             .await
    2018              :         {
    2019              :             Ok(()) => {
    2020            0 :                 tracing::info!("Uploaded heatmap during flush");
    2021              :             }
    2022            0 :             Err(e) => {
    2023            0 :                 tracing::warn!("Failed to flush heatmap: {e}");
    2024              :             }
    2025              :         }
    2026              :     } else {
    2027            0 :         tracing::info!("No flush requested when configuring");
    2028              :     }
    2029              : 
    2030              :     // This API returns a vector of pageservers where the tenant is attached: this is
    2031              :     // primarily for use in the sharding service.  For compatibility, we also return this
    2032              :     // when called directly on a pageserver, but the payload always contains zero or one shards.
    2033            0 :     let mut response = TenantLocationConfigResponse {
    2034            0 :         shards: Vec::new(),
    2035            0 :         stripe_size: None,
    2036            0 :     };
    2037            0 :     if attached {
    2038            0 :         response.shards.push(TenantShardLocation {
    2039            0 :             shard_id: tenant_shard_id,
    2040            0 :             node_id: state.conf.id,
    2041            0 :         });
    2042            0 :         if tenant_shard_id.shard_count.count() > 1 {
    2043              :             // Stripe size should be set if we are attached
    2044            0 :             debug_assert!(stripe_size.is_some());
    2045            0 :             response.stripe_size = stripe_size;
    2046            0 :         }
    2047            0 :     }
    2048              : 
    2049            0 :     json_response(StatusCode::OK, response)
    2050            0 : }
    2051              : 
    2052            0 : async fn list_location_config_handler(
    2053            0 :     request: Request<Body>,
    2054            0 :     _cancel: CancellationToken,
    2055            0 : ) -> Result<Response<Body>, ApiError> {
    2056            0 :     let state = get_state(&request);
    2057            0 :     let slots = state.tenant_manager.list();
    2058            0 :     let result = LocationConfigListResponse {
    2059            0 :         tenant_shards: slots
    2060            0 :             .into_iter()
    2061            0 :             .map(|(tenant_shard_id, slot)| {
    2062            0 :                 let v = match slot {
    2063            0 :                     TenantSlot::Attached(t) => Some(t.get_location_conf()),
    2064            0 :                     TenantSlot::Secondary(s) => Some(s.get_location_conf()),
    2065            0 :                     TenantSlot::InProgress(_) => None,
    2066              :                 };
    2067            0 :                 (tenant_shard_id, v)
    2068            0 :             })
    2069            0 :             .collect(),
    2070              :     };
    2071            0 :     json_response(StatusCode::OK, result)
    2072            0 : }
    2073              : 
    2074            0 : async fn get_location_config_handler(
    2075            0 :     request: Request<Body>,
    2076            0 :     _cancel: CancellationToken,
    2077            0 : ) -> Result<Response<Body>, ApiError> {
    2078            0 :     let state = get_state(&request);
    2079            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2080            0 :     let slot = state.tenant_manager.get(tenant_shard_id);
    2081              : 
    2082            0 :     let Some(slot) = slot else {
    2083            0 :         return Err(ApiError::NotFound(
    2084            0 :             anyhow::anyhow!("Tenant shard not found").into(),
    2085            0 :         ));
    2086              :     };
    2087              : 
    2088            0 :     let result: Option<LocationConfig> = match slot {
    2089            0 :         TenantSlot::Attached(t) => Some(t.get_location_conf()),
    2090            0 :         TenantSlot::Secondary(s) => Some(s.get_location_conf()),
    2091            0 :         TenantSlot::InProgress(_) => None,
    2092              :     };
    2093              : 
    2094            0 :     json_response(StatusCode::OK, result)
    2095            0 : }
    2096              : 
    2097              : // Do a time travel recovery on the given tenant/tenant shard. The tenant needs to be detached
    2098              : // (from all pageservers), as the recovery invalidates consistency assumptions.
    2099            0 : async fn tenant_time_travel_remote_storage_handler(
    2100            0 :     request: Request<Body>,
    2101            0 :     cancel: CancellationToken,
    2102            0 : ) -> Result<Response<Body>, ApiError> {
    2103            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2104              : 
    2105            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2106              : 
    2107            0 :     let timestamp_raw = must_get_query_param(&request, "travel_to")?;
    2108            0 :     let timestamp = humantime::parse_rfc3339(&timestamp_raw)
    2109            0 :         .with_context(|| format!("Invalid time for travel_to: {timestamp_raw:?}"))
    2110            0 :         .map_err(ApiError::BadRequest)?;
    2111              : 
    2112            0 :     let done_if_after_raw = must_get_query_param(&request, "done_if_after")?;
    2113            0 :     let done_if_after = humantime::parse_rfc3339(&done_if_after_raw)
    2114            0 :         .with_context(|| format!("Invalid time for done_if_after: {done_if_after_raw:?}"))
    2115            0 :         .map_err(ApiError::BadRequest)?;
    2116              : 
    2117              :     // This is just a sanity check to fend off naive misuse of the API:
    2118              :     // the tenant needs to be detached *everywhere*.
    2119            0 :     let state = get_state(&request);
    2120            0 :     let we_manage_tenant = state.tenant_manager.manages_tenant_shard(tenant_shard_id);
    2121            0 :     if we_manage_tenant {
    2122            0 :         return Err(ApiError::BadRequest(anyhow!(
    2123            0 :             "Tenant {tenant_shard_id} is already attached at this pageserver"
    2124            0 :         )));
    2125            0 :     }
    2126              : 
    2127            0 :     if timestamp > done_if_after {
    2128            0 :         return Err(ApiError::BadRequest(anyhow!(
    2129            0 :             "The done_if_after timestamp comes before the timestamp to recover to"
    2130            0 :         )));
    2131            0 :     }
    2132              : 
    2133            0 :     tracing::info!(
    2134            0 :         "Issuing time travel request internally. timestamp={timestamp_raw}, done_if_after={done_if_after_raw}"
    2135              :     );
    2136              : 
    2137            0 :     remote_timeline_client::upload::time_travel_recover_tenant(
    2138            0 :         &state.remote_storage,
    2139            0 :         &tenant_shard_id,
    2140            0 :         timestamp,
    2141            0 :         done_if_after,
    2142            0 :         &cancel,
    2143            0 :     )
    2144            0 :     .await
    2145            0 :     .map_err(|e| match e {
    2146            0 :         TimeTravelError::BadInput(e) => {
    2147            0 :             warn!("bad input error: {e}");
    2148            0 :             ApiError::BadRequest(anyhow!("bad input error"))
    2149              :         }
    2150              :         TimeTravelError::Unimplemented => {
    2151            0 :             ApiError::BadRequest(anyhow!("unimplemented for the configured remote storage"))
    2152              :         }
    2153            0 :         TimeTravelError::Cancelled => ApiError::InternalServerError(anyhow!("cancelled")),
    2154              :         TimeTravelError::TooManyVersions => {
    2155            0 :             ApiError::InternalServerError(anyhow!("too many versions in remote storage"))
    2156              :         }
    2157            0 :         TimeTravelError::Other(e) => {
    2158            0 :             warn!("internal error: {e}");
    2159            0 :             ApiError::InternalServerError(anyhow!("internal error"))
    2160              :         }
    2161            0 :     })?;
    2162              : 
    2163            0 :     json_response(StatusCode::OK, ())
    2164            0 : }
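
The two query parameters above are RFC 3339 timestamps parsed with humantime, and the recovery point must not come after the done_if_after bound. A standalone sketch of that validation (the function name and error type are illustrative):

    use std::time::SystemTime;

    fn validate_travel_window(
        travel_to: &str,
        done_if_after: &str,
    ) -> Result<(SystemTime, SystemTime), String> {
        let timestamp = humantime::parse_rfc3339(travel_to)
            .map_err(|e| format!("Invalid time for travel_to: {e}"))?;
        let done_if_after = humantime::parse_rfc3339(done_if_after)
            .map_err(|e| format!("Invalid time for done_if_after: {e}"))?;
        if timestamp > done_if_after {
            return Err("done_if_after comes before the timestamp to recover to".to_string());
        }
        Ok((timestamp, done_if_after))
    }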
    2165              : 
    2166              : /// Testing helper to transition a tenant to [`crate::tenant::TenantState::Broken`].
    2167            0 : async fn handle_tenant_break(
    2168            0 :     r: Request<Body>,
    2169            0 :     _cancel: CancellationToken,
    2170            0 : ) -> Result<Response<Body>, ApiError> {
    2171            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
    2172              : 
    2173            0 :     let state = get_state(&r);
    2174            0 :     state
    2175            0 :         .tenant_manager
    2176            0 :         .get_attached_tenant_shard(tenant_shard_id)?
    2177            0 :         .set_broken("broken from test".to_owned())
    2178            0 :         .await;
    2179              : 
    2180            0 :     json_response(StatusCode::OK, ())
    2181            0 : }
    2182              : 
    2183              : // Obtains an LSN lease on the given timeline.
    2184            0 : async fn lsn_lease_handler(
    2185            0 :     mut request: Request<Body>,
    2186            0 :     _cancel: CancellationToken,
    2187            0 : ) -> Result<Response<Body>, ApiError> {
    2188            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2189            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2190            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2191            0 :     let lsn = json_request::<LsnLeaseRequest>(&mut request).await?.lsn;
    2192              : 
    2193            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    2194              : 
    2195            0 :     let state = get_state(&request);
    2196              : 
    2197            0 :     let timeline =
    2198            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2199            0 :             .await?;
    2200              : 
    2201            0 :     let result = async {
    2202            0 :         timeline
    2203            0 :             .init_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx)
    2204            0 :             .map_err(|e| {
    2205            0 :                 ApiError::InternalServerError(
    2206            0 :                     e.context(format!("invalid lsn lease request at {lsn}")),
    2207            0 :                 )
    2208            0 :             })
    2209            0 :     }
    2210            0 :     .instrument(info_span!("init_lsn_lease", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2211            0 :     .await?;
    2212              : 
    2213            0 :     json_response(StatusCode::OK, result)
    2214            0 : }
    2215              : 
    2216              : // Run GC immediately on given timeline.
    2217            0 : async fn timeline_gc_handler(
    2218            0 :     mut request: Request<Body>,
    2219            0 :     cancel: CancellationToken,
    2220            0 : ) -> Result<Response<Body>, ApiError> {
    2221            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2222            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2223            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2224              : 
    2225            0 :     let gc_req: TimelineGcRequest = json_request(&mut request).await?;
    2226              : 
    2227            0 :     let state = get_state(&request);
    2228              : 
    2229            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    2230            0 :     let gc_result = state
    2231            0 :         .tenant_manager
    2232            0 :         .immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, &ctx)
    2233            0 :         .await?;
    2234              : 
    2235            0 :     json_response(StatusCode::OK, gc_result)
    2236            0 : }
    2237              : 
    2238              : // Cancel scheduled compaction tasks
    2239            0 : async fn timeline_cancel_compact_handler(
    2240            0 :     request: Request<Body>,
    2241            0 :     _cancel: CancellationToken,
    2242            0 : ) -> Result<Response<Body>, ApiError> {
    2243            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2244            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2245            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2246            0 :     let state = get_state(&request);
    2247            0 :     async {
    2248            0 :         let tenant = state
    2249            0 :             .tenant_manager
    2250            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    2251            0 :         tenant.cancel_scheduled_compaction(timeline_id);
    2252            0 :         json_response(StatusCode::OK, ())
    2253            0 :     }
    2254            0 :     .instrument(info_span!("timeline_cancel_compact", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2255            0 :     .await
    2256            0 : }
    2257              : 
    2258              : // Get compact info of a timeline
    2259            0 : async fn timeline_compact_info_handler(
    2260            0 :     request: Request<Body>,
    2261            0 :     _cancel: CancellationToken,
    2262            0 : ) -> Result<Response<Body>, ApiError> {
    2263            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2264            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2265            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2266            0 :     let state = get_state(&request);
    2267            0 :     async {
    2268            0 :         let tenant = state
    2269            0 :             .tenant_manager
    2270            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    2271            0 :         let resp = tenant.get_scheduled_compaction_tasks(timeline_id);
    2272            0 :         json_response(StatusCode::OK, resp)
    2273            0 :     }
    2274            0 :     .instrument(info_span!("timeline_compact_info", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2275            0 :     .await
    2276            0 : }
    2277              : 
    2278              : // Run compaction immediately on given timeline.
    2279            0 : async fn timeline_compact_handler(
    2280            0 :     mut request: Request<Body>,
    2281            0 :     cancel: CancellationToken,
    2282            0 : ) -> Result<Response<Body>, ApiError> {
    2283            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2284            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2285            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2286              : 
    2287            0 :     let compact_request = json_request_maybe::<Option<CompactRequest>>(&mut request).await?;
    2288              : 
    2289            0 :     let state = get_state(&request);
    2290              : 
    2291            0 :     let mut flags = EnumSet::empty();
    2292              : 
    2293            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
    2294            0 :         flags |= CompactFlags::ForceL0Compaction;
    2295            0 :     }
    2296            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
    2297            0 :         flags |= CompactFlags::ForceRepartition;
    2298            0 :     }
    2299            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
    2300            0 :         flags |= CompactFlags::ForceImageLayerCreation;
    2301            0 :     }
    2302            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? {
    2303            0 :         flags |= CompactFlags::EnhancedGcBottomMostCompaction;
    2304            0 :     }
    2305            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "dry_run")? {
    2306            0 :         flags |= CompactFlags::DryRun;
    2307            0 :     }
    2308              :     // Manual compaction does not yield for L0.
    2309              : 
    2310            0 :     let wait_until_uploaded =
    2311            0 :         parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
    2312              : 
    2313            0 :     let wait_until_scheduled_compaction_done =
    2314            0 :         parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")?
    2315            0 :             .unwrap_or(false);
    2316              : 
    2317            0 :     let sub_compaction = compact_request
    2318            0 :         .as_ref()
    2319            0 :         .map(|r| r.sub_compaction)
    2320            0 :         .unwrap_or(false);
    2321            0 :     let sub_compaction_max_job_size_mb = compact_request
    2322            0 :         .as_ref()
    2323            0 :         .and_then(|r| r.sub_compaction_max_job_size_mb);
    2324              : 
    2325            0 :     let options = CompactOptions {
    2326            0 :         compact_key_range: compact_request
    2327            0 :             .as_ref()
    2328            0 :             .and_then(|r| r.compact_key_range.clone()),
    2329            0 :         compact_lsn_range: compact_request
    2330            0 :             .as_ref()
    2331            0 :             .and_then(|r| r.compact_lsn_range.clone()),
    2332            0 :         flags,
    2333            0 :         sub_compaction,
    2334            0 :         sub_compaction_max_job_size_mb,
    2335              :     };
    2336              : 
    2337            0 :     let scheduled = compact_request
    2338            0 :         .as_ref()
    2339            0 :         .map(|r| r.scheduled)
    2340            0 :         .unwrap_or(false);
    2341              : 
    2342            0 :     async {
    2343            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    2344            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
    2345            0 :         if scheduled {
    2346            0 :             let tenant = state
    2347            0 :                 .tenant_manager
    2348            0 :                 .get_attached_tenant_shard(tenant_shard_id)?;
    2349            0 :             let rx = tenant.schedule_compaction(timeline_id, options).await.map_err(ApiError::InternalServerError)?;
    2350            0 :             if wait_until_scheduled_compaction_done {
    2351              :                 // It is possible that this will take a long time; dropping the HTTP request will not cancel the compaction.
    2352            0 :                 rx.await.ok();
    2353            0 :             }
    2354              :         } else {
    2355            0 :             timeline
    2356            0 :                 .compact_with_options(&cancel, options, &ctx)
    2357            0 :                 .await
    2358            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    2359            0 :             if wait_until_uploaded {
    2360            0 :                 timeline.remote_client.wait_completion().await
    2361              :                 // XXX map to correct ApiError for the cases where it's due to shutdown
    2362            0 :                 .context("wait completion").map_err(ApiError::InternalServerError)?;
    2363            0 :             }
    2364              :         }
    2365            0 :         json_response(StatusCode::OK, ())
    2366            0 :     }
    2367            0 :     .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2368            0 :     .await
    2369            0 : }
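
The flag-building above relies on the enumset crate: each boolean query parameter ORs one variant into an EnumSet. A minimal sketch of that pattern, using a hypothetical Flag enum standing in for CompactFlags:

    use enumset::{EnumSet, EnumSetType};

    #[derive(EnumSetType, Debug)]
    enum Flag {
        ForceL0Compaction,
        ForceRepartition,
        DryRun,
    }

    fn flags_from_params(force_l0: bool, force_repartition: bool, dry_run: bool) -> EnumSet<Flag> {
        let mut flags = EnumSet::empty();
        if force_l0 {
            flags |= Flag::ForceL0Compaction;
        }
        if force_repartition {
            flags |= Flag::ForceRepartition;
        }
        if dry_run {
            flags |= Flag::DryRun;
        }
        flags
    }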
    2370              : 
    2371            0 : async fn timeline_mark_invisible_handler(
    2372            0 :     mut request: Request<Body>,
    2373            0 :     _cancel: CancellationToken,
    2374            0 : ) -> Result<Response<Body>, ApiError> {
    2375            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2376            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2377            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2378              : 
    2379            0 :     let compact_request = json_request_maybe::<Option<MarkInvisibleRequest>>(&mut request).await?;
    2380              : 
    2381            0 :     let state = get_state(&request);
    2382              : 
    2383            0 :     let visibility = match compact_request {
    2384            0 :         Some(req) => match req.is_visible {
    2385            0 :             Some(true) => TimelineVisibilityState::Visible,
    2386            0 :             Some(false) | None => TimelineVisibilityState::Invisible,
    2387              :         },
    2388            0 :         None => TimelineVisibilityState::Invisible,
    2389              :     };
    2390              : 
    2391            0 :     async {
    2392            0 :         let tenant = state
    2393            0 :             .tenant_manager
    2394            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    2395            0 :         let timeline = tenant.get_timeline(timeline_id, true)?;
    2396            0 :         timeline.remote_client.schedule_index_upload_for_timeline_invisible_state(visibility).map_err(ApiError::InternalServerError)?;
    2397            0 :         json_response(StatusCode::OK, ())
    2398            0 :     }
    2399            0 :     .instrument(info_span!("manual_timeline_mark_invisible", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2400            0 :     .await
    2401            0 : }
    2402              : 
    2403              : // Run offload immediately on given timeline.
    2404            0 : async fn timeline_offload_handler(
    2405            0 :     request: Request<Body>,
    2406            0 :     _cancel: CancellationToken,
    2407            0 : ) -> Result<Response<Body>, ApiError> {
    2408            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2409            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2410            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2411              : 
    2412            0 :     let state = get_state(&request);
    2413              : 
    2414            0 :     async {
    2415            0 :         let tenant = state
    2416            0 :             .tenant_manager
    2417            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    2418              : 
    2419            0 :         if tenant.get_offloaded_timeline(timeline_id).is_ok() {
    2420            0 :             return json_response(StatusCode::OK, ());
    2421            0 :         }
    2422            0 :         let timeline =
    2423            0 :             active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2424            0 :                 .await?;
    2425              : 
    2426            0 :         if !tenant.timeline_has_no_attached_children(timeline_id) {
    2427            0 :             return Err(ApiError::PreconditionFailed(
    2428            0 :                 "timeline has attached children".into(),
    2429            0 :             ));
    2430            0 :         }
    2431            0 :         if let (false, reason) = timeline.can_offload() {
    2432            0 :             return Err(ApiError::PreconditionFailed(
    2433            0 :                 format!("Timeline::can_offload() check failed: {reason}").into(),
    2434            0 :             ));
    2435            0 :         }
    2436            0 :         offload_timeline(&tenant, &timeline)
    2437            0 :             .await
    2438            0 :             .map_err(|e| {
    2439            0 :                 match e {
    2440            0 :                     OffloadError::Cancelled => ApiError::ResourceUnavailable("Timeline shutting down".into()),
    2441            0 :                     OffloadError::AlreadyInProgress => ApiError::Conflict("Timeline already being offloaded or deleted".into()),
    2442            0 :                     _ => ApiError::InternalServerError(anyhow!(e))
    2443              :                 }
    2444            0 :             })?;
    2445              : 
    2446            0 :         json_response(StatusCode::OK, ())
    2447            0 :     }
    2448            0 :     .instrument(info_span!("manual_timeline_offload", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2449            0 :     .await
    2450            0 : }
    2451              : 
    2452              : // Run checkpoint immediately on given timeline.
    2453            0 : async fn timeline_checkpoint_handler(
    2454            0 :     request: Request<Body>,
    2455            0 :     cancel: CancellationToken,
    2456            0 : ) -> Result<Response<Body>, ApiError> {
    2457            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2458            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2459            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2460              : 
    2461            0 :     let state = get_state(&request);
    2462              : 
    2463            0 :     let mut flags = EnumSet::empty();
    2464            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
    2465            0 :         flags |= CompactFlags::ForceL0Compaction;
    2466            0 :     }
    2467            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
    2468            0 :         flags |= CompactFlags::ForceRepartition;
    2469            0 :     }
    2470            0 :     if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
    2471            0 :         flags |= CompactFlags::ForceImageLayerCreation;
    2472            0 :     }
    2473              : 
    2474              :     // By default, checkpoints come with a compaction, but this may be disabled by tests that just want to flush + upload.
    2475            0 :     let compact = parse_query_param::<_, bool>(&request, "compact")?.unwrap_or(true);
    2476              : 
    2477            0 :     let wait_until_flushed: bool =
    2478            0 :         parse_query_param(&request, "wait_until_flushed")?.unwrap_or(true);
    2479              : 
    2480            0 :     let wait_until_uploaded =
    2481            0 :         parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
    2482              : 
    2483            0 :     async {
    2484            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    2485            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
    2486            0 :         if wait_until_flushed {
    2487            0 :             timeline.freeze_and_flush().await
    2488              :         } else {
    2489            0 :             timeline.freeze().await.and(Ok(()))
    2490            0 :         }.map_err(|e| {
    2491            0 :                 match e {
    2492            0 :                     tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
    2493            0 :                     other => ApiError::InternalServerError(other.into()),
    2494              : 
    2495              :                 }
    2496            0 :             })?;
    2497            0 :         if compact {
    2498            0 :             timeline
    2499            0 :                 .compact(&cancel, flags, &ctx)
    2500            0 :                 .await
    2501            0 :                 .map_err(|e|
    2502            0 :                     match e {
    2503            0 :                         CompactionError::ShuttingDown => ApiError::ShuttingDown,
    2504            0 :                         CompactionError::Offload(e) => ApiError::InternalServerError(anyhow::anyhow!(e)),
    2505            0 :                         CompactionError::CollectKeySpaceError(e) => ApiError::InternalServerError(anyhow::anyhow!(e)),
    2506            0 :                         CompactionError::Other(e) => ApiError::InternalServerError(e),
    2507            0 :                         CompactionError::AlreadyRunning(_) => ApiError::InternalServerError(anyhow::anyhow!(e)),
    2508            0 :                     }
    2509            0 :                 )?;
    2510            0 :         }
    2511              : 
    2512            0 :         if wait_until_uploaded {
    2513            0 :             tracing::info!("Waiting for uploads to complete...");
    2514            0 :             timeline.remote_client.wait_completion().await
    2515              :             // XXX map to correct ApiError for the cases where it's due to shutdown
    2516            0 :             .context("wait completion").map_err(ApiError::InternalServerError)?;
    2517            0 :             tracing::info!("Uploads completed up to {}", timeline.get_remote_consistent_lsn_projected().unwrap_or(Lsn(0)));
    2518            0 :         }
    2519              : 
    2520            0 :         json_response(StatusCode::OK, ())
    2521            0 :     }
    2522            0 :     .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2523            0 :     .await
    2524            0 : }
    2525              : 
    2526            0 : async fn timeline_download_remote_layers_handler_post(
    2527            0 :     mut request: Request<Body>,
    2528            0 :     _cancel: CancellationToken,
    2529            0 : ) -> Result<Response<Body>, ApiError> {
    2530            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2531            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2532            0 :     let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
    2533            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2534              : 
    2535            0 :     let state = get_state(&request);
    2536              : 
    2537            0 :     let timeline =
    2538            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2539            0 :             .await?;
    2540            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    2541            0 :         .with_scope_timeline(&timeline);
    2542            0 :     match timeline.spawn_download_all_remote_layers(body, &ctx).await {
    2543            0 :         Ok(st) => json_response(StatusCode::ACCEPTED, st),
    2544            0 :         Err(st) => json_response(StatusCode::CONFLICT, st),
    2545              :     }
    2546            0 : }
    2547              : 
    2548            0 : async fn timeline_download_remote_layers_handler_get(
    2549            0 :     request: Request<Body>,
    2550            0 :     _cancel: CancellationToken,
    2551            0 : ) -> Result<Response<Body>, ApiError> {
    2552            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2553            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2554            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2555            0 :     let state = get_state(&request);
    2556              : 
    2557            0 :     let timeline =
    2558            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    2559            0 :             .await?;
    2560            0 :     let info = timeline
    2561            0 :         .get_download_all_remote_layers_task_info()
    2562            0 :         .context("task never started since last pageserver process start")
    2563            0 :         .map_err(|e| ApiError::NotFound(e.into()))?;
    2564            0 :     json_response(StatusCode::OK, info)
    2565            0 : }
    2566              : 
    2567            0 : async fn timeline_detach_ancestor_handler(
    2568            0 :     request: Request<Body>,
    2569            0 :     _cancel: CancellationToken,
    2570            0 : ) -> Result<Response<Body>, ApiError> {
    2571              :     use pageserver_api::models::detach_ancestor::AncestorDetached;
    2572              : 
    2573              :     use crate::tenant::timeline::detach_ancestor;
    2574              : 
    2575            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2576            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2577            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2578            0 :     let behavior: Option<DetachBehavior> = parse_query_param(&request, "detach_behavior")?;
    2579              : 
    2580            0 :     let behavior = behavior.unwrap_or_default();
    2581              : 
    2582            0 :     let span = tracing::info_span!("detach_ancestor", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
    2583              : 
    2584            0 :     async move {
    2585            0 :         let mut options = detach_ancestor::Options::default();
    2586              : 
    2587            0 :         let rewrite_concurrency =
    2588            0 :             parse_query_param::<_, std::num::NonZeroUsize>(&request, "rewrite_concurrency")?;
    2589            0 :         let copy_concurrency =
    2590            0 :             parse_query_param::<_, std::num::NonZeroUsize>(&request, "copy_concurrency")?;
    2591              : 
    2592            0 :         [
    2593            0 :             (&mut options.rewrite_concurrency, rewrite_concurrency),
    2594            0 :             (&mut options.copy_concurrency, copy_concurrency),
    2595            0 :         ]
    2596            0 :         .into_iter()
    2597            0 :         .filter_map(|(target, val)| val.map(|val| (target, val)))
    2598            0 :         .for_each(|(target, val)| *target = val);
    2599              : 
    2600            0 :         let state = get_state(&request);
    2601              : 
    2602            0 :         let tenant = state
    2603            0 :             .tenant_manager
    2604            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    2605              : 
    2606            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    2607              : 
    2608            0 :         let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download);
    2609            0 :         let ctx = &ctx;
    2610              : 
    2611              :         // Flush the upload queues of all timelines before detaching the ancestor. We do the same thing
    2612              :         // again during shutdown. This early upload ensures the pageserver does not need to upload too
    2613              :         // many things and create downtime during timeline reloads.
    2614            0 :         for timeline in tenant.list_timelines() {
    2615            0 :             timeline
    2616            0 :                 .remote_client
    2617            0 :                 .wait_completion()
    2618            0 :                 .await
    2619            0 :                 .map_err(|e| {
    2620            0 :                     ApiError::PreconditionFailed(format!("cannot drain upload queue: {e}").into())
    2621            0 :                 })?;
    2622              :         }
    2623              : 
    2624            0 :         tracing::info!("all timeline upload queues are drained");
    2625              : 
    2626            0 :         let timeline = tenant.get_timeline(timeline_id, true)?;
    2627            0 :         let ctx = &ctx.with_scope_timeline(&timeline);
    2628              : 
    2629            0 :         let progress = timeline
    2630            0 :             .prepare_to_detach_from_ancestor(&tenant, options, behavior, ctx)
    2631            0 :             .await?;
    2632              : 
    2633              :         // uncomment to allow Tenant::drop as early as possible
    2634              :         // drop(tenant);
    2635              : 
    2636            0 :         let resp = match progress {
    2637            0 :             detach_ancestor::Progress::Prepared(attempt, prepared) => {
    2638              :                 // it would be great to tag the guard onto the tenant activation future
    2639            0 :                 let reparented_timelines = state
    2640            0 :                     .tenant_manager
    2641            0 :                     .complete_detaching_timeline_ancestor(
    2642            0 :                         tenant_shard_id,
    2643            0 :                         timeline_id,
    2644            0 :                         prepared,
    2645            0 :                         behavior,
    2646            0 :                         attempt,
    2647            0 :                         ctx,
    2648            0 :                     )
    2649            0 :                     .await?;
    2650              : 
    2651            0 :                 AncestorDetached {
    2652            0 :                     reparented_timelines,
    2653            0 :                 }
    2654              :             }
    2655            0 :             detach_ancestor::Progress::Done(resp) => resp,
    2656              :         };
    2657              : 
    2658            0 :         json_response(StatusCode::OK, resp)
    2659            0 :     }
    2660            0 :     .instrument(span)
    2661            0 :     .await
    2662            0 : }
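
The concurrency overrides above use a small array of (&mut target, Option<value>) pairs so that only parameters actually supplied replace the defaults. The same pattern in isolation, with a hypothetical Options struct in place of detach_ancestor::Options:

    #[derive(Debug, Default)]
    struct Options {
        rewrite_concurrency: usize,
        copy_concurrency: usize,
    }

    fn apply_overrides(mut options: Options, rewrite: Option<usize>, copy: Option<usize>) -> Options {
        [
            (&mut options.rewrite_concurrency, rewrite),
            (&mut options.copy_concurrency, copy),
        ]
        .into_iter()
        // Keep only the overrides that were actually provided...
        .filter_map(|(target, val)| val.map(|val| (target, val)))
        // ...and write them over the defaults.
        .for_each(|(target, val)| *target = val);
        options
    }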
    2663              : 
    2664            0 : async fn deletion_queue_flush(
    2665            0 :     r: Request<Body>,
    2666            0 :     cancel: CancellationToken,
    2667            0 : ) -> Result<Response<Body>, ApiError> {
    2668            0 :     let state = get_state(&r);
    2669              : 
    2670            0 :     let execute = parse_query_param(&r, "execute")?.unwrap_or(false);
    2671              : 
    2672            0 :     let flush = async {
    2673            0 :         if execute {
    2674            0 :             state.deletion_queue_client.flush_execute().await
    2675              :         } else {
    2676            0 :             state.deletion_queue_client.flush().await
    2677              :         }
    2678            0 :     }
    2679              :     // DeletionQueueError's only case is shutting down.
    2680            0 :     .map_err(|_| ApiError::ShuttingDown);
    2681              : 
    2682            0 :     tokio::select! {
    2683            0 :         res = flush => {
    2684            0 :             res.map(|()| json_response(StatusCode::OK, ()))?
    2685              :         }
    2686            0 :         _ = cancel.cancelled() => {
    2687            0 :             Err(ApiError::ShuttingDown)
    2688              :         }
    2689              :     }
    2690            0 : }
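
The handler above races the flush future against the server's CancellationToken with tokio::select!. A self-contained sketch of that shape, with simulated work standing in for the real deletion-queue flush:

    use std::time::Duration;

    use tokio_util::sync::CancellationToken;

    async fn flush_or_shutdown(cancel: CancellationToken) -> Result<(), &'static str> {
        let flush = async {
            // Simulated work in place of the real flush.
            tokio::time::sleep(Duration::from_millis(10)).await;
            Ok::<(), &'static str>(())
        };

        tokio::select! {
            res = flush => res,
            _ = cancel.cancelled() => Err("shutting down"),
        }
    }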
    2691              : 
    2692            0 : async fn getpage_at_lsn_handler(
    2693            0 :     request: Request<Body>,
    2694            0 :     cancel: CancellationToken,
    2695            0 : ) -> Result<Response<Body>, ApiError> {
    2696            0 :     getpage_at_lsn_handler_inner(false, request, cancel).await
    2697            0 : }
    2698              : 
    2699            0 : async fn touchpage_at_lsn_handler(
    2700            0 :     request: Request<Body>,
    2701            0 :     cancel: CancellationToken,
    2702            0 : ) -> Result<Response<Body>, ApiError> {
    2703            0 :     getpage_at_lsn_handler_inner(true, request, cancel).await
    2704            0 : }
    2705              : 
    2706              : /// Check whether a `GetPage@Lsn` request succeeds; useful for manual debugging.
    2707            0 : async fn getpage_at_lsn_handler_inner(
    2708            0 :     touch: bool,
    2709            0 :     request: Request<Body>,
    2710            0 :     _cancel: CancellationToken,
    2711            0 : ) -> Result<Response<Body>, ApiError> {
    2712            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2713            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2714              :     // Require pageserver admin permission for this API instead of only a tenant-level token.
    2715            0 :     check_permission(&request, None)?;
    2716            0 :     let state = get_state(&request);
    2717              : 
    2718              :     struct Key(pageserver_api::key::Key);
    2719              : 
    2720              :     impl std::str::FromStr for Key {
    2721              :         type Err = anyhow::Error;
    2722              : 
    2723            0 :         fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
    2724            0 :             pageserver_api::key::Key::from_hex(s).map(Key)
    2725            0 :         }
    2726              :     }
    2727              : 
    2728            0 :     let key: Key = parse_query_param(&request, "key")?
    2729            0 :         .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'key' query parameter")))?;
    2730            0 :     let lsn: Option<Lsn> = parse_query_param(&request, "lsn")?;
    2731              : 
    2732            0 :     async {
    2733            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    2734            0 :         let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
    2735            0 :             .download_behavior(DownloadBehavior::Download)
    2736            0 :             .scope(context::Scope::new_timeline(&timeline))
    2737            0 :             .read_path_debug(true)
    2738            0 :             .root();
    2739              : 
    2740              :         // Use last_record_lsn if no lsn is provided
    2741            0 :         let lsn = lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
    2742            0 :         let page = timeline.get(key.0, lsn, &ctx).await?;
    2743              : 
    2744            0 :         if touch {
    2745            0 :             json_response(StatusCode::OK, ())
    2746              :         } else {
    2747            0 :             Result::<_, ApiError>::Ok(
    2748            0 :                 Response::builder()
    2749            0 :                     .status(StatusCode::OK)
    2750            0 :                     .header(header::CONTENT_TYPE, "application/octet-stream")
    2751            0 :                     .body(hyper::Body::from(page))
    2752            0 :                     .unwrap(),
    2753            0 :             )
    2754              :         }
    2755            0 :     }
    2756            0 :     .instrument(info_span!("timeline_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2757            0 :     .await
    2758            0 : }
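
A self-contained sketch of the local-newtype trick above: `parse_query_param`-style helpers only need `FromStr`, so wrapping a foreign type in a one-field struct is enough to make it parseable from the query string. `Hex32` and its format are illustrative, not the pageserver key format.

struct Hex32(u32);

impl std::str::FromStr for Hex32 {
    type Err = std::num::ParseIntError;

    // Accepts a bare hex string such as "deadbeef".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        u32::from_str_radix(s, 16).map(Hex32)
    }
}

fn demo() {
    let v: Hex32 = "ff".parse().expect("valid hex");
    assert_eq!(v.0, 255);
}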
    2759              : 
    2760            0 : async fn timeline_collect_keyspace(
    2761            0 :     request: Request<Body>,
    2762            0 :     _cancel: CancellationToken,
    2763            0 : ) -> Result<Response<Body>, ApiError> {
    2764            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2765            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    2766            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    2767            0 :     let state = get_state(&request);
    2768              : 
    2769            0 :     let at_lsn: Option<Lsn> = parse_query_param(&request, "at_lsn")?;
    2770              : 
    2771            0 :     async {
    2772            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
    2773            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
    2774            0 :         let at_lsn = at_lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
    2775            0 :         let (dense_ks, sparse_ks) = timeline
    2776            0 :             .collect_keyspace(at_lsn, &ctx)
    2777            0 :             .await
    2778            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))?;
    2779              : 
    2780              :         // This API is currently used by pagebench. Pagebench will iterate all keys within the keyspace.
    2781              :         // Therefore, we split dense/sparse keys in this API.
    2782            0 :         let res = pageserver_api::models::partitioning::Partitioning { keys: dense_ks, sparse_keys: sparse_ks, at_lsn };
    2783              : 
    2784            0 :         json_response(StatusCode::OK, res)
    2785            0 :     }
    2786            0 :     .instrument(info_span!("timeline_collect_keyspace", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
    2787            0 :     .await
    2788            0 : }
    2789              : 
    2790            0 : async fn active_timeline_of_active_tenant(
    2791            0 :     tenant_manager: &TenantManager,
    2792            0 :     tenant_shard_id: TenantShardId,
    2793            0 :     timeline_id: TimelineId,
    2794            0 : ) -> Result<Arc<Timeline>, ApiError> {
    2795            0 :     let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
    2796              : 
    2797            0 :     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    2798              : 
    2799            0 :     Ok(tenant.get_timeline(timeline_id, true)?)
    2800            0 : }
    2801              : 
    2802            0 : async fn always_panic_handler(
    2803            0 :     req: Request<Body>,
    2804            0 :     _cancel: CancellationToken,
    2805            0 : ) -> Result<Response<Body>, ApiError> {
    2806              :     // Deliberately cause a panic to exercise the panic hook registered via std::panic::set_hook().
    2807              :     // For pageserver, the relevant panic hook is `tracing_panic_hook`, and the `sentry` crate's wrapper around it.
    2808              :     // Use catch_unwind to ensure that neither tokio nor hyper is distracted by our panic.
    2809            0 :     let query = req.uri().query();
    2810            0 :     let _ = std::panic::catch_unwind(|| {
    2811            0 :         panic!("unconditional panic for testing panic hook integration; request query: {query:?}")
    2812              :     });
    2813            0 :     json_response(StatusCode::NO_CONTENT, ())
    2814            0 : }
    2815              : 
    2816            0 : async fn disk_usage_eviction_run(
    2817            0 :     mut r: Request<Body>,
    2818            0 :     cancel: CancellationToken,
    2819            0 : ) -> Result<Response<Body>, ApiError> {
    2820            0 :     check_permission(&r, None)?;
    2821              : 
    2822            0 :     #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
    2823              :     struct Config {
    2824              :         /// How many bytes to evict before reporting that pressure is relieved.
    2825              :         evict_bytes: u64,
    2826              : 
    2827              :         #[serde(default)]
    2828              :         eviction_order: pageserver_api::config::EvictionOrder,
    2829              :     }
    2830              : 
    2831              :     #[derive(Debug, Clone, Copy, serde::Serialize)]
    2832              :     struct Usage {
    2833              :         // remains unchanged after instantiation of the struct
    2834              :         evict_bytes: u64,
    2835              :         // updated by `add_available_bytes`
    2836              :         freed_bytes: u64,
    2837              :     }
    2838              : 
    2839              :     impl crate::disk_usage_eviction_task::Usage for Usage {
    2840            0 :         fn has_pressure(&self) -> bool {
    2841            0 :             self.evict_bytes > self.freed_bytes
    2842            0 :         }
    2843              : 
    2844            0 :         fn add_available_bytes(&mut self, bytes: u64) {
    2845            0 :             self.freed_bytes += bytes;
    2846            0 :         }
    2847              :     }
    2848              : 
    2849            0 :     let config = json_request::<Config>(&mut r).await?;
    2850              : 
    2851            0 :     let usage = Usage {
    2852            0 :         evict_bytes: config.evict_bytes,
    2853            0 :         freed_bytes: 0,
    2854            0 :     };
    2855              : 
    2856            0 :     let state = get_state(&r);
    2857            0 :     let eviction_state = state.disk_usage_eviction_state.clone();
    2858              : 
    2859            0 :     let res = crate::disk_usage_eviction_task::disk_usage_eviction_task_iteration_impl(
    2860            0 :         &eviction_state,
    2861            0 :         &state.remote_storage,
    2862            0 :         usage,
    2863            0 :         &state.tenant_manager,
    2864            0 :         config.eviction_order.into(),
    2865            0 :         &cancel,
    2866            0 :     )
    2867            0 :     .await;
    2868              : 
    2869            0 :     info!(?res, "disk_usage_eviction_task_iteration_impl finished");
    2870              : 
    2871            0 :     let res = res.map_err(ApiError::InternalServerError)?;
    2872              : 
    2873            0 :     json_response(StatusCode::OK, res)
    2874            0 : }
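
A self-contained sketch of the `Usage` accounting above, restated standalone: pressure persists until `add_available_bytes` has accumulated at least `evict_bytes`.

struct Usage {
    evict_bytes: u64,
    freed_bytes: u64,
}

impl Usage {
    fn has_pressure(&self) -> bool {
        self.evict_bytes > self.freed_bytes
    }

    fn add_available_bytes(&mut self, bytes: u64) {
        self.freed_bytes += bytes;
    }
}

fn main() {
    let mut usage = Usage { evict_bytes: 1024, freed_bytes: 0 };
    usage.add_available_bytes(512);
    assert!(usage.has_pressure()); // 512 < 1024: keep evicting
    usage.add_available_bytes(512);
    assert!(!usage.has_pressure()); // 1024 freed: pressure relieved
}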
    2875              : 
    2876            0 : async fn secondary_upload_handler(
    2877            0 :     request: Request<Body>,
    2878            0 :     _cancel: CancellationToken,
    2879            0 : ) -> Result<Response<Body>, ApiError> {
    2880            0 :     let state = get_state(&request);
    2881            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2882            0 :     state
    2883            0 :         .secondary_controller
    2884            0 :         .upload_tenant(tenant_shard_id)
    2885            0 :         .await?;
    2886              : 
    2887            0 :     json_response(StatusCode::OK, ())
    2888            0 : }
    2889              : 
    2890            0 : async fn tenant_scan_remote_handler(
    2891            0 :     request: Request<Body>,
    2892            0 :     cancel: CancellationToken,
    2893            0 : ) -> Result<Response<Body>, ApiError> {
    2894            0 :     let state = get_state(&request);
    2895            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    2896              : 
    2897            0 :     let mut response = TenantScanRemoteStorageResponse::default();
    2898              : 
    2899            0 :     let (shards, _other_keys) =
    2900            0 :         list_remote_tenant_shards(&state.remote_storage, tenant_id, cancel.clone())
    2901            0 :             .await
    2902            0 :             .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    2903              : 
    2904            0 :     for tenant_shard_id in shards {
    2905            0 :         let (timeline_ids, _other_keys) =
    2906            0 :             list_remote_timelines(&state.remote_storage, tenant_shard_id, cancel.clone())
    2907            0 :                 .await
    2908            0 :                 .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
    2909              : 
    2910            0 :         let mut generation = Generation::none();
    2911            0 :         for timeline_id in timeline_ids {
    2912            0 :             match download_index_part(
    2913            0 :                 &state.remote_storage,
    2914            0 :                 &tenant_shard_id,
    2915            0 :                 &timeline_id,
    2916              :                 Generation::MAX,
    2917            0 :                 &cancel,
    2918              :             )
    2919            0 :             .instrument(info_span!("download_index_part",
    2920              :                          tenant_id=%tenant_shard_id.tenant_id,
    2921            0 :                          shard_id=%tenant_shard_id.shard_slug(),
    2922              :                          %timeline_id))
    2923            0 :             .await
    2924              :             {
    2925            0 :                 Ok((index_part, index_generation, _index_mtime)) => {
    2926            0 :                     tracing::info!(
    2927            0 :                         "Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, {} consistent LSN)",
    2928            0 :                         index_part.layer_metadata.len(),
    2929            0 :                         index_part.metadata.disk_consistent_lsn()
    2930              :                     );
    2931            0 :                     generation = std::cmp::max(generation, index_generation);
    2932              :                 }
    2933              :                 Err(DownloadError::NotFound) => {
    2934              :                     // This is normal for tenants that were created with multiple shards: they have an unsharded path
    2935              :                     // containing the timeline's initdb tarball but no index.  Otherwise it is a bit strange.
    2936            0 :                     tracing::info!(
    2937            0 :                         "Timeline path {tenant_shard_id}/{timeline_id} exists in remote storage but has no index, skipping"
    2938              :                     );
    2939            0 :                     continue;
    2940              :                 }
    2941            0 :                 Err(e) => {
    2942            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    2943              :                 }
    2944              :             };
    2945              :         }
    2946              : 
    2947            0 :         let result =
    2948            0 :             download_tenant_manifest(&state.remote_storage, &tenant_shard_id, generation, &cancel)
    2949            0 :                 .instrument(info_span!("download_tenant_manifest",
    2950              :                             tenant_id=%tenant_shard_id.tenant_id,
    2951            0 :                             shard_id=%tenant_shard_id.shard_slug()))
    2952            0 :                 .await;
    2953            0 :         let stripe_size = match result {
    2954            0 :             Ok((manifest, _, _)) => manifest.stripe_size,
    2955            0 :             Err(DownloadError::NotFound) => None,
    2956            0 :             Err(err) => return Err(ApiError::InternalServerError(anyhow!(err))),
    2957              :         };
    2958              : 
    2959            0 :         response.shards.push(TenantScanRemoteStorageShard {
    2960            0 :             tenant_shard_id,
    2961            0 :             generation: generation.into(),
    2962            0 :             stripe_size,
    2963            0 :         });
    2964              :     }
    2965              : 
    2966            0 :     if response.shards.is_empty() {
    2967            0 :         return Err(ApiError::NotFound(
    2968            0 :             anyhow::anyhow!("No shards found for tenant ID {tenant_id}").into(),
    2969            0 :         ));
    2970            0 :     }
    2971              : 
    2972            0 :     json_response(StatusCode::OK, response)
    2973            0 : }
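
A small sketch of the generation scan above: start from a minimal sentinel and keep the maximum index generation seen across timelines. Plain integers stand in for the pageserver `Generation` type.

fn max_generation(index_generations: &[u32]) -> u32 {
    // 0 plays the role of the Generation::none() sentinel here.
    index_generations.iter().fold(0, |acc, &g| acc.max(g))
}

fn demo() {
    assert_eq!(max_generation(&[3, 7, 5]), 7);
}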
    2974              : 
    2975            0 : async fn secondary_download_handler(
    2976            0 :     request: Request<Body>,
    2977            0 :     _cancel: CancellationToken,
    2978            0 : ) -> Result<Response<Body>, ApiError> {
    2979            0 :     let state = get_state(&request);
    2980            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    2981            0 :     let wait = parse_query_param(&request, "wait_ms")?.map(Duration::from_millis);
    2982              : 
    2983              :     // We don't need this to issue the download request, but:
    2984              :     // - it enables us to cleanly return 404 if we get a request for an absent shard
    2985              :     // - we will use this to provide status feedback in the response
    2986            0 :     let Some(secondary_tenant) = state
    2987            0 :         .tenant_manager
    2988            0 :         .get_secondary_tenant_shard(tenant_shard_id)
    2989              :     else {
    2990            0 :         return Err(ApiError::NotFound(
    2991            0 :             anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
    2992            0 :         ));
    2993              :     };
    2994              : 
    2995            0 :     let timeout = wait.unwrap_or(Duration::MAX);
    2996              : 
    2997            0 :     let result = tokio::time::timeout(
    2998            0 :         timeout,
    2999            0 :         state.secondary_controller.download_tenant(tenant_shard_id),
    3000            0 :     )
    3001            0 :     .await;
    3002              : 
    3003            0 :     let progress = secondary_tenant.progress.lock().unwrap().clone();
    3004              : 
    3005            0 :     let status = match result {
    3006              :         Ok(Ok(())) => {
    3007            0 :             if progress.layers_downloaded >= progress.layers_total {
    3008              :                 // Download job ran to completion
    3009            0 :                 StatusCode::OK
    3010              :             } else {
    3011              :                 // Download dropped out without errors because it ran out of time budget
    3012            0 :                 StatusCode::ACCEPTED
    3013              :             }
    3014              :         }
    3015              :         // Edge case: downloads aren't usually fallible; things like a missing heatmap are considered
    3016              :         // okay.  We could get an error here in the unlikely edge case that the tenant
    3017              :         // was detached between our check above and executing the download job.
    3018            0 :         Ok(Err(e)) => return Err(e.into()),
    3019              :         // A timeout is not an error: we have started the download, we're just not done
    3020              :         // yet.  The caller will get a response body indicating status.
    3021            0 :         Err(_) => StatusCode::ACCEPTED,
    3022              :     };
    3023              : 
    3024            0 :     json_response(status, progress)
    3025            0 : }
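
A self-contained sketch of the timeout-to-status mapping above: completion within the budget maps to 200 OK, while a timeout is not an error and maps to 202 Accepted. `download` is a hypothetical stand-in for the secondary download job.

use std::time::Duration;

// Hypothetical stand-in for the real download job.
async fn download() {}

async fn status_for_download(budget: Duration) -> u16 {
    match tokio::time::timeout(budget, download()).await {
        Ok(()) => 200,        // job ran to completion
        Err(_elapsed) => 202, // still running; the caller polls for progress
    }
}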
    3026              : 
    3027            0 : async fn wait_lsn_handler(
    3028            0 :     mut request: Request<Body>,
    3029            0 :     cancel: CancellationToken,
    3030            0 : ) -> Result<Response<Body>, ApiError> {
    3031            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3032            0 :     let wait_lsn_request: TenantWaitLsnRequest = json_request(&mut request).await?;
    3033              : 
    3034            0 :     let state = get_state(&request);
    3035            0 :     let tenant = state
    3036            0 :         .tenant_manager
    3037            0 :         .get_attached_tenant_shard(tenant_shard_id)?;
    3038              : 
    3039            0 :     let mut wait_futures = Vec::default();
    3040            0 :     for timeline in tenant.list_timelines() {
    3041            0 :         let Some(lsn) = wait_lsn_request.timelines.get(&timeline.timeline_id) else {
    3042            0 :             continue;
    3043              :         };
    3044              : 
    3045            0 :         let fut = {
    3046            0 :             let timeline = timeline.clone();
    3047            0 :             let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
    3048            0 :             async move {
    3049            0 :                 timeline
    3050            0 :                     .wait_lsn(
    3051            0 :                         *lsn,
    3052            0 :                         WaitLsnWaiter::HttpEndpoint,
    3053            0 :                         WaitLsnTimeout::Custom(wait_lsn_request.timeout),
    3054            0 :                         &ctx,
    3055            0 :                     )
    3056            0 :                     .await
    3057            0 :             }
    3058              :         };
    3059            0 :         wait_futures.push(fut);
    3060              :     }
    3061              : 
    3062            0 :     if wait_futures.is_empty() {
    3063            0 :         return json_response(StatusCode::NOT_FOUND, ());
    3064            0 :     }
    3065              : 
    3066            0 :     let all_done = tokio::select! {
    3067            0 :         results = join_all(wait_futures) => {
    3068            0 :             results.iter().all(|res| res.is_ok())
    3069              :         },
    3070            0 :         _ = cancel.cancelled() => {
    3071            0 :             return Err(ApiError::Cancelled);
    3072              :         }
    3073              :     };
    3074              : 
    3075            0 :     let status = if all_done {
    3076            0 :         StatusCode::OK
    3077              :     } else {
    3078            0 :         StatusCode::ACCEPTED
    3079              :     };
    3080              : 
    3081            0 :     json_response(status, ())
    3082            0 : }
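
A small sketch of the aggregation above, with placeholder results standing in for the per-timeline wait_lsn calls: `join_all` waits for every future, and the response is OK only if all of them succeeded.

use futures::future::join_all;

async fn all_waits_ok(waits: Vec<Result<(), ()>>) -> bool {
    // Wrap each pre-computed result in a trivial future, as a stand-in for wait_lsn.
    let futures = waits.into_iter().map(|res| async move { res });
    join_all(futures).await.iter().all(|res| res.is_ok())
}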
    3083              : 
    3084            0 : async fn secondary_status_handler(
    3085            0 :     request: Request<Body>,
    3086            0 :     _cancel: CancellationToken,
    3087            0 : ) -> Result<Response<Body>, ApiError> {
    3088            0 :     let state = get_state(&request);
    3089            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3090              : 
    3091            0 :     let Some(secondary_tenant) = state
    3092            0 :         .tenant_manager
    3093            0 :         .get_secondary_tenant_shard(tenant_shard_id)
    3094              :     else {
    3095            0 :         return Err(ApiError::NotFound(
    3096            0 :             anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
    3097            0 :         ));
    3098              :     };
    3099              : 
    3100            0 :     let progress = secondary_tenant.progress.lock().unwrap().clone();
    3101              : 
    3102            0 :     json_response(StatusCode::OK, progress)
    3103            0 : }
    3104              : 
    3105            0 : async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
    3106            0 :     json_response(
    3107              :         StatusCode::NOT_FOUND,
    3108            0 :         HttpErrorBody::from_msg("page not found".to_owned()),
    3109              :     )
    3110            0 : }
    3111              : 
    3112            0 : async fn post_tracing_event_handler(
    3113            0 :     mut r: Request<Body>,
    3114            0 :     _cancel: CancellationToken,
    3115            0 : ) -> Result<Response<Body>, ApiError> {
    3116            0 :     #[derive(Debug, serde::Deserialize)]
    3117              :     #[serde(rename_all = "lowercase")]
    3118              :     enum Level {
    3119              :         Error,
    3120              :         Warn,
    3121              :         Info,
    3122              :         Debug,
    3123              :         Trace,
    3124              :     }
    3125            0 :     #[derive(Debug, serde::Deserialize)]
    3126              :     struct Request {
    3127              :         level: Level,
    3128              :         message: String,
    3129              :     }
    3130            0 :     let body: Request = json_request(&mut r)
    3131            0 :         .await
    3132            0 :         .map_err(|_| ApiError::BadRequest(anyhow::anyhow!("invalid JSON body")))?;
    3133              : 
    3134            0 :     match body.level {
    3135            0 :         Level::Error => tracing::error!(?body.message),
    3136            0 :         Level::Warn => tracing::warn!(?body.message),
    3137            0 :         Level::Info => tracing::info!(?body.message),
    3138            0 :         Level::Debug => tracing::debug!(?body.message),
    3139            0 :         Level::Trace => tracing::trace!(?body.message),
    3140              :     }
    3141              : 
    3142            0 :     json_response(StatusCode::OK, ())
    3143            0 : }
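
Because of `#[serde(rename_all = "lowercase")]`, the endpoint accepts level names in lowercase. A sketch of deserializing a valid body, assuming the handler-local `Request` and `Level` types above are in scope:

fn demo() {
    // {"level": "warn", "message": "hello"} deserializes to Level::Warn.
    let body: Request = serde_json::from_str(r#"{"level": "warn", "message": "hello"}"#)
        .expect("valid body");
    let _ = body;
}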
    3144              : 
    3145            0 : async fn put_io_engine_handler(
    3146            0 :     mut r: Request<Body>,
    3147            0 :     _cancel: CancellationToken,
    3148            0 : ) -> Result<Response<Body>, ApiError> {
    3149            0 :     check_permission(&r, None)?;
    3150            0 :     let kind: crate::virtual_file::IoEngineKind = json_request(&mut r).await?;
    3151            0 :     crate::virtual_file::io_engine::set(kind);
    3152            0 :     json_response(StatusCode::OK, ())
    3153            0 : }
    3154              : 
    3155            0 : async fn put_io_mode_handler(
    3156            0 :     mut r: Request<Body>,
    3157            0 :     _cancel: CancellationToken,
    3158            0 : ) -> Result<Response<Body>, ApiError> {
    3159            0 :     check_permission(&r, None)?;
    3160            0 :     let mode: IoMode = json_request(&mut r).await?;
    3161            0 :     crate::virtual_file::set_io_mode(mode);
    3162            0 :     json_response(StatusCode::OK, ())
    3163            0 : }
    3164              : 
    3165              : /// Polled by control plane.
    3166              : ///
    3167              : /// See [`crate::utilization`].
    3168            0 : async fn get_utilization(
    3169            0 :     r: Request<Body>,
    3170            0 :     _cancel: CancellationToken,
    3171            0 : ) -> Result<Response<Body>, ApiError> {
    3172            0 :     fail::fail_point!("get-utilization-http-handler", |_| {
    3173            0 :         Err(ApiError::ResourceUnavailable("failpoint".into()))
    3174            0 :     });
    3175              : 
    3176              :     // this probably could be completely public, but let's make that change later.
    3177            0 :     check_permission(&r, None)?;
    3178              : 
    3179            0 :     let state = get_state(&r);
    3180            0 :     let mut g = state.latest_utilization.lock().await;
    3181              : 
    3182            0 :     let regenerate_every = Duration::from_secs(1);
    3183            0 :     let still_valid = g
    3184            0 :         .as_ref()
    3185            0 :         .is_some_and(|(captured_at, _)| captured_at.elapsed() < regenerate_every);
    3186              : 
    3187              :     // avoid needless statvfs calls, even though those should be non-blocking and fast.
    3188              :     // regenerate at most at 1Hz, to allow polling at any rate.
    3189            0 :     if !still_valid {
    3190            0 :         let path = state.conf.tenants_path();
    3191            0 :         let doc =
    3192            0 :             crate::utilization::regenerate(state.conf, path.as_std_path(), &state.tenant_manager)
    3193            0 :                 .map_err(ApiError::InternalServerError)?;
    3194              : 
    3195            0 :         let mut buf = Vec::new();
    3196            0 :         serde_json::to_writer(&mut buf, &doc)
    3197            0 :             .context("serialize")
    3198            0 :             .map_err(ApiError::InternalServerError)?;
    3199              : 
    3200            0 :         let body = bytes::Bytes::from(buf);
    3201              : 
    3202            0 :         *g = Some((std::time::Instant::now(), body));
    3203            0 :     }
    3204              : 
    3205              :     // hyper 0.14 doesn't yet have Response::clone so this is a bit of extra legwork
    3206            0 :     let cached = g.as_ref().expect("just set").1.clone();
    3207              : 
    3208            0 :     Response::builder()
    3209            0 :         .header(hyper::http::header::CONTENT_TYPE, "application/json")
    3210              :         // considered using an HTTP Date header, but that has only second precision,
    3211              :         // which gives no debugging aid
    3212            0 :         .status(StatusCode::OK)
    3213            0 :         .body(hyper::Body::from(cached))
    3214            0 :         .context("build response")
    3215            0 :         .map_err(ApiError::InternalServerError)
    3216            0 : }
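
A self-contained sketch of the regenerate-at-most-1Hz caching above: keep the capture timestamp next to the payload and rebuild only once it has gone stale. The generic `Cached` type is illustrative, not the pageserver's.

use std::time::{Duration, Instant};

struct Cached<T> {
    inner: Option<(Instant, T)>,
}

impl<T: Clone> Cached<T> {
    fn get_or_regenerate(&mut self, ttl: Duration, regenerate: impl FnOnce() -> T) -> T {
        let still_valid = self
            .inner
            .as_ref()
            .is_some_and(|(captured_at, _)| captured_at.elapsed() < ttl);
        if !still_valid {
            // Stale or never captured: rebuild and stamp with the current time.
            self.inner = Some((Instant::now(), regenerate()));
        }
        self.inner.as_ref().expect("just set").1.clone()
    }
}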
    3217              : 
    3218            0 : async fn list_aux_files(
    3219            0 :     mut request: Request<Body>,
    3220            0 :     _cancel: CancellationToken,
    3221            0 : ) -> Result<Response<Body>, ApiError> {
    3222            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3223            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3224            0 :     let body: ListAuxFilesRequest = json_request(&mut request).await?;
    3225            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    3226              : 
    3227            0 :     let state = get_state(&request);
    3228              : 
    3229            0 :     let timeline =
    3230            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    3231            0 :             .await?;
    3232              : 
    3233            0 :     let io_concurrency = IoConcurrency::spawn_from_conf(
    3234            0 :         state.conf.get_vectored_concurrent_io,
    3235            0 :         timeline.gate.enter().map_err(|_| ApiError::Cancelled)?,
    3236              :     );
    3237              : 
    3238            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
    3239            0 :         .with_scope_timeline(&timeline);
    3240            0 :     let files = timeline
    3241            0 :         .list_aux_files(body.lsn, &ctx, io_concurrency)
    3242            0 :         .await?;
    3243            0 :     json_response(StatusCode::OK, files)
    3244            0 : }
    3245              : 
    3246            0 : async fn perf_info(
    3247            0 :     request: Request<Body>,
    3248            0 :     _cancel: CancellationToken,
    3249            0 : ) -> Result<Response<Body>, ApiError> {
    3250            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3251            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3252            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    3253              : 
    3254            0 :     let state = get_state(&request);
    3255              : 
    3256            0 :     let timeline =
    3257            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    3258            0 :             .await?;
    3259              : 
    3260            0 :     let result = timeline.perf_info().await;
    3261              : 
    3262            0 :     json_response(StatusCode::OK, result)
    3263            0 : }
    3264              : 
    3265            0 : async fn ingest_aux_files(
    3266            0 :     mut request: Request<Body>,
    3267            0 :     _cancel: CancellationToken,
    3268            0 : ) -> Result<Response<Body>, ApiError> {
    3269            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3270            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3271            0 :     let body: IngestAuxFilesRequest = json_request(&mut request).await?;
    3272            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    3273              : 
    3274            0 :     let state = get_state(&request);
    3275              : 
    3276            0 :     let timeline =
    3277            0 :         active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
    3278            0 :             .await?;
    3279              : 
    3280            0 :     let mut modification = timeline.begin_modification(
    3281            0 :         Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */
    3282            0 :     );
    3283            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
    3284            0 :     for (fname, content) in body.aux_files {
    3285            0 :         modification
    3286            0 :             .put_file(&fname, content.as_bytes(), &ctx)
    3287            0 :             .await
    3288            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))?;
    3289              :     }
    3290            0 :     modification
    3291            0 :         .commit(&ctx)
    3292            0 :         .await
    3293            0 :         .map_err(ApiError::InternalServerError)?;
    3294              : 
    3295            0 :     json_response(StatusCode::OK, ())
    3296            0 : }
    3297              : 
    3298              : /// Report on the largest tenants on this pageserver, for the storage controller to identify
    3299              : /// candidates for splitting
    3300            0 : async fn post_top_tenants(
    3301            0 :     mut r: Request<Body>,
    3302            0 :     _cancel: CancellationToken,
    3303            0 : ) -> Result<Response<Body>, ApiError> {
    3304            0 :     check_permission(&r, None)?;
    3305            0 :     let request: TopTenantShardsRequest = json_request(&mut r).await?;
    3306            0 :     let state = get_state(&r);
    3307              : 
    3308            0 :     fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 {
    3309            0 :         match order_by {
    3310            0 :             TenantSorting::ResidentSize => sizes.resident_size,
    3311            0 :             TenantSorting::MaxLogicalSize => sizes.max_logical_size,
    3312            0 :             TenantSorting::MaxLogicalSizePerShard => sizes.max_logical_size_per_shard,
    3313              :         }
    3314            0 :     }
    3315              : 
    3316              :     #[derive(Eq, PartialEq)]
    3317              :     struct HeapItem {
    3318              :         metric: u64,
    3319              :         sizes: TopTenantShardItem,
    3320              :     }
    3321              : 
    3322              :     impl PartialOrd for HeapItem {
    3323            0 :         fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
    3324            0 :             Some(self.cmp(other))
    3325            0 :         }
    3326              :     }
    3327              : 
    3328              :     /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which
    3329              :     /// supports popping the greatest item but not the smallest.
    3330              :     impl Ord for HeapItem {
    3331            0 :         fn cmp(&self, other: &Self) -> std::cmp::Ordering {
    3332            0 :             Reverse(self.metric).cmp(&Reverse(other.metric))
    3333            0 :         }
    3334              :     }
    3335              : 
    3336            0 :     let mut top_n: BinaryHeap<HeapItem> = BinaryHeap::with_capacity(request.limit);
    3337              : 
    3338              :     // FIXME: this is a lot of clones to take this tenant list
    3339            0 :     for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() {
    3340            0 :         if let Some(shards_lt) = request.where_shards_lt {
    3341              :             // Ignore tenants which already have >= this many shards
    3342            0 :             if tenant_shard_id.shard_count >= shards_lt {
    3343            0 :                 continue;
    3344            0 :             }
    3345            0 :         }
    3346              : 
    3347            0 :         let sizes = match tenant_slot {
    3348            0 :             TenantSlot::Attached(tenant) => tenant.get_sizes(),
    3349              :             TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
    3350            0 :                 continue;
    3351              :             }
    3352              :         };
    3353            0 :         let metric = get_size_metric(&sizes, &request.order_by);
    3354              : 
    3355            0 :         if let Some(gt) = request.where_gt {
    3356              :             // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work
    3357            0 :             if metric <= gt {
    3358            0 :                 continue;
    3359            0 :             }
    3360            0 :         };
    3361              : 
    3362            0 :         match top_n.peek() {
    3363            0 :             None => {
    3364            0 :                 // Top N list is empty: candidate becomes first member
    3365            0 :                 top_n.push(HeapItem { metric, sizes });
    3366            0 :             }
    3367            0 :             Some(i) if i.metric > metric && top_n.len() < request.limit => {
    3368            0 :                 // Lowest item in list is greater than our candidate, but we aren't at limit yet: push to end
    3369            0 :                 top_n.push(HeapItem { metric, sizes });
    3370            0 :             }
    3371            0 :             Some(i) if i.metric > metric => {
    3372            0 :                 // List is at limit and lowest value is greater than our candidate, drop it.
    3373            0 :             }
    3374            0 :             Some(_) => top_n.push(HeapItem { metric, sizes }),
    3375              :         }
    3376              : 
    3377            0 :         while top_n.len() > request.limit {
    3378            0 :             top_n.pop();
    3379            0 :         }
    3380              :     }
    3381              : 
    3382            0 :     json_response(
    3383              :         StatusCode::OK,
    3384              :         TopTenantShardsResponse {
    3385            0 :             shards: top_n.into_iter().map(|i| i.sizes).collect(),
    3386              :         },
    3387              :     )
    3388            0 : }
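
A self-contained sketch of the top-N selection above: wrapping the metric in `Reverse` turns the max-heap into a min-heap, so the smallest survivor sits at the top and is popped whenever the heap grows past the limit, leaving the N largest items.

use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn top_n(metrics: impl IntoIterator<Item = u64>, limit: usize) -> Vec<u64> {
    let mut heap: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(limit);
    for metric in metrics {
        heap.push(Reverse(metric));
        if heap.len() > limit {
            heap.pop(); // evict the current smallest
        }
    }
    heap.into_iter().map(|Reverse(metric)| metric).collect()
}

fn demo() {
    let mut largest = top_n([5, 1, 9, 3, 7], 3);
    largest.sort_unstable();
    assert_eq!(largest, vec![5, 7, 9]);
}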
    3389              : 
    3390            0 : async fn put_tenant_timeline_import_basebackup(
    3391            0 :     request: Request<Body>,
    3392            0 :     _cancel: CancellationToken,
    3393            0 : ) -> Result<Response<Body>, ApiError> {
    3394            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    3395            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3396            0 :     let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
    3397            0 :     let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
    3398            0 :     let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?;
    3399              : 
    3400            0 :     check_permission(&request, Some(tenant_id))?;
    3401              : 
    3402            0 :     let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    3403              : 
    3404            0 :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    3405              : 
    3406            0 :     let span = info_span!("import_basebackup",
    3407            0 :         tenant_id=%tenant_id, timeline_id=%timeline_id, shard_id=%tenant_shard_id.shard_slug(),
    3408              :         base_lsn=%base_lsn, end_lsn=%end_lsn, pg_version=%pg_version);
    3409            0 :     async move {
    3410            0 :         let state = get_state(&request);
    3411            0 :         let tenant = state
    3412            0 :             .tenant_manager
    3413            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    3414              : 
    3415            0 :         let broker_client = state.broker_client.clone();
    3416              : 
    3417            0 :         let mut body = StreamReader::new(
    3418            0 :             request
    3419            0 :                 .into_body()
    3420            0 :                 .map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
    3421              :         );
    3422              : 
    3423            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    3424              : 
    3425            0 :         let (timeline, timeline_ctx) = tenant
    3426            0 :             .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
    3427            0 :             .map_err(ApiError::InternalServerError)
    3428            0 :             .await?;
    3429              : 
    3430              :         // TODO mark timeline as not ready until it reaches end_lsn.
    3431              :         // We might have some wal to import as well, and we should prevent compute
    3432              :         // from connecting before that and writing conflicting wal.
    3433              :         //
    3434              :         // This is not relevant for pageserver->pageserver migrations, since there's
    3435              :         // no WAL to import. But it should be fixed if we want to import from postgres.
    3436              : 
    3437              :         // TODO leave clean state on error. For now you can use detach to clean
    3438              :         // up broken state from a failed import.
    3439              : 
    3440              :         // Import basebackup provided via CopyData
    3441            0 :         info!("importing basebackup");
    3442              : 
    3443            0 :         timeline
    3444            0 :             .import_basebackup_from_tar(
    3445            0 :                 tenant.clone(),
    3446            0 :                 &mut body,
    3447            0 :                 base_lsn,
    3448            0 :                 broker_client,
    3449            0 :                 &timeline_ctx,
    3450            0 :             )
    3451            0 :             .await
    3452            0 :             .map_err(ApiError::InternalServerError)?;
    3453              : 
    3454              :         // Read the end of the tar archive.
    3455            0 :         read_tar_eof(body)
    3456            0 :             .await
    3457            0 :             .map_err(ApiError::InternalServerError)?;
    3458              : 
    3459              :         // TODO check checksum
    3460              :         // Meanwhile you can verify client-side by taking fullbackup
    3461              :         // and checking that it matches in size with what was imported.
    3462              :         // It wouldn't work if base came from vanilla postgres though,
    3463              :         // since we discard some log files.
    3464              : 
    3465            0 :         info!("done");
    3466            0 :         json_response(StatusCode::OK, ())
    3467            0 :     }
    3468            0 :     .instrument(span)
    3469            0 :     .await
    3470            0 : }
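
A self-contained sketch of the body adaptation above: `tokio_util::io::StreamReader` turns a `Stream` of byte-buffer results into an `AsyncRead`, which lets tar-import code consume an HTTP body as if it were a file. The in-memory chunks are placeholders for `request.into_body()`.

use futures::stream;
use tokio::io::AsyncReadExt;
use tokio_util::io::StreamReader;

async fn demo() -> std::io::Result<()> {
    let chunks = stream::iter(vec![
        Ok::<_, std::io::Error>(&b"base"[..]),
        Ok(&b"backup"[..]),
    ]);
    let mut reader = StreamReader::new(chunks);

    let mut contents = Vec::new();
    reader.read_to_end(&mut contents).await?;
    assert_eq!(contents, b"basebackup");
    Ok(())
}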
    3471              : 
    3472            0 : async fn put_tenant_timeline_import_wal(
    3473            0 :     request: Request<Body>,
    3474            0 :     _cancel: CancellationToken,
    3475            0 : ) -> Result<Response<Body>, ApiError> {
    3476            0 :     let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
    3477            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3478            0 :     let start_lsn: Lsn = must_parse_query_param(&request, "start_lsn")?;
    3479            0 :     let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
    3480              : 
    3481            0 :     check_permission(&request, Some(tenant_id))?;
    3482              : 
    3483            0 :     let span = info_span!("import_wal", tenant_id=%tenant_id, timeline_id=%timeline_id, start_lsn=%start_lsn, end_lsn=%end_lsn);
    3484            0 :     async move {
    3485            0 :         let state = get_state(&request);
    3486              : 
    3487            0 :         let timeline = active_timeline_of_active_tenant(&state.tenant_manager, TenantShardId::unsharded(tenant_id), timeline_id).await?;
    3488            0 :         let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
    3489            0 :             .download_behavior(DownloadBehavior::Warn)
    3490            0 :             .scope(context::Scope::new_timeline(&timeline))
    3491            0 :             .root();
    3492              : 
    3493            0 :         let mut body = StreamReader::new(request.into_body().map(|res| {
    3494            0 :             res.map_err(|error| {
    3495            0 :                 std::io::Error::other( anyhow::anyhow!(error))
    3496            0 :             })
    3497            0 :         }));
    3498              : 
    3499            0 :         let last_record_lsn = timeline.get_last_record_lsn();
    3500            0 :         if last_record_lsn != start_lsn {
    3501            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL from Lsn {start_lsn} because timeline does not start from the same lsn: {last_record_lsn}")));
    3502            0 :         }
    3503              : 
    3504              :         // TODO leave clean state on error. For now you can use detach to clean
    3505              :         // up broken state from a failed import.
    3506              : 
    3507              :         // Import wal provided via CopyData
    3508            0 :         info!("importing wal");
    3509            0 :         crate::import_datadir::import_wal_from_tar(&timeline, &mut body, start_lsn, end_lsn, &ctx).await.map_err(ApiError::InternalServerError)?;
    3510            0 :         info!("wal import complete");
    3511              : 
    3512              :         // Read the end of the tar archive.
    3513            0 :         read_tar_eof(body).await.map_err(ApiError::InternalServerError)?;
    3514              : 
    3515              :         // TODO Does it make sense to overshoot?
    3516            0 :         if timeline.get_last_record_lsn() < end_lsn {
    3517            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL from Lsn {start_lsn} because timeline does not start from the same lsn: {last_record_lsn}")));
    3518            0 :         }
    3519              : 
    3520              :         // Flush data to disk, then upload to s3. No need for a forced checkpoint.
    3521              :         // We only want to persist the data, and it doesn't matter if it's in the
    3522              :         // shape of deltas or images.
    3523            0 :         info!("flushing layers");
    3524            0 :         timeline.freeze_and_flush().await.map_err(|e| match e {
    3525            0 :             tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
    3526            0 :             other => ApiError::InternalServerError(anyhow::anyhow!(other)),
    3527            0 :         })?;
    3528              : 
    3529            0 :         info!("done");
    3530              : 
    3531            0 :         json_response(StatusCode::OK, ())
    3532            0 :     }.instrument(span).await
    3533            0 : }
    3534              : 
    3535              : /// Activate a timeline after its import has completed
    3536              : ///
    3537              : /// The endpoint is idempotent, and callers are expected to retry on any error
    3538              : /// until they receive a successful response.
    3539            0 : async fn activate_post_import_handler(
    3540            0 :     request: Request<Body>,
    3541            0 :     _cancel: CancellationToken,
    3542            0 : ) -> Result<Response<Body>, ApiError> {
    3543            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3544            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    3545              : 
    3546            0 :     let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
    3547              :     const DEFAULT_ACTIVATE_TIMEOUT: Duration = Duration::from_secs(1);
    3548            0 :     let activate_timeout = parse_query_param(&request, "timeline_activate_timeout_ms")?
    3549            0 :         .map(Duration::from_millis)
    3550            0 :         .unwrap_or(DEFAULT_ACTIVATE_TIMEOUT);
    3551              : 
    3552            0 :     let span = info_span!(
    3553              :         "activate_post_import_handler",
    3554              :         tenant_id=%tenant_shard_id.tenant_id,
    3555              :         timeline_id=%timeline_id,
    3556            0 :         shard_id=%tenant_shard_id.shard_slug()
    3557              :     );
    3558              : 
    3559            0 :     async move {
    3560            0 :         let state = get_state(&request);
    3561            0 :         let tenant = state
    3562            0 :             .tenant_manager
    3563            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    3564              : 
    3565            0 :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    3566              : 
    3567            0 :         tenant.finalize_importing_timeline(timeline_id).await?;
    3568              : 
    3569            0 :         match tenant.get_timeline(timeline_id, false) {
    3570            0 :             Ok(_timeline) => {
    3571            0 :                 // Timeline is already visible. Reset not required: fall through.
    3572            0 :             }
    3573              :             Err(GetTimelineError::NotFound { .. }) => {
    3574              :                 // This is crude: we reset the whole tenant such that the new timeline is detected
    3575              :                 // and activated. We can come up with something more granular in the future.
    3576              :                 //
    3577              :                 // Note that we only reset the tenant if required: when the timeline is
    3578              :                 // not present in [`Tenant::timelines`].
    3579            0 :                 let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
    3580            0 :                 state
    3581            0 :                     .tenant_manager
    3582            0 :                     .reset_tenant(tenant_shard_id, false, &ctx)
    3583            0 :                     .await
    3584            0 :                     .map_err(ApiError::InternalServerError)?;
    3585              :             }
    3586              :             Err(GetTimelineError::ShuttingDown) => {
    3587            0 :                 return Err(ApiError::ShuttingDown);
    3588              :             }
    3589              :             Err(GetTimelineError::NotActive { .. }) => {
    3590            0 :                 unreachable!("Called get_timeline with active_only=false");
    3591              :             }
    3592              :         }
    3593              : 
    3594            0 :         let timeline = tenant.get_timeline(timeline_id, false)?;
    3595              : 
    3596            0 :         let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn)
    3597            0 :             .with_scope_timeline(&timeline);
    3598              : 
    3599            0 :         let result =
    3600            0 :             tokio::time::timeout(activate_timeout, timeline.wait_to_become_active(&ctx)).await;
    3601            0 :         match result {
    3602            0 :             Ok(Ok(())) => {
    3603            0 :                 // fallthrough
    3604            0 :             }
    3605              :             // Timeline reached some other state that's not active
    3606              :             // TODO(vlad): if the tenant is broken, return a permanent error
    3607            0 :             Ok(Err(_timeline_state)) => {
    3608            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3609            0 :                     "Timeline activation failed"
    3610            0 :                 )));
    3611              :             }
    3612              :             // Activation timed out
    3613              :             Err(_) => {
    3614            0 :                 return Err(ApiError::Timeout("Timeline activation timed out".into()));
    3615              :             }
    3616              :         }
    3617              : 
    3618            0 :         let timeline_info = build_timeline_info(
    3619            0 :             &timeline, false, // include_non_incremental_logical_size,
    3620            0 :             false, // force_await_initial_logical_size
    3621            0 :             &ctx,
    3622            0 :         )
    3623            0 :         .await
    3624            0 :         .context("get local timeline info")
    3625            0 :         .map_err(ApiError::InternalServerError)?;
    3626              : 
    3627            0 :         json_response(StatusCode::OK, timeline_info)
    3628            0 :     }
    3629            0 :     .instrument(span)
    3630            0 :     .await
    3631            0 : }
    3632              : 
    3633              : /// Read the end of a tar archive.
    3634              : ///
    3635              : /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
    3636              : /// `tokio_tar` already read the first such block. Read the second all-zeros block,
    3637              : /// and check that there is no more data after the EOF marker.
    3638              : ///
    3639              : /// The 'tar' command can also write extra blocks of zeros, up to a full record
    3640              : /// size, as controlled by the --record-size argument. Ignore those too.
    3641            0 : async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow::Result<()> {
    3642              :     use tokio::io::AsyncReadExt;
    3643            0 :     let mut buf = [0u8; 512];
    3644              : 
    3645              :     // Read the all-zeros block, and verify it
    3646            0 :     let mut total_bytes = 0;
    3647            0 :     while total_bytes < 512 {
    3648            0 :         let nbytes = reader.read(&mut buf[total_bytes..]).await?;
    3649            0 :         total_bytes += nbytes;
    3650            0 :         if nbytes == 0 {
    3651            0 :             break;
    3652            0 :         }
    3653              :     }
    3654            0 :     if total_bytes < 512 {
    3655            0 :         anyhow::bail!("incomplete or invalid tar EOF marker");
    3656            0 :     }
    3657            0 :     if !buf.iter().all(|&x| x == 0) {
    3658            0 :         anyhow::bail!("invalid tar EOF marker");
    3659            0 :     }
    3660              : 
    3661              :     // Drain any extra zero-blocks after the EOF marker
    3662            0 :     let mut trailing_bytes = 0;
    3663            0 :     let mut seen_nonzero_bytes = false;
    3664              :     loop {
    3665            0 :         let nbytes = reader.read(&mut buf).await?;
    3666            0 :         trailing_bytes += nbytes;
    3667            0 :         if !buf.iter().all(|&x| x == 0) {
    3668            0 :             seen_nonzero_bytes = true;
    3669            0 :         }
    3670            0 :         if nbytes == 0 {
    3671            0 :             break;
    3672            0 :         }
    3673              :     }
    3674            0 :     if seen_nonzero_bytes {
    3675            0 :         anyhow::bail!("unexpected non-zero bytes after the tar archive");
    3676            0 :     }
    3677            0 :     if trailing_bytes % 512 != 0 {
    3678            0 :         anyhow::bail!(
    3679            0 :             "unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive"
    3680              :         );
    3681            0 :     }
    3682            0 :     Ok(())
    3683            0 : }
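
// A minimal, self-contained sketch (not part of routes.rs) of how read_tar_eof behaves,
// assuming tokio with the "macros" and "rt" features; tokio implements AsyncRead for &[u8],
// so a plain byte buffer can stand in for the basebackup stream.
#[cfg(test)]
mod read_tar_eof_sketch {
    use super::read_tar_eof;

    #[tokio::test]
    async fn accepts_trailing_zero_records() {
        // The mandatory second all-zeros block plus two extra zero blocks of record
        // padding: accepted, because the padding is a whole number of 512-byte blocks.
        let data = vec![0u8; 512 * 3];
        assert!(read_tar_eof(&data[..]).await.is_ok());
    }

    #[tokio::test]
    async fn rejects_garbage_after_eof_marker() {
        // Non-zero bytes after the EOF marker must be rejected.
        let mut data = vec![0u8; 512];
        data.extend_from_slice(b"garbage");
        assert!(read_tar_eof(&data[..]).await.is_err());
    }
}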
    3684              : 
    3685            0 : async fn tenant_evaluate_feature_flag(
    3686            0 :     request: Request<Body>,
    3687            0 :     _cancel: CancellationToken,
    3688            0 : ) -> Result<Response<Body>, ApiError> {
    3689            0 :     let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
    3690            0 :     check_permission(&request, Some(tenant_shard_id.tenant_id))?;
    3691              : 
    3692            0 :     let flag: String = parse_request_param(&request, "flag_key")?;
    3693            0 :     let as_type: Option<String> = parse_query_param(&request, "as")?;
    3694              : 
    3695            0 :     let state = get_state(&request);
    3696              : 
    3697            0 :     async {
    3698            0 :         let tenant = state
    3699            0 :             .tenant_manager
    3700            0 :             .get_attached_tenant_shard(tenant_shard_id)?;
    3701              :         // TODO: the properties we get here might be stale right after they are collected,
    3702              :         // but such races are rare (properties are refreshed every 10s), so we don't need to worry about it for now.
    3703            0 :         let properties = tenant.feature_resolver.collect_properties();
    3704            0 :         if as_type.as_deref() == Some("boolean") {
    3705            0 :             let result = tenant.feature_resolver.evaluate_boolean(&flag);
    3706            0 :             let result = result.map(|_| true).map_err(|e| e.to_string());
    3707            0 :             json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
    3708            0 :         } else if as_type.as_deref() == Some("multivariate") {
    3709            0 :             let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
    3710            0 :             json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
    3711              :         } else {
    3712              :             // Auto infer the type of the feature flag.
    3713            0 :             let is_boolean = tenant.feature_resolver.is_feature_flag_boolean(&flag).map_err(|e| ApiError::InternalServerError(anyhow::anyhow!("{e}")))?;
    3714            0 :             if is_boolean {
    3715            0 :                 let result = tenant.feature_resolver.evaluate_boolean(&flag);
    3716            0 :                 let result = result.map(|_| true).map_err(|e| e.to_string());
    3717            0 :                 json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
    3718              :             } else {
    3719            0 :                 let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
    3720            0 :                 json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
    3721              :             }
    3722              :         }
    3723            0 :     }
    3724            0 :     .instrument(info_span!("tenant_evaluate_feature_flag", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()))
    3725            0 :     .await
    3726            0 : }
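
// A usage sketch (not part of routes.rs): calling the feature-flag endpoint from a Rust
// client, assuming the reqwest (with its "json" feature) and serde_json crates and a
// hypothetical pageserver listening on 127.0.0.1:9898. The optional `as` query parameter
// ("boolean" or "multivariate") bypasses the auto-inference branch above.
#[allow(dead_code)]
async fn evaluate_flag_example(tenant_shard_id: &str, flag_key: &str) -> anyhow::Result<()> {
    let url = format!("http://127.0.0.1:9898/v1/tenant/{tenant_shard_id}/feature_flag/{flag_key}");
    let response: serde_json::Value = reqwest::Client::new()
        .get(url)
        .query(&[("as", "boolean")])
        .send()
        .await?
        .json()
        .await?;
    // The body carries both the evaluation result and the properties it was evaluated against.
    println!("result: {}", response["result"]);
    println!("properties: {}", response["properties"]);
    Ok(())
}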
    3727              : 
    3728            0 : async fn force_override_feature_flag_for_testing_put(
    3729            0 :     request: Request<Body>,
    3730            0 :     _cancel: CancellationToken,
    3731            0 : ) -> Result<Response<Body>, ApiError> {
    3732            0 :     check_permission(&request, None)?;
    3733              : 
    3734            0 :     let flag: String = parse_request_param(&request, "flag_key")?;
    3735            0 :     let value: String = must_parse_query_param(&request, "value")?;
    3736            0 :     let state = get_state(&request);
    3737            0 :     state
    3738            0 :         .feature_resolver
    3739            0 :         .force_override_for_testing(&flag, Some(&value));
    3740            0 :     json_response(StatusCode::OK, ())
    3741            0 : }
    3742              : 
    3743            0 : async fn force_override_feature_flag_for_testing_delete(
    3744            0 :     request: Request<Body>,
    3745            0 :     _cancel: CancellationToken,
    3746            0 : ) -> Result<Response<Body>, ApiError> {
    3747            0 :     check_permission(&request, None)?;
    3748              : 
    3749            0 :     let flag: String = parse_request_param(&request, "flag_key")?;
    3750            0 :     let state = get_state(&request);
    3751            0 :     state
    3752            0 :         .feature_resolver
    3753            0 :         .force_override_for_testing(&flag, None);
    3754            0 :     json_response(StatusCode::OK, ())
    3755            0 : }
    3756              : 
    3757            0 : async fn update_feature_flag_spec(
    3758            0 :     mut request: Request<Body>,
    3759            0 :     _cancel: CancellationToken,
    3760            0 : ) -> Result<Response<Body>, ApiError> {
    3761            0 :     check_permission(&request, None)?;
    3762            0 :     let body = json_request(&mut request).await?;
    3763            0 :     let state = get_state(&request);
    3764            0 :     state
    3765            0 :         .feature_resolver
    3766            0 :         .update(body)
    3767            0 :         .map_err(ApiError::InternalServerError)?;
    3768            0 :     json_response(StatusCode::OK, ())
    3769            0 : }
    3770              : 
    3771              : /// Common functionality of all the HTTP API handlers.
    3772              : ///
    3773              : /// - Adds a tracing span to each request (by `request_span`)
    3774              : /// - Logs the request depending on the request method (by `request_span`)
    3775              : /// - Logs the response if it was not successful (by `request_span`)
    3776              : /// - Shields the handler function from async cancellations. Hyper can drop the handler
    3777              : ///   Future if the connection to the client is lost, but most of the pageserver code is
    3778              : ///   not async cancellation safe. This converts the dropped future into a graceful cancellation
    3779              : ///   request with a CancellationToken.
    3780            0 : async fn api_handler<R, H>(request: Request<Body>, handler: H) -> Result<Response<Body>, ApiError>
    3781            0 : where
    3782            0 :     R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
    3783            0 :     H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
    3784            0 : {
    3785            0 :     if request.uri() != &"/v1/failpoints".parse::<Uri>().unwrap() {
    3786            0 :         fail::fail_point!("api-503", |_| Err(ApiError::ResourceUnavailable(
    3787            0 :             "failpoint".into()
    3788            0 :         )));
    3789              : 
    3790            0 :         fail::fail_point!("api-500", |_| Err(ApiError::InternalServerError(
    3791            0 :             anyhow::anyhow!("failpoint")
    3792            0 :         )));
    3793            0 :     }
    3794              : 
    3795              :     // Spawn a new task to handle the request, to protect the handler from unexpected
    3796              :     // async cancellations. Most pageserver functions are not async cancellation safe.
    3797              :     // We arm a drop-guard, so that if Hyper drops the Future, we signal the task
    3798              :     // with the cancellation token.
    3799            0 :     let token = CancellationToken::new();
    3800            0 :     let cancel_guard = token.clone().drop_guard();
    3801            0 :     let result = request_span(request, move |r| async {
    3802            0 :         let handle = tokio::spawn(
    3803            0 :             async {
    3804            0 :                 let token_cloned = token.clone();
    3805            0 :                 let result = handler(r, token).await;
    3806            0 :                 if token_cloned.is_cancelled() {
    3807              :                     // The drop guard has executed: we will never turn this result into a response.
    3808              :                     //
    3809              :                     // At least temporarily, log with {:?}: these failures are rare enough, but
    3810              :                     // could hide difficult errors.
    3811            0 :                     match &result {
    3812            0 :                         Ok(response) => {
    3813            0 :                             let status = response.status();
    3814            0 :                             info!(%status, "Cancelled request finished successfully")
    3815              :                         }
    3816            0 :                         Err(e) => match e {
    3817              :                             ApiError::ShuttingDown | ApiError::ResourceUnavailable(_) => {
    3818              :                                 // Don't log this at error severity: these are normal during the tenant/process lifecycle
    3819            0 :                                 info!("Cancelled request aborted for shutdown")
    3820              :                             }
    3821              :                             _ => {
    3822              :                                 // Log these in a highly visible way, because we have no client to send the response to, but
    3823              :                                 // would like to know that something went wrong.
    3824            0 :                                 error!("Cancelled request finished with an error: {e:?}")
    3825              :                             }
    3826              :                         },
    3827              :                     }
    3828            0 :                 }
    3829              :                 // The only logging for cancelled, panicked request handlers is the tracing_panic_hook,
    3830              :                 // which should suffice.
    3831              :                 //
    3832              :                 // There is still a chance to lose the result due to a race between
    3833              :                 // returning from here and the connection closing before the outer
    3834              :                 // task gets to execute; leaving that for #5815.
    3835            0 :                 result
    3836            0 :             }
    3837            0 :             .in_current_span(),
    3838              :         );
    3839              : 
    3840            0 :         match handle.await {
    3841              :             // TODO: never actually return Err from here, always Ok(...) so that we can log
    3842              :             // spanned errors. Call api_error_handler instead and return appropriate Body.
    3843            0 :             Ok(result) => result,
    3844            0 :             Err(e) => {
    3845              :                 // The handler task panicked. We have a global panic handler that logs the
    3846              :                 // panic with its backtrace, so no need to log that here. Only log a brief
    3847              :                 // message to make it clear that we returned the error to the client.
    3848            0 :                 error!("HTTP request handler task panicked: {e:#}");
    3849              : 
    3850              :                 // Don't return an Error here, because then the fallback error handler that was
    3851              :                 // installed in make_router() will print the error. Instead, construct the
    3852              :                 // HTTP error response and return that.
    3853            0 :                 Ok(
    3854            0 :                     ApiError::InternalServerError(anyhow!("HTTP request handler task panicked"))
    3855            0 :                         .into_response(),
    3856            0 :                 )
    3857              :             }
    3858              :         }
    3859            0 :     })
    3860            0 :     .await;
    3861              : 
    3862            0 :     cancel_guard.disarm();
    3863              : 
    3864            0 :     result
    3865            0 : }
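
// A minimal, self-contained sketch (not part of routes.rs) of the shield-with-drop-guard
// pattern api_handler relies on, assuming the tokio and tokio_util crates. The work runs in
// its own spawned task; if the caller's future is dropped mid-await, the still-armed
// DropGuard cancels the token so the task can wind down gracefully instead of being torn
// down at an arbitrary await point.
#[allow(dead_code)]
async fn shielded_work_example() -> anyhow::Result<&'static str> {
    use tokio_util::sync::CancellationToken;

    let token = CancellationToken::new();
    // Armed guard: if this future is dropped before we disarm, the token gets cancelled.
    let cancel_guard = token.clone().drop_guard();

    let handle = tokio::spawn(async move {
        tokio::select! {
            _ = token.cancelled() => Err(anyhow::anyhow!("caller went away, stopping early")),
            _ = tokio::time::sleep(std::time::Duration::from_millis(10)) => Ok("done"),
        }
    });

    let result = handle.await.expect("task panicked")?;
    // Normal completion: disarm so dropping the guard on the return path cancels nothing.
    cancel_guard.disarm();
    Ok(result)
}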
    3866              : 
    3867              : /// Like api_handler, but returns an error response if the server is built without
    3868              : /// the 'testing' feature.
    3869            0 : async fn testing_api_handler<R, H>(
    3870            0 :     desc: &str,
    3871            0 :     request: Request<Body>,
    3872            0 :     handler: H,
    3873            0 : ) -> Result<Response<Body>, ApiError>
    3874            0 : where
    3875            0 :     R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
    3876            0 :     H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
    3877            0 : {
    3878            0 :     if cfg!(feature = "testing") {
    3879            0 :         api_handler(request, handler).await
    3880              :     } else {
    3881            0 :         std::future::ready(Err(ApiError::BadRequest(anyhow!(
    3882            0 :             "Cannot {desc} because pageserver was compiled without testing APIs",
    3883            0 :         ))))
    3884            0 :         .await
    3885              :     }
    3886            0 : }
    3887              : 
    3888            0 : pub fn make_router(
    3889            0 :     state: Arc<State>,
    3890            0 :     launch_ts: &'static LaunchTimestamp,
    3891            0 :     auth: Option<Arc<SwappableJwtAuth>>,
    3892            0 : ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
    3893            0 :     let spec = include_bytes!("openapi_spec.yml");
    3894            0 :     let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
    3895            0 :     if auth.is_some() {
    3896            0 :         router = router.middleware(auth_middleware(|request| {
    3897            0 :             let state = get_state(request);
    3898            0 :             if state.allowlist_routes.contains(&request.uri().path()) {
    3899            0 :                 None
    3900              :             } else {
    3901            0 :                 state.auth.as_deref()
    3902              :             }
    3903            0 :         }))
    3904            0 :     }
    3905              : 
    3906            0 :     router = router.middleware(
    3907            0 :         endpoint::add_response_header_middleware(
    3908            0 :             "PAGESERVER_LAUNCH_TIMESTAMP",
    3909            0 :             &launch_ts.to_string(),
    3910              :         )
    3911            0 :         .expect("construct launch timestamp header middleware"),
    3912              :     );
    3913              : 
    3914            0 :     Ok(router
    3915            0 :         .data(state)
    3916            0 :         .get("/metrics", |r| request_span(r, prometheus_metrics_handler))
    3917            0 :         .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
    3918            0 :         .get("/profile/heap", |r| request_span(r, profile_heap_handler))
    3919            0 :         .get("/v1/status", |r| api_handler(r, status_handler))
    3920            0 :         .put("/v1/failpoints", |r| {
    3921            0 :             testing_api_handler("manage failpoints", r, failpoints_handler)
    3922            0 :         })
    3923            0 :         .post("/v1/reload_auth_validation_keys", |r| {
    3924            0 :             api_handler(r, reload_auth_validation_keys_handler)
    3925            0 :         })
    3926            0 :         .get("/v1/tenant", |r| api_handler(r, tenant_list_handler))
    3927            0 :         .get("/v1/tenant/:tenant_shard_id", |r| {
    3928            0 :             api_handler(r, tenant_status)
    3929            0 :         })
    3930            0 :         .delete("/v1/tenant/:tenant_shard_id", |r| {
    3931            0 :             api_handler(r, tenant_delete_handler)
    3932            0 :         })
    3933            0 :         .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
    3934            0 :             api_handler(r, tenant_size_handler)
    3935            0 :         })
    3936            0 :         .patch("/v1/tenant/config", |r| {
    3937            0 :             api_handler(r, patch_tenant_config_handler)
    3938            0 :         })
    3939            0 :         .put("/v1/tenant/config", |r| {
    3940            0 :             api_handler(r, update_tenant_config_handler)
    3941            0 :         })
    3942            0 :         .put("/v1/tenant/:tenant_shard_id/shard_split", |r| {
    3943            0 :             api_handler(r, tenant_shard_split_handler)
    3944            0 :         })
    3945            0 :         .get("/v1/tenant/:tenant_shard_id/config", |r| {
    3946            0 :             api_handler(r, get_tenant_config_handler)
    3947            0 :         })
    3948            0 :         .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
    3949            0 :             api_handler(r, put_tenant_location_config_handler)
    3950            0 :         })
    3951            0 :         .get("/v1/location_config", |r| {
    3952            0 :             api_handler(r, list_location_config_handler)
    3953            0 :         })
    3954            0 :         .get("/v1/location_config/:tenant_shard_id", |r| {
    3955            0 :             api_handler(r, get_location_config_handler)
    3956            0 :         })
    3957            0 :         .put(
    3958              :             "/v1/tenant/:tenant_shard_id/time_travel_remote_storage",
    3959            0 :             |r| api_handler(r, tenant_time_travel_remote_storage_handler),
    3960              :         )
    3961            0 :         .get("/v1/tenant/:tenant_shard_id/timeline", |r| {
    3962            0 :             api_handler(r, timeline_list_handler)
    3963            0 :         })
    3964            0 :         .get("/v1/tenant/:tenant_shard_id/timeline_and_offloaded", |r| {
    3965            0 :             api_handler(r, timeline_and_offloaded_list_handler)
    3966            0 :         })
    3967            0 :         .post("/v1/tenant/:tenant_shard_id/timeline", |r| {
    3968            0 :             api_handler(r, timeline_create_handler)
    3969            0 :         })
    3970            0 :         .post("/v1/tenant/:tenant_shard_id/reset", |r| {
    3971            0 :             api_handler(r, tenant_reset_handler)
    3972            0 :         })
    3973            0 :         .post(
    3974              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive",
    3975            0 :             |r| api_handler(r, timeline_preserve_initdb_handler),
    3976              :         )
    3977            0 :         .put(
    3978              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config",
    3979            0 :             |r| api_handler(r, timeline_archival_config_handler),
    3980              :         )
    3981            0 :         .get("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
    3982            0 :             api_handler(r, timeline_detail_handler)
    3983            0 :         })
    3984            0 :         .get(
    3985              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_lsn_by_timestamp",
    3986            0 :             |r| api_handler(r, get_lsn_by_timestamp_handler),
    3987              :         )
    3988            0 :         .get(
    3989              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn",
    3990            0 :             |r| api_handler(r, get_timestamp_of_lsn_handler),
    3991              :         )
    3992            0 :         .post(
    3993              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/patch_index_part",
    3994            0 :             |r| api_handler(r, timeline_patch_index_part_handler),
    3995              :         )
    3996            0 :         .post(
    3997              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease",
    3998            0 :             |r| api_handler(r, lsn_lease_handler),
    3999              :         )
    4000            0 :         .put(
    4001              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc",
    4002            0 :             |r| api_handler(r, timeline_gc_handler),
    4003              :         )
    4004            0 :         .get(
    4005              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
    4006            0 :             |r| api_handler(r, timeline_compact_info_handler),
    4007              :         )
    4008            0 :         .put(
    4009              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
    4010            0 :             |r| api_handler(r, timeline_compact_handler),
    4011              :         )
    4012            0 :         .delete(
    4013              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
    4014            0 :             |r| api_handler(r, timeline_cancel_compact_handler),
    4015              :         )
    4016            0 :         .put(
    4017              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/offload",
    4018            0 :             |r| testing_api_handler("attempt timeline offload", r, timeline_offload_handler),
    4019              :         )
    4020            0 :         .put(
    4021              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/mark_invisible",
    4022            0 :             |r| api_handler(r, timeline_mark_invisible_handler),
    4023              :         )
    4024            0 :         .put(
    4025              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/checkpoint",
    4026            0 :             |r| testing_api_handler("run timeline checkpoint", r, timeline_checkpoint_handler),
    4027              :         )
    4028            0 :         .post(
    4029              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
    4030            0 :             |r| api_handler(r, timeline_download_remote_layers_handler_post),
    4031              :         )
    4032            0 :         .get(
    4033              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
    4034            0 :             |r| api_handler(r, timeline_download_remote_layers_handler_get),
    4035              :         )
    4036            0 :         .put(
    4037              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/detach_ancestor",
    4038            0 :             |r| api_handler(r, timeline_detach_ancestor_handler),
    4039              :         )
    4040            0 :         .delete("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
    4041            0 :             api_handler(r, timeline_delete_handler)
    4042            0 :         })
    4043            0 :         .get(
    4044              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer",
    4045            0 :             |r| api_handler(r, layer_map_info_handler),
    4046              :         )
    4047            0 :         .post(
    4048              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
    4049            0 :             |r| api_handler(r, timeline_download_heatmap_layers_handler),
    4050              :         )
    4051            0 :         .delete(
    4052              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
    4053            0 :             |r| api_handler(r, timeline_shutdown_download_heatmap_layers_handler),
    4054              :         )
    4055            0 :         .get(
    4056              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
    4057            0 :             |r| api_handler(r, layer_download_handler),
    4058              :         )
    4059            0 :         .delete(
    4060              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
    4061            0 :             |r| api_handler(r, evict_timeline_layer_handler),
    4062              :         )
    4063            0 :         .post(
    4064              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_name/scan_disposable_keys",
    4065            0 :             |r| testing_api_handler("timeline_layer_scan_disposable_keys", r, timeline_layer_scan_disposable_keys),
    4066              :         )
    4067            0 :         .post(
    4068              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/block_gc",
    4069            0 :             |r| api_handler(r, timeline_gc_blocking_handler),
    4070              :         )
    4071            0 :         .post(
    4072              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/unblock_gc",
    4073            0 :             |r| api_handler(r, timeline_gc_unblocking_handler),
    4074              :         )
    4075            0 :         .get(
    4076              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/page_trace",
    4077            0 :             |r| api_handler(r, timeline_page_trace_handler),
    4078              :         )
    4079            0 :         .post("/v1/tenant/:tenant_shard_id/heatmap_upload", |r| {
    4080            0 :             api_handler(r, secondary_upload_handler)
    4081            0 :         })
    4082            0 :         .get("/v1/tenant/:tenant_id/scan_remote_storage", |r| {
    4083            0 :             api_handler(r, tenant_scan_remote_handler)
    4084            0 :         })
    4085            0 :         .put("/v1/disk_usage_eviction/run", |r| {
    4086            0 :             api_handler(r, disk_usage_eviction_run)
    4087            0 :         })
    4088            0 :         .put("/v1/deletion_queue/flush", |r| {
    4089            0 :             api_handler(r, deletion_queue_flush)
    4090            0 :         })
    4091            0 :         .get("/v1/tenant/:tenant_shard_id/secondary/status", |r| {
    4092            0 :             api_handler(r, secondary_status_handler)
    4093            0 :         })
    4094            0 :         .post("/v1/tenant/:tenant_shard_id/secondary/download", |r| {
    4095            0 :             api_handler(r, secondary_download_handler)
    4096            0 :         })
    4097            0 :         .post("/v1/tenant/:tenant_shard_id/wait_lsn", |r| {
    4098            0 :             api_handler(r, wait_lsn_handler)
    4099            0 :         })
    4100            0 :         .put("/v1/tenant/:tenant_shard_id/break", |r| {
    4101            0 :             testing_api_handler("set tenant state to broken", r, handle_tenant_break)
    4102            0 :         })
    4103            0 :         .get("/v1/panic", |r| api_handler(r, always_panic_handler))
    4104            0 :         .post("/v1/tracing/event", |r| {
    4105            0 :             testing_api_handler("emit a tracing event", r, post_tracing_event_handler)
    4106            0 :         })
    4107            0 :         .get(
    4108              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage",
    4109            0 :             |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler),
    4110              :         )
    4111            0 :         .get(
    4112              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/touchpage",
    4113            0 :             |r| api_handler(r, touchpage_at_lsn_handler),
    4114              :         )
    4115            0 :         .get(
    4116              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/keyspace",
    4117            0 :             |r| api_handler(r, timeline_collect_keyspace),
    4118              :         )
    4119            0 :         .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler))
    4120            0 :         .put("/v1/io_mode", |r| api_handler(r, put_io_mode_handler))
    4121            0 :         .get("/v1/utilization", |r| api_handler(r, get_utilization))
    4122            0 :         .post(
    4123              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files",
    4124            0 :             |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files),
    4125              :         )
    4126            0 :         .post(
    4127              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files",
    4128            0 :             |r| testing_api_handler("list_aux_files", r, list_aux_files),
    4129              :         )
    4130            0 :         .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
    4131            0 :         .post(
    4132              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
    4133            0 :             |r| testing_api_handler("perf_info", r, perf_info),
    4134              :         )
    4135            0 :         .put(
    4136              :             "/v1/tenant/:tenant_id/timeline/:timeline_id/import_basebackup",
    4137            0 :             |r| api_handler(r, put_tenant_timeline_import_basebackup),
    4138              :         )
    4139            0 :         .put(
    4140              :             "/v1/tenant/:tenant_id/timeline/:timeline_id/import_wal",
    4141            0 :             |r| api_handler(r, put_tenant_timeline_import_wal),
    4142              :         )
    4143            0 :         .put(
    4144              :             "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/activate_post_import",
    4145            0 :             |r| api_handler(r, activate_post_import_handler),
    4146              :         )
    4147            0 :         .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| {
    4148            0 :             api_handler(r, tenant_evaluate_feature_flag)
    4149            0 :         })
    4150            0 :         .put("/v1/feature_flag/:flag_key", |r| {
    4151            0 :             testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put)
    4152            0 :         })
    4153            0 :         .delete("/v1/feature_flag/:flag_key", |r| {
    4154            0 :             testing_api_handler("force override feature flag - delete", r, force_override_feature_flag_for_testing_delete)
    4155            0 :         })
    4156            0 :         .post("/v1/feature_flag_spec", |r| {
    4157            0 :             api_handler(r, update_feature_flag_spec)
    4158            0 :         })
    4159            0 :         .any(handler_404))
    4160            0 : }
        

Generated by: LCOV version 2.1-beta