Line data Source code
1 : //!
2 : //! Management HTTP API
3 : //!
4 : use std::cmp::Reverse;
5 : use std::collections::BTreeMap;
6 : use std::collections::BinaryHeap;
7 : use std::collections::HashMap;
8 : use std::str::FromStr;
9 : use std::sync::Arc;
10 : use std::time::Duration;
11 :
12 : use anyhow::{Context, Result, anyhow};
13 : use bytes::Bytes;
14 : use enumset::EnumSet;
15 : use futures::future::join_all;
16 : use futures::{StreamExt, TryFutureExt};
17 : use http_utils::endpoint::{
18 : self, attach_openapi_ui, auth_middleware, check_permission_with, profile_cpu_handler,
19 : profile_heap_handler, prometheus_metrics_handler, request_span,
20 : };
21 : use http_utils::error::{ApiError, HttpErrorBody};
22 : use http_utils::failpoints::failpoints_handler;
23 : use http_utils::json::{json_request, json_request_maybe, json_response};
24 : use http_utils::request::{
25 : get_request_param, must_get_query_param, must_parse_query_param, parse_query_param,
26 : parse_request_param,
27 : };
28 : use http_utils::{RequestExt, RouterBuilder};
29 : use humantime::format_rfc3339;
30 : use hyper::{Body, Request, Response, StatusCode, Uri, header};
31 : use metrics::launch_timestamp::LaunchTimestamp;
32 : use pageserver_api::models::virtual_file::IoMode;
33 : use pageserver_api::models::{
34 : DetachBehavior, DownloadRemoteLayersTaskSpawnRequest, IngestAuxFilesRequest,
35 : ListAuxFilesRequest, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
36 : LsnLeaseRequest, OffloadedTimelineInfo, PageTraceEvent, ShardParameters, StatusResponse,
37 : TenantConfigPatchRequest, TenantConfigRequest, TenantDetails, TenantInfo,
38 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantScanRemoteStorageResponse,
39 : TenantScanRemoteStorageShard, TenantShardLocation, TenantShardSplitRequest,
40 : TenantShardSplitResponse, TenantSorting, TenantState, TenantWaitLsnRequest,
41 : TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateRequestMode,
42 : TimelineCreateRequestModeImportPgdata, TimelineGcRequest, TimelineInfo,
43 : TimelinePatchIndexPartRequest, TimelineVisibilityState, TimelinesInfoAndOffloaded,
44 : TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
45 : };
46 : use pageserver_api::shard::{ShardCount, TenantShardId};
47 : use postgres_ffi::PgMajorVersion;
48 : use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
49 : use scopeguard::defer;
50 : use serde::{Deserialize, Serialize};
51 : use serde_json::json;
52 : use tenant_size_model::svg::SvgBranchKind;
53 : use tenant_size_model::{SizeResult, StorageModel};
54 : use tokio::time::Instant;
55 : use tokio_util::io::StreamReader;
56 : use tokio_util::sync::CancellationToken;
57 : use tracing::*;
58 : use utils::auth::SwappableJwtAuth;
59 : use utils::generation::Generation;
60 : use utils::id::{TenantId, TimelineId};
61 : use utils::lsn::Lsn;
62 : use wal_decoder::models::record::NeonWalRecord;
63 :
64 : use crate::config::PageServerConf;
65 : use crate::context;
66 : use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder};
67 : use crate::deletion_queue::DeletionQueueClient;
68 : use crate::feature_resolver::FeatureResolver;
69 : use crate::metrics::LOCAL_DATA_LOSS_SUSPECTED;
70 : use crate::pgdatadir_mapping::LsnForTimestamp;
71 : use crate::task_mgr::TaskKind;
72 : use crate::tenant::config::LocationConf;
73 : use crate::tenant::mgr::{
74 : GetActiveTenantError, GetTenantError, TenantManager, TenantMapError, TenantMapInsertError,
75 : TenantSlot, TenantSlotError, TenantSlotUpsertError, TenantStateError, UpsertLocationError,
76 : };
77 : use crate::tenant::remote_timeline_client::index::GcCompactionState;
78 : use crate::tenant::remote_timeline_client::{
79 : download_index_part, download_tenant_manifest, list_remote_tenant_shards, list_remote_timelines,
80 : };
81 : use crate::tenant::secondary::SecondaryController;
82 : use crate::tenant::size::ModelInputs;
83 : use crate::tenant::storage_layer::ValuesReconstructState;
84 : use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName};
85 : use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
86 : use crate::tenant::timeline::offload::{OffloadError, offload_timeline};
87 : use crate::tenant::timeline::{
88 : CompactFlags, CompactOptions, CompactRequest, MarkInvisibleRequest, Timeline, WaitLsnTimeout,
89 : WaitLsnWaiter, import_pgdata,
90 : };
91 : use crate::tenant::{
92 : GetTimelineError, LogicalSizeCalculationCause, OffloadedTimeline, PageReconstructError,
93 : remote_timeline_client,
94 : };
95 : use crate::{DEFAULT_PG_VERSION, disk_usage_eviction_task, tenant};
96 :
97 : // For APIs that require an Active tenant, how long should we block waiting for that state?
98 : // This is not functionally necessary (clients will retry), but avoids generating a lot of
99 : // failed API calls while tenants are activating.
100 : #[cfg(not(feature = "testing"))]
101 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);
102 :
103 : // Tests run on slow/oversubscribed nodes, and may need to wait much longer for tenants to
104 : // finish attaching, if calls to remote storage are slow.
105 : #[cfg(feature = "testing")]
106 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
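// A minimal sketch of the pattern most handlers below follow: look up the
// attached tenant shard, then block (bounded by ACTIVE_TENANT_TIMEOUT) until
// it becomes Active, relying on the error conversions below to turn a timeout
// into a retryable 503-style response.
//
//     let tenant = state
//         .tenant_manager
//         .get_attached_tenant_shard(tenant_shard_id)?;
//     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;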
107 :
108 : pub struct State {
109 : conf: &'static PageServerConf,
110 : tenant_manager: Arc<TenantManager>,
111 : auth: Option<Arc<SwappableJwtAuth>>,
112 : allowlist_routes: &'static [&'static str],
113 : remote_storage: GenericRemoteStorage,
114 : broker_client: storage_broker::BrokerClientChannel,
115 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
116 : deletion_queue_client: DeletionQueueClient,
117 : secondary_controller: SecondaryController,
118 : latest_utilization: tokio::sync::Mutex<Option<(std::time::Instant, bytes::Bytes)>>,
119 : feature_resolver: FeatureResolver,
120 : }
121 :
122 : impl State {
123 : #[allow(clippy::too_many_arguments)]
124 0 : pub fn new(
125 0 : conf: &'static PageServerConf,
126 0 : tenant_manager: Arc<TenantManager>,
127 0 : auth: Option<Arc<SwappableJwtAuth>>,
128 0 : remote_storage: GenericRemoteStorage,
129 0 : broker_client: storage_broker::BrokerClientChannel,
130 0 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
131 0 : deletion_queue_client: DeletionQueueClient,
132 0 : secondary_controller: SecondaryController,
133 0 : feature_resolver: FeatureResolver,
134 0 : ) -> anyhow::Result<Self> {
135 0 : let allowlist_routes = &[
136 0 : "/v1/status",
137 0 : "/v1/doc",
138 0 : "/swagger.yml",
139 0 : "/metrics",
140 0 : "/profile/cpu",
141 0 : "/profile/heap",
142 0 : ];
143 0 : Ok(Self {
144 0 : conf,
145 0 : tenant_manager,
146 0 : auth,
147 0 : allowlist_routes,
148 0 : remote_storage,
149 0 : broker_client,
150 0 : disk_usage_eviction_state,
151 0 : deletion_queue_client,
152 0 : secondary_controller,
153 0 : latest_utilization: Default::default(),
154 0 : feature_resolver,
155 0 : })
156 0 : }
157 : }
158 :
159 : #[inline(always)]
160 0 : fn get_state(request: &Request<Body>) -> &State {
161 0 : request
162 0 : .data::<Arc<State>>()
163 0 : .expect("unknown state type")
164 0 : .as_ref()
165 0 : }
166 :
167 : #[inline(always)]
168 0 : fn get_config(request: &Request<Body>) -> &'static PageServerConf {
169 0 : get_state(request).conf
170 0 : }
171 :
172 : /// Check that the requester is authorized to operate on given tenant
173 0 : fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
174 0 : check_permission_with(request, |claims| {
175 0 : crate::auth::check_permission(claims, tenant_id)
176 0 : })
177 0 : }
178 :
179 : impl From<PageReconstructError> for ApiError {
180 0 : fn from(pre: PageReconstructError) -> ApiError {
181 0 : match pre {
182 0 : PageReconstructError::Other(other) => ApiError::InternalServerError(other),
183 0 : PageReconstructError::MissingKey(e) => ApiError::InternalServerError(e.into()),
184 0 : PageReconstructError::Cancelled => ApiError::Cancelled,
185 0 : PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()),
186 0 : PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre),
187 : }
188 0 : }
189 : }
190 :
191 : impl From<TenantMapInsertError> for ApiError {
192 0 : fn from(tmie: TenantMapInsertError) -> ApiError {
193 0 : match tmie {
194 0 : TenantMapInsertError::SlotError(e) => e.into(),
195 0 : TenantMapInsertError::SlotUpsertError(e) => e.into(),
196 0 : TenantMapInsertError::Other(e) => ApiError::InternalServerError(e),
197 : }
198 0 : }
199 : }
200 :
201 : impl From<TenantSlotError> for ApiError {
202 0 : fn from(e: TenantSlotError) -> ApiError {
203 : use TenantSlotError::*;
204 0 : match e {
205 0 : NotFound(tenant_id) => {
206 0 : ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into())
207 : }
208 : InProgress => {
209 0 : ApiError::ResourceUnavailable("Tenant is being modified concurrently".into())
210 : }
211 0 : MapState(e) => e.into(),
212 : }
213 0 : }
214 : }
215 :
216 : impl From<TenantSlotUpsertError> for ApiError {
217 0 : fn from(e: TenantSlotUpsertError) -> ApiError {
218 : use TenantSlotUpsertError::*;
219 0 : match e {
220 0 : InternalError(e) => ApiError::InternalServerError(anyhow::anyhow!("{e}")),
221 0 : MapState(e) => e.into(),
222 0 : ShuttingDown(_) => ApiError::ShuttingDown,
223 : }
224 0 : }
225 : }
226 :
227 : impl From<UpsertLocationError> for ApiError {
228 0 : fn from(e: UpsertLocationError) -> ApiError {
229 : use UpsertLocationError::*;
230 0 : match e {
231 0 : BadRequest(e) => ApiError::BadRequest(e),
232 0 : Unavailable(_) => ApiError::ShuttingDown,
233 0 : e @ InProgress => ApiError::Conflict(format!("{e}")),
234 0 : Flush(e) | InternalError(e) => ApiError::InternalServerError(e),
235 : }
236 0 : }
237 : }
238 :
239 : impl From<TenantMapError> for ApiError {
240 0 : fn from(e: TenantMapError) -> ApiError {
241 : use TenantMapError::*;
242 0 : match e {
243 : StillInitializing | ShuttingDown => {
244 0 : ApiError::ResourceUnavailable(format!("{e}").into())
245 : }
246 : }
247 0 : }
248 : }
249 :
250 : impl From<TenantStateError> for ApiError {
251 0 : fn from(tse: TenantStateError) -> ApiError {
252 0 : match tse {
253 : TenantStateError::IsStopping(_) => {
254 0 : ApiError::ResourceUnavailable("Tenant is stopping".into())
255 : }
256 0 : TenantStateError::SlotError(e) => e.into(),
257 0 : TenantStateError::SlotUpsertError(e) => e.into(),
258 0 : TenantStateError::Other(e) => ApiError::InternalServerError(anyhow!(e)),
259 : }
260 0 : }
261 : }
262 :
263 : impl From<GetTenantError> for ApiError {
264 0 : fn from(tse: GetTenantError) -> ApiError {
265 0 : match tse {
266 0 : GetTenantError::NotFound(tid) => ApiError::NotFound(anyhow!("tenant {tid}").into()),
267 0 : GetTenantError::ShardNotFound(tid) => {
268 0 : ApiError::NotFound(anyhow!("tenant {tid}").into())
269 : }
270 : GetTenantError::NotActive(_) => {
271 : // Why is this not `ApiError::NotFound`?
272 : // Because we must be careful to never return 404 for a tenant if it does
273 : // in fact exist locally. If we did, the caller could draw the conclusion
274 : // that it can attach the tenant to another PS and we'd be in split-brain.
275 0 : ApiError::ResourceUnavailable("Tenant not yet active".into())
276 : }
277 0 : GetTenantError::MapState(e) => ApiError::ResourceUnavailable(format!("{e}").into()),
278 : }
279 0 : }
280 : }
281 :
282 : impl From<GetTimelineError> for ApiError {
283 0 : fn from(gte: GetTimelineError) -> Self {
284 : // Rationale: tenant is activated only after eligible timelines activate
285 0 : ApiError::NotFound(gte.into())
286 0 : }
287 : }
288 :
289 : impl From<GetActiveTenantError> for ApiError {
290 0 : fn from(e: GetActiveTenantError) -> ApiError {
291 0 : match e {
292 0 : GetActiveTenantError::Broken(reason) => {
293 0 : ApiError::InternalServerError(anyhow!("tenant is broken: {}", reason))
294 : }
295 : GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
296 0 : ApiError::ShuttingDown
297 : }
298 0 : GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")),
299 0 : GetActiveTenantError::Cancelled => ApiError::ShuttingDown,
300 0 : GetActiveTenantError::NotFound(gte) => gte.into(),
301 : GetActiveTenantError::WaitForActiveTimeout { .. } => {
302 0 : ApiError::ResourceUnavailable(format!("{e}").into())
303 : }
304 : GetActiveTenantError::SwitchedTenant => {
305 : // in our HTTP handlers, this error doesn't happen
306 : // TODO: separate error types
307 0 : ApiError::ResourceUnavailable("switched tenant".into())
308 : }
309 : }
310 0 : }
311 : }
312 :
313 : impl From<crate::tenant::DeleteTimelineError> for ApiError {
314 0 : fn from(value: crate::tenant::DeleteTimelineError) -> Self {
315 : use crate::tenant::DeleteTimelineError::*;
316 0 : match value {
317 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
318 0 : HasChildren(children) => ApiError::PreconditionFailed(
319 0 : format!("Cannot delete timeline which has child timelines: {children:?}")
320 0 : .into_boxed_str(),
321 0 : ),
322 0 : a @ AlreadyInProgress(_) => ApiError::Conflict(a.to_string()),
323 0 : Cancelled => ApiError::ResourceUnavailable("shutting down".into()),
324 0 : Other(e) => ApiError::InternalServerError(e),
325 : }
326 0 : }
327 : }
328 :
329 : impl From<crate::tenant::TimelineArchivalError> for ApiError {
330 0 : fn from(value: crate::tenant::TimelineArchivalError) -> Self {
331 : use crate::tenant::TimelineArchivalError::*;
332 0 : match value {
333 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
334 0 : Timeout => ApiError::Timeout("hit pageserver internal timeout".into()),
335 0 : Cancelled => ApiError::ShuttingDown,
336 0 : e @ HasArchivedParent(_) => {
337 0 : ApiError::PreconditionFailed(e.to_string().into_boxed_str())
338 : }
339 0 : HasUnarchivedChildren(children) => ApiError::PreconditionFailed(
340 0 : format!(
341 0 : "Cannot archive timeline which has non-archived child timelines: {children:?}"
342 0 : )
343 0 : .into_boxed_str(),
344 0 : ),
345 0 : a @ AlreadyInProgress => ApiError::Conflict(a.to_string()),
346 0 : Other(e) => ApiError::InternalServerError(e),
347 : }
348 0 : }
349 : }
350 :
351 : impl From<crate::tenant::mgr::DeleteTimelineError> for ApiError {
352 0 : fn from(value: crate::tenant::mgr::DeleteTimelineError) -> Self {
353 : use crate::tenant::mgr::DeleteTimelineError::*;
354 0 : match value {
355 : // Report Precondition failed so the client can distinguish the
356 : // "tenant is missing" case from the "timeline is missing" case
357 0 : Tenant(GetTenantError::NotFound(..)) => ApiError::PreconditionFailed(
358 0 : "Requested tenant is missing".to_owned().into_boxed_str(),
359 0 : ),
360 0 : Tenant(t) => ApiError::from(t),
361 0 : Timeline(t) => ApiError::from(t),
362 : }
363 0 : }
364 : }
365 :
366 : impl From<crate::tenant::mgr::DeleteTenantError> for ApiError {
367 0 : fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self {
368 : use crate::tenant::mgr::DeleteTenantError::*;
369 0 : match value {
370 0 : SlotError(e) => e.into(),
371 0 : Other(o) => ApiError::InternalServerError(o),
372 0 : Cancelled => ApiError::ShuttingDown,
373 : }
374 0 : }
375 : }
376 :
377 : impl From<crate::tenant::secondary::SecondaryTenantError> for ApiError {
378 0 : fn from(ste: crate::tenant::secondary::SecondaryTenantError) -> ApiError {
379 : use crate::tenant::secondary::SecondaryTenantError;
380 0 : match ste {
381 0 : SecondaryTenantError::GetTenant(gte) => gte.into(),
382 0 : SecondaryTenantError::ShuttingDown => ApiError::ShuttingDown,
383 : }
384 0 : }
385 : }
386 :
387 : impl From<crate::tenant::FinalizeTimelineImportError> for ApiError {
388 0 : fn from(err: crate::tenant::FinalizeTimelineImportError) -> ApiError {
389 : use crate::tenant::FinalizeTimelineImportError::*;
390 0 : match err {
391 : ImportTaskStillRunning => {
392 0 : ApiError::ResourceUnavailable("Import task still running".into())
393 : }
394 0 : ShuttingDown => ApiError::ShuttingDown,
395 : }
396 0 : }
397 : }
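// For orientation: the conversions above funnel domain errors into ApiError
// variants whose HTTP status codes are defined in http_utils::error. Judging
// by the variant names (an assumption, not restated from that crate), the
// rough mapping is: NotFound -> 404, Conflict -> 409, PreconditionFailed ->
// 412, ResourceUnavailable / ShuttingDown -> 503, InternalServerError -> 500.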
398 :
399 : // Helper function to construct a TimelineInfo struct for a timeline
400 0 : async fn build_timeline_info(
401 0 : timeline: &Arc<Timeline>,
402 0 : include_non_incremental_logical_size: bool,
403 0 : force_await_initial_logical_size: bool,
404 0 : include_image_consistent_lsn: bool,
405 0 : ctx: &RequestContext,
406 0 : ) -> anyhow::Result<TimelineInfo> {
407 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
408 :
409 0 : if force_await_initial_logical_size {
410 0 : timeline.clone().await_initial_logical_size().await
411 0 : }
412 :
413 0 : let mut info = build_timeline_info_common(
414 0 : timeline,
415 0 : ctx,
416 0 : tenant::timeline::GetLogicalSizePriority::Background,
417 0 : )
418 0 : .await?;
419 0 : if include_non_incremental_logical_size {
420 : // XXX we should be using spawn_ondemand_logical_size_calculation here.
421 : // Otherwise, if someone deletes the timeline / detaches the tenant while
422 : // we're executing this function, we will outlive the timeline on-disk state.
423 : info.current_logical_size_non_incremental = Some(
424 0 : timeline
425 0 : .get_current_logical_size_non_incremental(info.last_record_lsn, ctx)
426 0 : .await?,
427 : );
428 0 : }
429 : // HADRON
430 0 : if include_image_consistent_lsn {
431 0 : info.image_consistent_lsn = Some(timeline.compute_image_consistent_lsn().await?);
432 0 : }
433 0 : Ok(info)
434 0 : }
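// The three boolean flags above correspond to optional query parameters
// parsed by the list/detail handlers further down, e.g. (illustrative URL,
// route prefix elided):
//
//     GET ...?include-non-incremental-logical-size=true
//            &force-await-initial-logical-size=true
//            &include-image-consistent-lsn=true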
435 :
436 0 : async fn build_timeline_info_common(
437 0 : timeline: &Arc<Timeline>,
438 0 : ctx: &RequestContext,
439 0 : logical_size_task_priority: tenant::timeline::GetLogicalSizePriority,
440 0 : ) -> anyhow::Result<TimelineInfo> {
441 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
442 0 : let initdb_lsn = timeline.initdb_lsn;
443 0 : let last_record_lsn = timeline.get_last_record_lsn();
444 0 : let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
445 0 : let guard = timeline.last_received_wal.lock().unwrap();
446 0 : if let Some(info) = guard.as_ref() {
447 0 : (
448 0 : Some(format!("{}", info.wal_source_connconf)), // The password is hidden; this string is used for statistics only.
449 0 : Some(info.last_received_msg_lsn),
450 0 : Some(info.last_received_msg_ts),
451 0 : )
452 : } else {
453 0 : (None, None, None)
454 : }
455 : };
456 :
457 0 : let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
458 0 : let ancestor_lsn = match timeline.get_ancestor_lsn() {
459 0 : Lsn(0) => None,
460 0 : lsn @ Lsn(_) => Some(lsn),
461 : };
462 0 : let current_logical_size = timeline.get_current_logical_size(logical_size_task_priority, ctx);
463 0 : let current_physical_size = Some(timeline.layer_size_sum().await);
464 0 : let state = timeline.current_state();
465 : // Report is_archived = false if the timeline is still loading
466 0 : let is_archived = timeline.is_archived().unwrap_or(false);
467 0 : let remote_consistent_lsn_projected = timeline
468 0 : .get_remote_consistent_lsn_projected()
469 0 : .unwrap_or(Lsn(0));
470 0 : let remote_consistent_lsn_visible = timeline
471 0 : .get_remote_consistent_lsn_visible()
472 0 : .unwrap_or(Lsn(0));
473 0 : let is_invisible = timeline.remote_client.is_invisible().unwrap_or(false);
474 :
475 0 : let walreceiver_status = timeline.walreceiver_status();
476 :
477 0 : let (pitr_history_size, within_ancestor_pitr) = timeline.get_pitr_history_stats();
478 :
479 : // Externally, expose the lowest LSN that can be used to create a branch.
480 : // Internally we distinguish between the planned GC cutoff (PITR point) and the "applied" GC cutoff (where we
481 : // actually trimmed data to), which can pass each other when PITR is changed.
482 0 : let min_readable_lsn = std::cmp::max(
483 0 : timeline.get_gc_cutoff_lsn().unwrap_or_default(),
484 0 : *timeline.get_applied_gc_cutoff_lsn(),
485 : );
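// Worked example with made-up LSNs: if the planned PITR cutoff is 0/5000 but
// GC has only trimmed up to an applied cutoff of 0/4000, we advertise 0/5000
// (the policy bound). If PITR is then shortened so the plan moves to 0/3000
// while the applied cutoff is still 0/4000, we advertise 0/4000, since data
// below it is already gone. Taking the max is conservative in both directions.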
486 :
487 0 : let info = TimelineInfo {
488 0 : tenant_id: timeline.tenant_shard_id,
489 0 : timeline_id: timeline.timeline_id,
490 0 : ancestor_timeline_id,
491 0 : ancestor_lsn,
492 0 : disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
493 0 : remote_consistent_lsn: remote_consistent_lsn_projected,
494 0 : remote_consistent_lsn_visible,
495 0 : initdb_lsn,
496 0 : last_record_lsn,
497 0 : prev_record_lsn: Some(timeline.get_prev_record_lsn()),
498 0 : min_readable_lsn,
499 0 : applied_gc_cutoff_lsn: *timeline.get_applied_gc_cutoff_lsn(),
500 0 : current_logical_size: current_logical_size.size_dont_care_about_accuracy(),
501 0 : current_logical_size_is_accurate: match current_logical_size.accuracy() {
502 0 : tenant::timeline::logical_size::Accuracy::Approximate => false,
503 0 : tenant::timeline::logical_size::Accuracy::Exact => true,
504 : },
505 0 : directory_entries_counts: timeline.get_directory_metrics().to_vec(),
506 0 : current_physical_size,
507 0 : current_logical_size_non_incremental: None,
508 0 : pitr_history_size,
509 0 : within_ancestor_pitr,
510 0 : timeline_dir_layer_file_size_sum: None,
511 0 : wal_source_connstr,
512 0 : last_received_msg_lsn,
513 0 : last_received_msg_ts,
514 0 : pg_version: timeline.pg_version,
515 :
516 0 : state,
517 0 : is_archived: Some(is_archived),
518 0 : rel_size_migration: Some(timeline.get_rel_size_v2_status()),
519 0 : is_invisible: Some(is_invisible),
520 :
521 0 : walreceiver_status,
522 : // HADRON
523 0 : image_consistent_lsn: None,
524 : };
525 0 : Ok(info)
526 0 : }
527 :
528 0 : fn build_timeline_offloaded_info(offloaded: &Arc<OffloadedTimeline>) -> OffloadedTimelineInfo {
529 : let &OffloadedTimeline {
530 0 : tenant_shard_id,
531 0 : timeline_id,
532 0 : ancestor_retain_lsn,
533 0 : ancestor_timeline_id,
534 0 : archived_at,
535 : ..
536 0 : } = offloaded.as_ref();
537 0 : OffloadedTimelineInfo {
538 0 : tenant_id: tenant_shard_id,
539 0 : timeline_id,
540 0 : ancestor_retain_lsn,
541 0 : ancestor_timeline_id,
542 0 : archived_at: archived_at.and_utc(),
543 0 : }
544 0 : }
545 :
546 : // healthcheck handler
547 0 : async fn status_handler(
548 0 : request: Request<Body>,
549 0 : _cancel: CancellationToken,
550 0 : ) -> Result<Response<Body>, ApiError> {
551 0 : check_permission(&request, None)?;
552 0 : let config = get_config(&request);
553 0 : json_response(StatusCode::OK, StatusResponse { id: config.id })
554 0 : }
555 :
556 0 : async fn reload_auth_validation_keys_handler(
557 0 : request: Request<Body>,
558 0 : _cancel: CancellationToken,
559 0 : ) -> Result<Response<Body>, ApiError> {
560 0 : check_permission(&request, None)?;
561 0 : let config = get_config(&request);
562 0 : let state = get_state(&request);
563 0 : let Some(shared_auth) = &state.auth else {
564 0 : return json_response(StatusCode::BAD_REQUEST, ());
565 : };
566 : // unwrap is ok because the check is performed when creating the config, so the path is set and exists
567 0 : let key_path = config.auth_validation_public_key_path.as_ref().unwrap();
568 0 : info!("Reloading public key(s) for verifying JWT tokens from {key_path:?}");
569 :
570 0 : match utils::auth::JwtAuth::from_key_path(key_path) {
571 0 : Ok(new_auth) => {
572 0 : shared_auth.swap(new_auth);
573 0 : json_response(StatusCode::OK, ())
574 : }
575 0 : Err(e) => {
576 0 : let err_msg = "Error reloading public keys";
577 0 : warn!("Error reloading public keys from {key_path:?}: {e:}");
578 0 : json_response(
579 : StatusCode::INTERNAL_SERVER_ERROR,
580 0 : HttpErrorBody::from_msg(err_msg.to_string()),
581 : )
582 : }
583 : }
584 0 : }
585 :
586 0 : async fn timeline_create_handler(
587 0 : mut request: Request<Body>,
588 0 : _cancel: CancellationToken,
589 0 : ) -> Result<Response<Body>, ApiError> {
590 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
591 0 : let request_data: TimelineCreateRequest = json_request(&mut request).await?;
592 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
593 :
594 0 : let new_timeline_id = request_data.new_timeline_id;
595 : // fill in the default pg_version if not provided & convert request into domain model
596 0 : let params: tenant::CreateTimelineParams = match request_data.mode {
597 : TimelineCreateRequestMode::Bootstrap {
598 0 : existing_initdb_timeline_id,
599 0 : pg_version,
600 0 : } => tenant::CreateTimelineParams::Bootstrap(tenant::CreateTimelineParamsBootstrap {
601 0 : new_timeline_id,
602 0 : existing_initdb_timeline_id,
603 0 : pg_version: pg_version.unwrap_or(DEFAULT_PG_VERSION),
604 0 : }),
605 : TimelineCreateRequestMode::Branch {
606 0 : ancestor_timeline_id,
607 0 : ancestor_start_lsn,
608 : read_only: _,
609 : pg_version: _,
610 0 : } => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
611 0 : new_timeline_id,
612 0 : ancestor_timeline_id,
613 0 : ancestor_start_lsn,
614 0 : }),
615 : TimelineCreateRequestMode::ImportPgdata {
616 : import_pgdata:
617 : TimelineCreateRequestModeImportPgdata {
618 0 : location,
619 0 : idempotency_key,
620 : },
621 : } => tenant::CreateTimelineParams::ImportPgdata(tenant::CreateTimelineParamsImportPgdata {
622 0 : idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new(
623 0 : idempotency_key.0,
624 : ),
625 0 : new_timeline_id,
626 : location: {
627 : use import_pgdata::index_part_format::Location;
628 : use pageserver_api::models::ImportPgdataLocation;
629 0 : match location {
630 : #[cfg(feature = "testing")]
631 0 : ImportPgdataLocation::LocalFs { path } => Location::LocalFs { path },
632 : ImportPgdataLocation::AwsS3 {
633 0 : region,
634 0 : bucket,
635 0 : key,
636 0 : } => Location::AwsS3 {
637 0 : region,
638 0 : bucket,
639 0 : key,
640 0 : },
641 : }
642 : },
643 : }),
644 : };
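// Illustrative request bodies for the three modes (hypothetical JSON shapes:
// they assume TimelineCreateRequestMode flattens into the request body, which
// is not visible in this file):
//
//     {"new_timeline_id": "...", "pg_version": 16}                  // bootstrap
//     {"new_timeline_id": "...", "ancestor_timeline_id": "...",
//      "ancestor_start_lsn": "0/169AD58"}                           // branch
//     {"new_timeline_id": "...", "import_pgdata": {...}}            // import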
645 :
646 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
647 :
648 0 : let state = get_state(&request);
649 :
650 0 : async {
651 0 : let tenant = state
652 0 : .tenant_manager
653 0 : .get_attached_tenant_shard(tenant_shard_id)?;
654 :
655 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
656 :
657 : // Earlier versions of the code had pg_version and ancestor_lsn in the span.
658 : // Continue to provide that information, but through a log message that doesn't require us to destructure.
659 0 : tracing::info!(?params, "creating timeline");
660 :
661 0 : match tenant
662 0 : .create_timeline(params, state.broker_client.clone(), &ctx)
663 0 : .await
664 : {
665 0 : Ok(new_timeline) => {
666 : // Created. Construct a TimelineInfo for it.
667 0 : let timeline_info = build_timeline_info_common(
668 0 : &new_timeline,
669 0 : &ctx,
670 0 : tenant::timeline::GetLogicalSizePriority::User,
671 0 : )
672 0 : .await
673 0 : .map_err(ApiError::InternalServerError)?;
674 0 : json_response(StatusCode::CREATED, timeline_info)
675 : }
676 0 : Err(_) if tenant.cancel.is_cancelled() => {
677 : // In case we get some ugly error type during shutdown, cast it into a clean 503.
678 0 : json_response(
679 : StatusCode::SERVICE_UNAVAILABLE,
680 0 : HttpErrorBody::from_msg("Tenant shutting down".to_string()),
681 : )
682 : }
683 0 : Err(e @ tenant::CreateTimelineError::Conflict) => {
684 0 : json_response(StatusCode::CONFLICT, HttpErrorBody::from_msg(e.to_string()))
685 : }
686 0 : Err(e @ tenant::CreateTimelineError::AlreadyCreating) => json_response(
687 : StatusCode::TOO_MANY_REQUESTS,
688 0 : HttpErrorBody::from_msg(e.to_string()),
689 : ),
690 0 : Err(tenant::CreateTimelineError::AncestorLsn(err)) => json_response(
691 : StatusCode::NOT_ACCEPTABLE,
692 0 : HttpErrorBody::from_msg(format!("{err:#}")),
693 : ),
694 0 : Err(e @ tenant::CreateTimelineError::AncestorNotActive) => json_response(
695 : StatusCode::SERVICE_UNAVAILABLE,
696 0 : HttpErrorBody::from_msg(e.to_string()),
697 : ),
698 0 : Err(e @ tenant::CreateTimelineError::AncestorArchived) => json_response(
699 : StatusCode::NOT_ACCEPTABLE,
700 0 : HttpErrorBody::from_msg(e.to_string()),
701 : ),
702 0 : Err(tenant::CreateTimelineError::ShuttingDown) => json_response(
703 : StatusCode::SERVICE_UNAVAILABLE,
704 0 : HttpErrorBody::from_msg("tenant shutting down".to_string()),
705 : ),
706 0 : Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
707 : }
708 0 : }
709 0 : .instrument(info_span!("timeline_create",
710 : tenant_id = %tenant_shard_id.tenant_id,
711 0 : shard_id = %tenant_shard_id.shard_slug(),
712 : timeline_id = %new_timeline_id,
713 : ))
714 0 : .await
715 0 : }
716 :
717 0 : async fn timeline_list_handler(
718 0 : request: Request<Body>,
719 0 : _cancel: CancellationToken,
720 0 : ) -> Result<Response<Body>, ApiError> {
721 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
722 0 : let include_non_incremental_logical_size: Option<bool> =
723 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
724 0 : let force_await_initial_logical_size: Option<bool> =
725 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
726 0 : let include_image_consistent_lsn: Option<bool> =
727 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
728 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
729 :
730 0 : let state = get_state(&request);
731 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
732 :
733 0 : let response_data = async {
734 0 : let tenant = state
735 0 : .tenant_manager
736 0 : .get_attached_tenant_shard(tenant_shard_id)?;
737 :
738 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
739 :
740 0 : let timelines = tenant.list_timelines();
741 :
742 0 : let mut response_data = Vec::with_capacity(timelines.len());
743 0 : for timeline in timelines {
744 0 : let timeline_info = build_timeline_info(
745 0 : &timeline,
746 0 : include_non_incremental_logical_size.unwrap_or(false),
747 0 : force_await_initial_logical_size.unwrap_or(false),
748 0 : include_image_consistent_lsn.unwrap_or(false),
749 0 : &ctx,
750 : )
751 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
752 0 : .await
753 0 : .context("Failed to build timeline info")
754 0 : .map_err(ApiError::InternalServerError)?;
755 :
756 0 : response_data.push(timeline_info);
757 : }
758 0 : Ok::<Vec<TimelineInfo>, ApiError>(response_data)
759 0 : }
760 0 : .instrument(info_span!("timeline_list",
761 : tenant_id = %tenant_shard_id.tenant_id,
762 0 : shard_id = %tenant_shard_id.shard_slug()))
763 0 : .await?;
764 :
765 0 : json_response(StatusCode::OK, response_data)
766 0 : }
767 :
768 0 : async fn timeline_and_offloaded_list_handler(
769 0 : request: Request<Body>,
770 0 : _cancel: CancellationToken,
771 0 : ) -> Result<Response<Body>, ApiError> {
772 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
773 0 : let include_non_incremental_logical_size: Option<bool> =
774 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
775 0 : let force_await_initial_logical_size: Option<bool> =
776 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
777 0 : let include_image_consistent_lsn: Option<bool> =
778 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
779 :
780 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
781 :
782 0 : let state = get_state(&request);
783 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
784 :
785 0 : let response_data = async {
786 0 : let tenant = state
787 0 : .tenant_manager
788 0 : .get_attached_tenant_shard(tenant_shard_id)?;
789 :
790 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
791 :
792 0 : let (timelines, offloadeds) = tenant.list_timelines_and_offloaded();
793 :
794 0 : let mut timeline_infos = Vec::with_capacity(timelines.len());
795 0 : for timeline in timelines {
796 0 : let timeline_info = build_timeline_info(
797 0 : &timeline,
798 0 : include_non_incremental_logical_size.unwrap_or(false),
799 0 : force_await_initial_logical_size.unwrap_or(false),
800 0 : include_image_consistent_lsn.unwrap_or(false),
801 0 : &ctx,
802 : )
803 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
804 0 : .await
805 0 : .context("Failed to build timeline info")
806 0 : .map_err(ApiError::InternalServerError)?;
807 :
808 0 : timeline_infos.push(timeline_info);
809 : }
810 0 : let offloaded_infos = offloadeds
811 0 : .into_iter()
812 0 : .map(|offloaded| build_timeline_offloaded_info(&offloaded))
813 0 : .collect::<Vec<_>>();
814 0 : let res = TimelinesInfoAndOffloaded {
815 0 : timelines: timeline_infos,
816 0 : offloaded: offloaded_infos,
817 0 : };
818 0 : Ok::<TimelinesInfoAndOffloaded, ApiError>(res)
819 0 : }
820 0 : .instrument(info_span!("timeline_and_offloaded_list",
821 : tenant_id = %tenant_shard_id.tenant_id,
822 0 : shard_id = %tenant_shard_id.shard_slug()))
823 0 : .await?;
824 :
825 0 : json_response(StatusCode::OK, response_data)
826 0 : }
827 :
828 0 : async fn timeline_preserve_initdb_handler(
829 0 : request: Request<Body>,
830 0 : _cancel: CancellationToken,
831 0 : ) -> Result<Response<Body>, ApiError> {
832 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
833 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
834 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
835 0 : let state = get_state(&request);
836 :
837 : // Part of the process for disaster recovery from safekeeper-stored WAL:
838 : // If we don't recover into a new timeline but want to keep the timeline ID,
839 : // then the initdb archive is deleted. This endpoint copies it to a different
840 : // location where timeline recreation can find it.
841 :
842 0 : async {
843 0 : let tenant = state
844 0 : .tenant_manager
845 0 : .get_attached_tenant_shard(tenant_shard_id)?;
846 :
847 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
848 :
849 0 : timeline
850 0 : .preserve_initdb_archive()
851 0 : .await
852 0 : .context("preserving initdb archive")
853 0 : .map_err(ApiError::InternalServerError)?;
854 :
855 0 : Ok::<_, ApiError>(())
856 0 : }
857 0 : .instrument(info_span!("timeline_preserve_initdb_archive",
858 : tenant_id = %tenant_shard_id.tenant_id,
859 0 : shard_id = %tenant_shard_id.shard_slug(),
860 : %timeline_id))
861 0 : .await?;
862 :
863 0 : json_response(StatusCode::OK, ())
864 0 : }
865 :
866 0 : async fn timeline_archival_config_handler(
867 0 : mut request: Request<Body>,
868 0 : _cancel: CancellationToken,
869 0 : ) -> Result<Response<Body>, ApiError> {
870 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
871 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
872 :
873 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
874 :
875 0 : let request_data: TimelineArchivalConfigRequest = json_request(&mut request).await?;
876 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
877 0 : let state = get_state(&request);
878 :
879 0 : async {
880 0 : let tenant = state
881 0 : .tenant_manager
882 0 : .get_attached_tenant_shard(tenant_shard_id)?;
883 :
884 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
885 :
886 0 : tenant
887 0 : .apply_timeline_archival_config(
888 0 : timeline_id,
889 0 : request_data.state,
890 0 : state.broker_client.clone(),
891 0 : ctx,
892 0 : )
893 0 : .await?;
894 0 : Ok::<_, ApiError>(())
895 0 : }
896 0 : .instrument(info_span!("timeline_archival_config",
897 : tenant_id = %tenant_shard_id.tenant_id,
898 0 : shard_id = %tenant_shard_id.shard_slug(),
899 : state = ?request_data.state,
900 : %timeline_id))
901 0 : .await?;
902 :
903 0 : json_response(StatusCode::OK, ())
904 0 : }
905 :
906 : /// This API is used to patch the index part of a timeline. You must ensure such patches are safe to apply. Use this API as an emergency
907 : /// measure only.
908 : ///
909 : /// Some examples of safe patches:
910 : /// - Increase the gc_cutoff and gc_compaction_cutoff to a larger value in case a bug failed to bump the cutoff and caused read errors.
911 : /// - Force set the index part to use reldir v2 (migrating/migrated).
912 : ///
913 : /// Some examples of unsafe patches:
914 : /// - Force set the index part from v2 to v1 (legacy). This will cause the code path to ignore anything written to the new keyspace, causing
915 : /// errors.
916 : /// - Decrease the gc_cutoff without validating that the data really exists. This will cause read errors in the background.
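///
/// Illustrative request body (hypothetical values; the field names are those
/// of `TimelinePatchIndexPartRequest` as consumed below, and the exact
/// serialization is an assumption):
///
/// ```json
/// {
///     "rel_size_migration": "migrated",
///     "gc_compaction_last_completed_lsn": "0/42000000",
///     "applied_gc_cutoff_lsn": "0/40000000",
///     "force_index_update": false
/// }
/// ```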
917 0 : async fn timeline_patch_index_part_handler(
918 0 : mut request: Request<Body>,
919 0 : _cancel: CancellationToken,
920 0 : ) -> Result<Response<Body>, ApiError> {
921 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
922 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
923 :
924 0 : let request_data: TimelinePatchIndexPartRequest = json_request(&mut request).await?;
925 0 : check_permission(&request, None)?; // require global permission for this request
926 0 : let state = get_state(&request);
927 :
928 0 : async {
929 0 : let timeline =
930 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
931 0 : .await?;
932 :
933 0 : if let Some(rel_size_migration) = request_data.rel_size_migration {
934 0 : timeline
935 0 : .update_rel_size_v2_status(rel_size_migration)
936 0 : .map_err(ApiError::InternalServerError)?;
937 0 : }
938 :
939 0 : if let Some(gc_compaction_last_completed_lsn) =
940 0 : request_data.gc_compaction_last_completed_lsn
941 : {
942 0 : timeline
943 0 : .update_gc_compaction_state(GcCompactionState {
944 0 : last_completed_lsn: gc_compaction_last_completed_lsn,
945 0 : })
946 0 : .map_err(ApiError::InternalServerError)?;
947 0 : }
948 :
949 0 : if let Some(applied_gc_cutoff_lsn) = request_data.applied_gc_cutoff_lsn {
950 0 : {
951 0 : let guard = timeline.applied_gc_cutoff_lsn.lock_for_write();
952 0 : guard.store_and_unlock(applied_gc_cutoff_lsn);
953 0 : }
954 0 : }
955 :
956 0 : if request_data.force_index_update {
957 0 : timeline
958 0 : .remote_client
959 0 : .force_schedule_index_upload()
960 0 : .context("force schedule index upload")
961 0 : .map_err(ApiError::InternalServerError)?;
962 0 : }
963 :
964 0 : Ok::<_, ApiError>(())
965 0 : }
966 0 : .instrument(info_span!("timeline_patch_index_part",
967 : tenant_id = %tenant_shard_id.tenant_id,
968 0 : shard_id = %tenant_shard_id.shard_slug(),
969 : %timeline_id))
970 0 : .await?;
971 :
972 0 : json_response(StatusCode::OK, ())
973 0 : }
974 :
975 0 : async fn timeline_detail_handler(
976 0 : request: Request<Body>,
977 0 : _cancel: CancellationToken,
978 0 : ) -> Result<Response<Body>, ApiError> {
979 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
980 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
981 0 : let include_non_incremental_logical_size: Option<bool> =
982 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
983 0 : let force_await_initial_logical_size: Option<bool> =
984 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
985 : // HADRON
986 0 : let include_image_consistent_lsn: Option<bool> =
987 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
988 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
989 :
990 : // Logical size calculation needs downloading.
991 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
992 0 : let state = get_state(&request);
993 :
994 0 : let timeline_info = async {
995 0 : let tenant = state
996 0 : .tenant_manager
997 0 : .get_attached_tenant_shard(tenant_shard_id)?;
998 :
999 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1000 :
1001 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
1002 0 : let ctx = &ctx.with_scope_timeline(&timeline);
1003 :
1004 0 : let timeline_info = build_timeline_info(
1005 0 : &timeline,
1006 0 : include_non_incremental_logical_size.unwrap_or(false),
1007 0 : force_await_initial_logical_size.unwrap_or(false),
1008 0 : include_image_consistent_lsn.unwrap_or(false),
1009 0 : ctx,
1010 0 : )
1011 0 : .await
1012 0 : .context("get local timeline info")
1013 0 : .map_err(ApiError::InternalServerError)?;
1014 :
1015 0 : Ok::<_, ApiError>(timeline_info)
1016 0 : }
1017 0 : .instrument(info_span!("timeline_detail",
1018 : tenant_id = %tenant_shard_id.tenant_id,
1019 0 : shard_id = %tenant_shard_id.shard_slug(),
1020 : %timeline_id))
1021 0 : .await?;
1022 :
1023 0 : json_response(StatusCode::OK, timeline_info)
1024 0 : }
1025 :
1026 0 : async fn get_lsn_by_timestamp_handler(
1027 0 : request: Request<Body>,
1028 0 : cancel: CancellationToken,
1029 0 : ) -> Result<Response<Body>, ApiError> {
1030 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1031 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1032 0 : let state = get_state(&request);
1033 :
1034 0 : if !tenant_shard_id.is_shard_zero() {
1035 : // Requires SLRU contents, which are only stored on shard zero
1036 0 : return Err(ApiError::BadRequest(anyhow!(
1037 0 : "Lsn calculations by timestamp are only available on shard zero"
1038 0 : )));
1039 0 : }
1040 :
1041 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1042 0 : let timestamp_raw = must_get_query_param(&request, "timestamp")?;
1043 0 : let timestamp = humantime::parse_rfc3339(&timestamp_raw)
1044 0 : .with_context(|| format!("Invalid time: {timestamp_raw:?}"))
1045 0 : .map_err(ApiError::BadRequest)?;
1046 0 : let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);
1047 :
1048 0 : let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false);
1049 :
1050 0 : let timeline =
1051 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1052 0 : .await?;
1053 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1054 0 : .with_scope_timeline(&timeline);
1055 0 : let result = timeline
1056 0 : .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx)
1057 0 : .await?;
1058 :
1059 : #[derive(serde::Serialize, Debug)]
1060 : struct Result {
1061 : lsn: Lsn,
1062 : kind: &'static str,
1063 : #[serde(default)]
1064 : #[serde(skip_serializing_if = "Option::is_none")]
1065 : #[serde(flatten)]
1066 : lease: Option<LsnLease>,
1067 : }
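// Illustrative serialized response (hypothetical values; assumes Lsn's usual
// "X/Y" display form, with the lease fields flattened in when
// ?with_lease=true was passed and granting succeeded):
//
//     {"lsn": "0/15D3DD8", "kind": "present", "valid_until": "..."}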
1068 0 : let (lsn, kind) = match result {
1069 0 : LsnForTimestamp::Present(lsn) => (lsn, "present"),
1070 0 : LsnForTimestamp::Future(lsn) => (lsn, "future"),
1071 0 : LsnForTimestamp::Past(lsn) => (lsn, "past"),
1072 0 : LsnForTimestamp::NoData(lsn) => (lsn, "nodata"),
1073 : };
1074 :
1075 0 : let lease = if with_lease {
1076 0 : timeline
1077 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx)
1078 0 : .inspect_err(|_| {
1079 0 : warn!("fail to grant a lease to {}", lsn);
1080 0 : })
1081 0 : .ok()
1082 : } else {
1083 0 : None
1084 : };
1085 :
1086 0 : let result = Result { lsn, kind, lease };
1087 0 : let valid_until = result
1088 0 : .lease
1089 0 : .as_ref()
1090 0 : .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string());
1091 0 : tracing::info!(
1092 : lsn=?result.lsn,
1093 : kind=%result.kind,
1094 : timestamp=%timestamp_raw,
1095 : valid_until=?valid_until,
1096 0 : "lsn_by_timestamp finished"
1097 : );
1098 0 : json_response(StatusCode::OK, result)
1099 0 : }
1100 :
1101 0 : async fn get_timestamp_of_lsn_handler(
1102 0 : request: Request<Body>,
1103 0 : _cancel: CancellationToken,
1104 0 : ) -> Result<Response<Body>, ApiError> {
1105 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1106 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1107 0 : let state = get_state(&request);
1108 :
1109 0 : if !tenant_shard_id.is_shard_zero() {
1110 : // Requires SLRU contents, which are only stored on shard zero
1111 0 : return Err(ApiError::BadRequest(anyhow!(
1112 0 : "Timestamp calculations by lsn are only available on shard zero"
1113 0 : )));
1114 0 : }
1115 :
1116 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1117 :
1118 0 : let lsn_str = must_get_query_param(&request, "lsn")?;
1119 0 : let lsn = Lsn::from_str(&lsn_str)
1120 0 : .with_context(|| format!("Invalid LSN: {lsn_str:?}"))
1121 0 : .map_err(ApiError::BadRequest)?;
1122 :
1123 0 : let timeline =
1124 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1125 0 : .await?;
1126 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1127 0 : .with_scope_timeline(&timeline);
1128 0 : let result = timeline.get_timestamp_for_lsn(lsn, &ctx).await?;
1129 :
1130 0 : match result {
1131 0 : Some(time) => {
1132 0 : let time = format_rfc3339(
1133 0 : postgres_ffi::try_from_pg_timestamp(time).map_err(ApiError::InternalServerError)?,
1134 : )
1135 0 : .to_string();
1136 0 : json_response(StatusCode::OK, time)
1137 : }
1138 0 : None => Err(ApiError::PreconditionFailed(
1139 0 : format!("Timestamp for lsn {lsn} not found").into(),
1140 0 : )),
1141 : }
1142 0 : }
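// Illustrative exchange (hypothetical values): GET ...?lsn=0/15D3DD8 returns
// an RFC 3339 string such as "2024-01-01T00:00:00.000000Z" on success, or a
// precondition-failed error when no timestamp is recorded for that LSN.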
1143 :
1144 0 : async fn timeline_delete_handler(
1145 0 : request: Request<Body>,
1146 0 : _cancel: CancellationToken,
1147 0 : ) -> Result<Response<Body>, ApiError> {
1148 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1149 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1150 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1151 :
1152 0 : let state = get_state(&request);
1153 :
1154 0 : let tenant = state
1155 0 : .tenant_manager
1156 0 : .get_attached_tenant_shard(tenant_shard_id)
1157 0 : .map_err(|e| {
1158 0 : match e {
1159 : // GetTenantError has a built-in conversion to ApiError, but in this context we don't
1160 : // want to treat missing tenants as 404, to avoid ambiguity with successful deletions.
1161 : GetTenantError::NotFound(_) | GetTenantError::ShardNotFound(_) => {
1162 0 : ApiError::PreconditionFailed(
1163 0 : "Requested tenant is missing".to_string().into_boxed_str(),
1164 0 : )
1165 : }
1166 0 : e => e.into(),
1167 : }
1168 0 : })?;
1169 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1170 0 : tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))
1171 0 : .await?;
1172 :
1173 0 : json_response(StatusCode::ACCEPTED, ())
1174 0 : }
1175 :
1176 0 : async fn tenant_reset_handler(
1177 0 : request: Request<Body>,
1178 0 : _cancel: CancellationToken,
1179 0 : ) -> Result<Response<Body>, ApiError> {
1180 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1181 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1182 :
1183 0 : let drop_cache: Option<bool> = parse_query_param(&request, "drop_cache")?;
1184 :
1185 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1186 0 : let state = get_state(&request);
1187 0 : state
1188 0 : .tenant_manager
1189 0 : .reset_tenant(tenant_shard_id, drop_cache.unwrap_or(false), &ctx)
1190 0 : .await
1191 0 : .map_err(ApiError::InternalServerError)?;
1192 :
1193 0 : json_response(StatusCode::OK, ())
1194 0 : }
1195 :
1196 0 : async fn tenant_list_handler(
1197 0 : request: Request<Body>,
1198 0 : _cancel: CancellationToken,
1199 0 : ) -> Result<Response<Body>, ApiError> {
1200 0 : check_permission(&request, None)?;
1201 0 : let state = get_state(&request);
1202 :
1203 0 : let response_data = state
1204 0 : .tenant_manager
1205 0 : .list_tenants()
1206 0 : .map_err(|_| {
1207 0 : ApiError::ResourceUnavailable("Tenant map is initializing or shutting down".into())
1208 0 : })?
1209 0 : .iter()
1210 0 : .map(|(id, state, gen_)| TenantInfo {
1211 0 : id: *id,
1212 0 : state: state.clone(),
1213 0 : current_physical_size: None,
1214 0 : attachment_status: state.attachment_status(),
1215 0 : generation: (*gen_)
1216 0 : .into()
1217 0 : .expect("Tenants are always attached with a generation"),
1218 0 : gc_blocking: None,
1219 0 : })
1220 0 : .collect::<Vec<TenantInfo>>();
1221 :
1222 0 : json_response(StatusCode::OK, response_data)
1223 0 : }
1224 :
1225 0 : async fn tenant_status(
1226 0 : request: Request<Body>,
1227 0 : _cancel: CancellationToken,
1228 0 : ) -> Result<Response<Body>, ApiError> {
1229 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1230 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1231 0 : let state = get_state(&request);
1232 :
1233 : // In tests, sometimes we want to query the state of a tenant without auto-activating it if it's currently waiting.
1234 0 : let activate = true;
1235 : #[cfg(feature = "testing")]
1236 0 : let activate = parse_query_param(&request, "activate")?.unwrap_or(activate);
1237 :
1238 0 : let tenant_info = async {
1239 0 : let tenant = state
1240 0 : .tenant_manager
1241 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1242 :
1243 0 : if activate {
1244 : // This is advisory: we prefer to let the tenant activate on-demand when this function is
1245 : // called, but it is still valid to return 200 and describe the current state of the tenant
1246 : // if it doesn't make it into an active state.
1247 0 : tenant
1248 0 : .wait_to_become_active(ACTIVE_TENANT_TIMEOUT)
1249 0 : .await
1250 0 : .ok();
1251 0 : }
1252 :
1253 : // Calculate total physical size of all timelines
1254 0 : let mut current_physical_size = 0;
1255 0 : for timeline in tenant.list_timelines().iter() {
1256 0 : current_physical_size += timeline.layer_size_sum().await;
1257 : }
1258 :
1259 0 : let state = tenant.current_state();
1260 : Result::<_, ApiError>::Ok(TenantDetails {
1261 : tenant_info: TenantInfo {
1262 0 : id: tenant_shard_id,
1263 0 : state: state.clone(),
1264 0 : current_physical_size: Some(current_physical_size),
1265 0 : attachment_status: state.attachment_status(),
1266 0 : generation: tenant
1267 0 : .generation()
1268 0 : .into()
1269 0 : .expect("Tenants are always attached with a generation"),
1270 0 : gc_blocking: tenant.gc_block.summary().map(|x| format!("{x:?}")),
1271 : },
1272 0 : walredo: tenant.wal_redo_manager_status(),
1273 0 : timelines: tenant.list_timeline_ids(),
1274 : })
1275 0 : }
1276 0 : .instrument(info_span!("tenant_status_handler",
1277 : tenant_id = %tenant_shard_id.tenant_id,
1278 0 : shard_id = %tenant_shard_id.shard_slug()))
1279 0 : .await?;
1280 :
1281 0 : json_response(StatusCode::OK, tenant_info)
1282 0 : }
1283 :
1284 0 : async fn tenant_delete_handler(
1285 0 : request: Request<Body>,
1286 0 : _cancel: CancellationToken,
1287 0 : ) -> Result<Response<Body>, ApiError> {
1288 : // TODO openapi spec
1289 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1290 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1291 :
1292 0 : let state = get_state(&request);
1293 :
1294 0 : state
1295 0 : .tenant_manager
1296 0 : .delete_tenant(tenant_shard_id)
1297 0 : .instrument(info_span!("tenant_delete_handler",
1298 : tenant_id = %tenant_shard_id.tenant_id,
1299 0 : shard_id = %tenant_shard_id.shard_slug()
1300 : ))
1301 0 : .await?;
1302 :
1303 0 : json_response(StatusCode::OK, ())
1304 0 : }
1305 :
1306 : /// HTTP endpoint to query the current tenant_size of a tenant.
1307 : ///
1308 : /// This is not used by consumption metrics under [`crate::consumption_metrics`], but can be used
1309 : /// to debug any of the calculations. Requires the `tenant_id` request parameter; supports
1310 : /// `inputs_only=true|false` (default false), which helps debug failures to calculate the model
1311 : /// values.
1312 : ///
1313 : /// 'retention_period' query parameter overrides the cutoff that is used to calculate the size
1314 : /// (only if it is shorter than the real cutoff).
1315 : ///
1316 : /// Note: we don't update the cached size and prometheus metric here.
1317 : /// The retention period might be different, and it's nice to have a method to just calculate it
1318 : /// without modifying anything anyway.
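/// Illustrative invocation (the route is an assumption; the query parameters
/// are the ones parsed below):
///
/// ```text
/// GET /v1/tenant/<tenant_shard_id>/synthetic_size?inputs_only=false&retention_period=86400
/// ```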
1319 0 : async fn tenant_size_handler(
1320 0 : request: Request<Body>,
1321 0 : cancel: CancellationToken,
1322 0 : ) -> Result<Response<Body>, ApiError> {
1323 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1324 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1325 0 : let inputs_only: Option<bool> = parse_query_param(&request, "inputs_only")?;
1326 0 : let retention_period: Option<u64> = parse_query_param(&request, "retention_period")?;
1327 0 : let headers = request.headers();
1328 0 : let state = get_state(&request);
1329 :
1330 0 : if !tenant_shard_id.is_shard_zero() {
1331 0 : return Err(ApiError::BadRequest(anyhow!(
1332 0 : "Size calculations are only available on shard zero"
1333 0 : )));
1334 0 : }
1335 :
1336 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
1337 0 : let tenant = state
1338 0 : .tenant_manager
1339 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1340 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1341 :
1342 : // this can be long operation
1343 0 : let inputs = tenant
1344 0 : .gather_size_inputs(
1345 0 : retention_period,
1346 0 : LogicalSizeCalculationCause::TenantSizeHandler,
1347 0 : &cancel,
1348 0 : &ctx,
1349 0 : )
1350 0 : .await
1351 0 : .map_err(|e| match e {
1352 0 : crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown,
1353 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
1354 0 : })?;
1355 :
1356 0 : let mut sizes = None;
1357 0 : let accepts_html = headers
1358 0 : .get(header::ACCEPT)
1359 0 : .map(|v| v == "text/html")
1360 0 : .unwrap_or_default();
1361 0 : if !inputs_only.unwrap_or(false) {
1362 0 : let storage_model = inputs.calculate_model();
1363 0 : let size = storage_model.calculate();
1364 :
1365 : // If request header expects html, return html
1366 0 : if accepts_html {
1367 0 : return synthetic_size_html_response(inputs, storage_model, size);
1368 0 : }
1369 0 : sizes = Some(size);
1370 0 : } else if accepts_html {
1371 0 : return Err(ApiError::BadRequest(anyhow!(
1372 0 : "inputs_only parameter is incompatible with html output request"
1373 0 : )));
1374 0 : }
1375 :
1376 : /// The type resides in the pageserver so as not to expose `ModelInputs`.
1377 : #[derive(serde::Serialize)]
1378 : struct TenantHistorySize {
1379 : id: TenantId,
1380 : /// Size is a mixture of WAL and logical size, so the unit is bytes.
1381 : ///
1382 : /// Will be none if `?inputs_only=true` was given.
1383 : size: Option<u64>,
1384 : /// Size of each segment used in the model.
1385 : /// Will be null if `?inputs_only=true` was given.
1386 : segment_sizes: Option<Vec<tenant_size_model::SegmentSizeResult>>,
1387 : inputs: crate::tenant::size::ModelInputs,
1388 : }
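// Illustrative response shape (hypothetical values):
//
//     {"id": "...", "size": 123456789, "segment_sizes": [...], "inputs": {...}}
//
// With ?inputs_only=true, "size" and "segment_sizes" are null, matching the
// doc comments on the struct above.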
1389 :
1390 0 : json_response(
1391 : StatusCode::OK,
1392 : TenantHistorySize {
1393 0 : id: tenant_shard_id.tenant_id,
1394 0 : size: sizes.as_ref().map(|x| x.total_size),
1395 0 : segment_sizes: sizes.map(|x| x.segments),
1396 0 : inputs,
1397 : },
1398 : )
1399 0 : }
1400 :
1401 0 : async fn tenant_shard_split_handler(
1402 0 : mut request: Request<Body>,
1403 0 : _cancel: CancellationToken,
1404 0 : ) -> Result<Response<Body>, ApiError> {
1405 0 : let req: TenantShardSplitRequest = json_request(&mut request).await?;
1406 :
1407 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1408 0 : let state = get_state(&request);
1409 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1410 :
1411 0 : let tenant = state
1412 0 : .tenant_manager
1413 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1414 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1415 :
1416 0 : let new_shards = state
1417 0 : .tenant_manager
1418 0 : .shard_split(
1419 0 : tenant,
1420 0 : ShardCount::new(req.new_shard_count),
1421 0 : req.new_stripe_size,
1422 0 : &ctx,
1423 0 : )
1424 0 : .await
1425 0 : .map_err(ApiError::InternalServerError)?;
1426 :
1427 0 : json_response(StatusCode::OK, TenantShardSplitResponse { new_shards })
1428 0 : }
1429 :
1430 0 : async fn layer_map_info_handler(
1431 0 : request: Request<Body>,
1432 0 : _cancel: CancellationToken,
1433 0 : ) -> Result<Response<Body>, ApiError> {
1434 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1435 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1436 0 : let reset: LayerAccessStatsReset =
1437 0 : parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
1438 0 : let state = get_state(&request);
1439 :
1440 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1441 :
1442 0 : let timeline =
1443 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1444 0 : .await?;
1445 0 : let layer_map_info = timeline
1446 0 : .layer_map_info(reset)
1447 0 : .await
1448 0 : .map_err(|_shutdown| ApiError::ShuttingDown)?;
1449 :
1450 0 : json_response(StatusCode::OK, layer_map_info)
1451 0 : }
1452 :
1453 : #[instrument(skip_all, fields(tenant_id, shard_id, timeline_id, layer_name))]
1454 : async fn timeline_layer_scan_disposable_keys(
1455 : request: Request<Body>,
1456 : cancel: CancellationToken,
1457 : ) -> Result<Response<Body>, ApiError> {
1458 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1459 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1460 : let layer_name: LayerName = parse_request_param(&request, "layer_name")?;
1461 :
1462 : tracing::Span::current().record(
1463 : "tenant_id",
1464 : tracing::field::display(&tenant_shard_id.tenant_id),
1465 : );
1466 : tracing::Span::current().record(
1467 : "shard_id",
1468 : tracing::field::display(tenant_shard_id.shard_slug()),
1469 : );
1470 : tracing::Span::current().record("timeline_id", tracing::field::display(&timeline_id));
1471 : tracing::Span::current().record("layer_name", tracing::field::display(&layer_name));
1472 :
1473 : let state = get_state(&request);
1474 :
1475 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1476 :
1477 : // technically the timeline need not be active for this scan to complete
1478 : let timeline =
1479 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1480 : .await?;
1481 :
1482 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1483 : .with_scope_timeline(&timeline);
1484 :
1485 : let guard = timeline
1486 : .layers
1487 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1488 : .await;
1489 : let Some(layer) = guard.try_get_from_key(&layer_name.clone().into()) else {
1490 : return Err(ApiError::NotFound(
1491 : anyhow::anyhow!("Layer {tenant_shard_id}/{timeline_id}/{layer_name} not found").into(),
1492 : ));
1493 : };
1494 :
1495 : let resident_layer = layer
1496 : .download_and_keep_resident(&ctx)
1497 : .await
1498 0 : .map_err(|err| match err {
1499 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1500 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1501 0 : ApiError::ShuttingDown
1502 : }
1503 : tenant::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
1504 : | tenant::storage_layer::layer::DownloadError::DownloadRequired
1505 : | tenant::storage_layer::layer::DownloadError::NotFile(_)
1506 : | tenant::storage_layer::layer::DownloadError::DownloadFailed
1507 : | tenant::storage_layer::layer::DownloadError::PreStatFailed(_) => {
1508 0 : ApiError::InternalServerError(err.into())
1509 : }
1510 : #[cfg(test)]
1511 : tenant::storage_layer::layer::DownloadError::Failpoint(_) => {
1512 0 : ApiError::InternalServerError(err.into())
1513 : }
1514 0 : })?;
1515 :
1516 : let keys = resident_layer
1517 : .load_keys(&ctx)
1518 : .await
1519 : .map_err(ApiError::InternalServerError)?;
1520 :
1521 : let shard_identity = timeline.get_shard_identity();
1522 :
1523 : let mut disposable_count = 0;
1524 : let mut not_disposable_count = 0;
1525 : let cancel = cancel.clone();
1526 : for (i, key) in keys.into_iter().enumerate() {
1527 : if shard_identity.is_key_disposable(&key) {
1528 : disposable_count += 1;
1529 : tracing::debug!(key = %key, key.dbg=?key, "disposable key");
1530 : } else {
1531 : not_disposable_count += 1;
1532 : }
1533 : #[allow(clippy::collapsible_if)]
1534 : if i % 10000 == 0 {
1535 : if cancel.is_cancelled() || timeline.cancel.is_cancelled() || timeline.is_stopping() {
1536 : return Err(ApiError::ShuttingDown);
1537 : }
1538 : }
1539 : }
1540 :
1541 : json_response(
1542 : StatusCode::OK,
1543 : pageserver_api::models::ScanDisposableKeysResponse {
1544 : disposable_count,
1545 : not_disposable_count,
1546 : },
1547 : )
1548 : }
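
// A minimal sketch of the idea behind `is_key_disposable` (the real logic
// lives in `ShardIdentity` and is more involved): after a shard split, a
// layer may contain keys whose stripe is now owned by a different shard;
// those keys are "disposable" for this shard. The modular striping below is
// a hypothetical stand-in for the real placement function.
fn is_key_disposable_sketch(shard_number: u32, shard_count: u32, key_stripe: u32) -> bool {
    // A key is disposable when the stripe it falls into belongs to another shard.
    key_stripe % shard_count != shard_number
}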
1549 :
1550 0 : async fn timeline_download_heatmap_layers_handler(
1551 0 : request: Request<Body>,
1552 0 : _cancel: CancellationToken,
1553 0 : ) -> Result<Response<Body>, ApiError> {
1554 : // Only used in the case where remote storage is not configured.
1555 : const DEFAULT_MAX_CONCURRENCY: usize = 100;
1556 : // A conservative default.
1557 : const DEFAULT_CONCURRENCY: usize = 16;
1558 :
1559 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1560 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1561 :
1562 0 : let desired_concurrency =
1563 0 : parse_query_param(&request, "concurrency")?.unwrap_or(DEFAULT_CONCURRENCY);
1564 0 : let recurse = parse_query_param(&request, "recurse")?.unwrap_or(false);
1565 :
1566 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1567 :
1568 0 : let state = get_state(&request);
1569 0 : let timeline =
1570 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1571 0 : .await?;
1572 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1573 0 : .with_scope_timeline(&timeline);
1574 :
1575 0 : let max_concurrency = get_config(&request)
1576 0 : .remote_storage_config
1577 0 : .as_ref()
1578 0 : .map(|c| c.concurrency_limit())
1579 0 : .unwrap_or(DEFAULT_MAX_CONCURRENCY);
1580 0 : let concurrency = std::cmp::min(max_concurrency, desired_concurrency);
1581 :
1582 0 : timeline.start_heatmap_layers_download(concurrency, recurse, &ctx)?;
1583 :
1584 0 : json_response(StatusCode::ACCEPTED, ())
1585 0 : }
1586 :
1587 0 : async fn timeline_shutdown_download_heatmap_layers_handler(
1588 0 : request: Request<Body>,
1589 0 : _cancel: CancellationToken,
1590 0 : ) -> Result<Response<Body>, ApiError> {
1591 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1592 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1593 :
1594 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1595 :
1596 0 : let state = get_state(&request);
1597 0 : let timeline =
1598 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1599 0 : .await?;
1600 :
1601 0 : timeline.stop_and_drain_heatmap_layers_download().await;
1602 :
1603 0 : json_response(StatusCode::OK, ())
1604 0 : }
1605 :
1606 0 : async fn layer_download_handler(
1607 0 : request: Request<Body>,
1608 0 : _cancel: CancellationToken,
1609 0 : ) -> Result<Response<Body>, ApiError> {
1610 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1611 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1612 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1613 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1614 0 : let layer_name = LayerName::from_str(layer_file_name)
1615 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1616 0 : let state = get_state(&request);
1617 :
1618 0 : let timeline =
1619 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1620 0 : .await?;
1621 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1622 0 : .with_scope_timeline(&timeline);
1623 0 : let downloaded = timeline
1624 0 : .download_layer(&layer_name, &ctx)
1625 0 : .await
1626 0 : .map_err(|e| match e {
1627 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1628 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1629 0 : ApiError::ShuttingDown
1630 : }
1631 0 : other => ApiError::InternalServerError(other.into()),
1632 0 : })?;
1633 :
1634 0 : match downloaded {
1635 0 : Some(true) => json_response(StatusCode::OK, ()),
1636 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1637 0 : None => json_response(
1638 : StatusCode::BAD_REQUEST,
1639 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1640 : ),
1641 : }
1642 0 : }
1643 :
1644 0 : async fn evict_timeline_layer_handler(
1645 0 : request: Request<Body>,
1646 0 : _cancel: CancellationToken,
1647 0 : ) -> Result<Response<Body>, ApiError> {
1648 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1649 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1650 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1651 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1652 0 : let state = get_state(&request);
1653 :
1654 0 : let layer_name = LayerName::from_str(layer_file_name)
1655 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1656 :
1657 0 : let timeline =
1658 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1659 0 : .await?;
1660 0 : let evicted = timeline
1661 0 : .evict_layer(&layer_name)
1662 0 : .await
1663 0 : .map_err(ApiError::InternalServerError)?;
1664 :
1665 0 : match evicted {
1666 0 : Some(true) => json_response(StatusCode::OK, ()),
1667 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1668 0 : None => json_response(
1669 : StatusCode::BAD_REQUEST,
1670 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1671 : ),
1672 : }
1673 0 : }
1674 :
1675 0 : async fn timeline_gc_blocking_handler(
1676 0 : request: Request<Body>,
1677 0 : _cancel: CancellationToken,
1678 0 : ) -> Result<Response<Body>, ApiError> {
1679 0 : block_or_unblock_gc(request, true).await
1680 0 : }
1681 :
1682 0 : async fn timeline_gc_unblocking_handler(
1683 0 : request: Request<Body>,
1684 0 : _cancel: CancellationToken,
1685 0 : ) -> Result<Response<Body>, ApiError> {
1686 0 : block_or_unblock_gc(request, false).await
1687 0 : }
1688 :
1689 : /// Traces GetPage@LSN requests for a timeline, and emits metadata in an efficient binary encoding.
1690 : /// Use the `pagectl page-trace` command to decode and analyze the output.
1691 0 : async fn timeline_page_trace_handler(
1692 0 : request: Request<Body>,
1693 0 : cancel: CancellationToken,
1694 0 : ) -> Result<Response<Body>, ApiError> {
1695 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1696 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1697 0 : let state = get_state(&request);
1698 0 : check_permission(&request, None)?;
1699 :
1700 0 : let size_limit: usize = parse_query_param(&request, "size_limit_bytes")?.unwrap_or(1024 * 1024);
1701 0 : let time_limit_secs: u64 = parse_query_param(&request, "time_limit_secs")?.unwrap_or(5);
1702 :
1703 : // Convert size limit to event limit based on the serialized size of an event. The event size is
1704 : // fixed, as the default bincode serializer uses fixed-width integer encoding.
1705 0 : let event_size = bincode::serialize(&PageTraceEvent::default())
1706 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?
1707 0 : .len();
1708 0 : let event_limit = size_limit / event_size;
1709 :
1710 0 : let timeline =
1711 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1712 0 : .await?;
1713 :
1714 : // Install a page trace, unless one is already in progress. We just use a buffered channel,
1715 : // which may double the memory usage in the worst case, but it's still bounded.
1716 0 : let (trace_tx, mut trace_rx) = tokio::sync::mpsc::channel(event_limit);
1717 0 : let cur = timeline.page_trace.load();
1718 0 : let installed = cur.is_none()
1719 0 : && timeline
1720 0 : .page_trace
1721 0 : .compare_and_swap(cur, Some(Arc::new(trace_tx)))
1722 0 : .is_none();
1723 0 : if !installed {
1724 0 : return Err(ApiError::Conflict("page trace already active".to_string()));
1725 0 : }
1726 0 : defer!(timeline.page_trace.store(None)); // uninstall on return
1727 :
1728 : // Collect the trace and return it to the client. We could stream the response, but this is
1729 : // simple and fine.
1730 0 : let mut body = Vec::with_capacity(size_limit);
1731 0 : let deadline = Instant::now() + Duration::from_secs(time_limit_secs);
1732 :
1733 0 : while body.len() < size_limit {
1734 0 : tokio::select! {
1735 0 : event = trace_rx.recv() => {
1736 0 : let Some(event) = event else {
1737 0 : break; // shouldn't happen (sender doesn't close, unless timeline dropped)
1738 : };
1739 0 : bincode::serialize_into(&mut body, &event)
1740 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?;
1741 : }
1742 0 : _ = tokio::time::sleep_until(deadline) => break, // time limit reached
1743 0 : _ = cancel.cancelled() => return Err(ApiError::Cancelled),
1744 : }
1745 : }
1746 :
1747 0 : Ok(Response::builder()
1748 0 : .status(StatusCode::OK)
1749 0 : .header(header::CONTENT_TYPE, "application/octet-stream")
1750 0 : .body(hyper::Body::from(body))
1751 0 : .unwrap())
1752 0 : }
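
// A client-side sketch (not part of this file) of decoding the page-trace
// body produced above, in the spirit of `pagectl page-trace`: because the
// default bincode configuration uses fixed-width integers, every serialized
// `PageTraceEvent` occupies the same number of bytes, so the stream splits
// into fixed-size chunks. `read_trace` is a hypothetical helper, and it
// assumes `PageTraceEvent` implements `Deserialize` and `Default`.
fn read_trace(body: &[u8]) -> Result<Vec<PageTraceEvent>, bincode::Error> {
    let event_size = bincode::serialized_size(&PageTraceEvent::default())? as usize;
    body.chunks_exact(event_size)
        .map(bincode::deserialize::<PageTraceEvent>)
        .collect()
}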
1753 :
1754 : /// Adding a block is `POST ../block_gc`, removing a block is `POST ../unblock_gc`.
1755 : ///
1756 : /// Both are technically unsafe because they might fire off index uploads, thus they are POST.
1757 0 : async fn block_or_unblock_gc(
1758 0 : request: Request<Body>,
1759 0 : block: bool,
1760 0 : ) -> Result<Response<Body>, ApiError> {
1761 : use crate::tenant::remote_timeline_client::WaitCompletionError;
1762 : use crate::tenant::upload_queue::NotInitialized;
1763 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1764 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1765 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1766 0 : let state = get_state(&request);
1767 :
1768 0 : let tenant = state
1769 0 : .tenant_manager
1770 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1771 :
1772 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1773 :
1774 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
1775 :
1776 0 : let fut = async {
1777 0 : if block {
1778 0 : timeline.block_gc(&tenant).await.map(|_| ())
1779 : } else {
1780 0 : timeline.unblock_gc(&tenant).await
1781 : }
1782 0 : };
1783 :
1784 0 : let span = tracing::info_span!(
1785 : "block_or_unblock_gc",
1786 : tenant_id = %tenant_shard_id.tenant_id,
1787 0 : shard_id = %tenant_shard_id.shard_slug(),
1788 : timeline_id = %timeline_id,
1789 : block = block,
1790 : );
1791 :
1792 0 : let res = fut.instrument(span).await;
1793 :
1794 0 : res.map_err(|e| {
1795 0 : if e.is::<NotInitialized>() || e.is::<WaitCompletionError>() {
1796 0 : ApiError::ShuttingDown
1797 : } else {
1798 0 : ApiError::InternalServerError(e)
1799 : }
1800 0 : })?;
1801 :
1802 0 : json_response(StatusCode::OK, ())
1803 0 : }
1804 :
1805 : /// Get tenant_size SVG graph along with the JSON data.
1806 0 : fn synthetic_size_html_response(
1807 0 : inputs: ModelInputs,
1808 0 : storage_model: StorageModel,
1809 0 : sizes: SizeResult,
1810 0 : ) -> Result<Response<Body>, ApiError> {
1811 0 : let mut timeline_ids: Vec<String> = Vec::new();
1812 0 : let mut timeline_map: HashMap<TimelineId, usize> = HashMap::new();
1813 0 : for (index, ti) in inputs.timeline_inputs.iter().enumerate() {
1814 0 : timeline_map.insert(ti.timeline_id, index);
1815 0 : timeline_ids.push(ti.timeline_id.to_string());
1816 0 : }
1817 0 : let seg_to_branch: Vec<(usize, SvgBranchKind)> = inputs
1818 0 : .segments
1819 0 : .iter()
1820 0 : .map(|seg| {
1821 0 : (
1822 0 : *timeline_map.get(&seg.timeline_id).unwrap(),
1823 0 : seg.kind.into(),
1824 0 : )
1825 0 : })
1826 0 : .collect();
1827 :
1828 0 : let svg =
1829 0 : tenant_size_model::svg::draw_svg(&storage_model, &timeline_ids, &seg_to_branch, &sizes)
1830 0 : .map_err(ApiError::InternalServerError)?;
1831 :
1832 0 : let mut response = String::new();
1833 :
1834 : use std::fmt::Write;
1835 0 : write!(response, "<html>\n<body>\n").unwrap();
1836 0 : write!(response, "<div>\n{svg}\n</div>").unwrap();
1837 0 : writeln!(response, "Project size: {}", sizes.total_size).unwrap();
1838 0 : writeln!(response, "<pre>").unwrap();
1839 0 : writeln!(
1840 0 : response,
1841 0 : "{}",
1842 0 : serde_json::to_string_pretty(&inputs).unwrap()
1843 : )
1844 0 : .unwrap();
1845 0 : writeln!(
1846 0 : response,
1847 0 : "{}",
1848 0 : serde_json::to_string_pretty(&sizes.segments).unwrap()
1849 : )
1850 0 : .unwrap();
1851 0 : writeln!(response, "</pre>").unwrap();
1852 0 : write!(response, "</body>\n</html>\n").unwrap();
1853 :
1854 0 : html_response(StatusCode::OK, response)
1855 0 : }
1856 :
1857 0 : pub fn html_response(status: StatusCode, data: String) -> Result<Response<Body>, ApiError> {
1858 0 : let response = Response::builder()
1859 0 : .status(status)
1860 0 : .header(header::CONTENT_TYPE, "text/html")
1861 0 : .body(Body::from(data.as_bytes().to_vec()))
1862 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
1863 0 : Ok(response)
1864 0 : }
1865 :
1866 0 : async fn get_tenant_config_handler(
1867 0 : request: Request<Body>,
1868 0 : _cancel: CancellationToken,
1869 0 : ) -> Result<Response<Body>, ApiError> {
1870 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1871 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1872 0 : let state = get_state(&request);
1873 :
1874 0 : let tenant = state
1875 0 : .tenant_manager
1876 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1877 :
1878 0 : let response = HashMap::from([
1879 : (
1880 : "tenant_specific_overrides",
1881 0 : serde_json::to_value(tenant.tenant_specific_overrides())
1882 0 : .context("serializing tenant specific overrides")
1883 0 : .map_err(ApiError::InternalServerError)?,
1884 : ),
1885 : (
1886 0 : "effective_config",
1887 0 : serde_json::to_value(tenant.effective_config())
1888 0 : .context("serializing effective config")
1889 0 : .map_err(ApiError::InternalServerError)?,
1890 : ),
1891 : ]);
1892 :
1893 0 : json_response(StatusCode::OK, response)
1894 0 : }
1895 :
1896 0 : async fn update_tenant_config_handler(
1897 0 : mut request: Request<Body>,
1898 0 : _cancel: CancellationToken,
1899 0 : ) -> Result<Response<Body>, ApiError> {
1900 0 : let request_data: TenantConfigRequest = json_request(&mut request).await?;
1901 0 : let tenant_id = request_data.tenant_id;
1902 0 : check_permission(&request, Some(tenant_id))?;
1903 :
1904 0 : let new_tenant_conf = request_data.config;
1905 :
1906 0 : let state = get_state(&request);
1907 :
1908 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1909 :
1910 0 : let tenant = state
1911 0 : .tenant_manager
1912 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1913 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1914 :
1915 : // This is a legacy API that only operates on attached tenants: the preferred
1916 : // API to use is the location_config/ endpoint, which lets the caller provide
1917 : // the full LocationConf.
1918 0 : let location_conf = LocationConf::attached_single(
1919 0 : new_tenant_conf.clone(),
1920 0 : tenant.get_generation(),
1921 0 : ShardParameters::from(tenant.get_shard_identity()),
1922 : );
1923 :
1924 0 : tenant
1925 0 : .get_shard_identity()
1926 0 : .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
1927 :
1928 0 : crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1929 0 : .await
1930 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1931 :
1932 0 : let _ = tenant
1933 0 : .update_tenant_config(|_crnt| Ok(new_tenant_conf.clone()))
1934 0 : .expect("Closure returns Ok()");
1935 :
1936 0 : json_response(StatusCode::OK, ())
1937 0 : }
1938 :
1939 0 : async fn patch_tenant_config_handler(
1940 0 : mut request: Request<Body>,
1941 0 : _cancel: CancellationToken,
1942 0 : ) -> Result<Response<Body>, ApiError> {
1943 0 : let request_data: TenantConfigPatchRequest = json_request(&mut request).await?;
1944 0 : let tenant_id = request_data.tenant_id;
1945 0 : check_permission(&request, Some(tenant_id))?;
1946 :
1947 0 : let state = get_state(&request);
1948 :
1949 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1950 :
1951 0 : let tenant = state
1952 0 : .tenant_manager
1953 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1954 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1955 :
1956 0 : let updated = tenant
1957 0 : .update_tenant_config(|crnt| {
1958 0 : crnt.apply_patch(request_data.config.clone())
1959 0 : .map_err(anyhow::Error::new)
1960 0 : })
1961 0 : .map_err(ApiError::BadRequest)?;
1962 :
1963 : // This is a legacy API that only operates on attached tenants: the preferred
1964 : // API to use is the location_config/ endpoint, which lets the caller provide
1965 : // the full LocationConf.
1966 0 : let location_conf = LocationConf::attached_single(
1967 0 : updated,
1968 0 : tenant.get_generation(),
1969 0 : ShardParameters::from(tenant.get_shard_identity()),
1970 : );
1971 :
1972 0 : tenant
1973 0 : .get_shard_identity()
1974 0 : .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
1975 :
1976 0 : crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1977 0 : .await
1978 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1979 :
1980 0 : json_response(StatusCode::OK, ())
1981 0 : }
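
// A hedged illustration of the patch semantics applied above. Treat the
// null-clears-override behavior as an assumption about `apply_patch`, not a
// guarantee made by this file:
//
//   {
//     "tenant_id": "<tenant_id>",
//     "config": {
//       "gc_period": "1h",      // set or replace this override
//       "pitr_interval": null   // clear this override back to the default
//     }
//   }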
1982 :
1983 0 : async fn put_tenant_location_config_handler(
1984 0 : mut request: Request<Body>,
1985 0 : _cancel: CancellationToken,
1986 0 : ) -> Result<Response<Body>, ApiError> {
1987 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1988 :
1989 0 : let request_data: TenantLocationConfigRequest = json_request(&mut request).await?;
1990 0 : let flush = parse_query_param(&request, "flush_ms")?.map(Duration::from_millis);
1991 0 : let lazy = parse_query_param(&request, "lazy")?.unwrap_or(false);
1992 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1993 :
1994 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1995 0 : let state = get_state(&request);
1996 0 : let conf = state.conf;
1997 :
1998 : // The `Detached` state is special, it doesn't upsert a tenant, it removes
1999 : // its local disk content and drops it from memory.
2000 0 : if let LocationConfigMode::Detached = request_data.config.mode {
2001 0 : if let Err(e) = state
2002 0 : .tenant_manager
2003 0 : .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client)
2004 0 : .instrument(info_span!("tenant_detach",
2005 : tenant_id = %tenant_shard_id.tenant_id,
2006 0 : shard_id = %tenant_shard_id.shard_slug()
2007 : ))
2008 0 : .await
2009 : {
2010 0 : match e {
2011 0 : TenantStateError::SlotError(TenantSlotError::NotFound(_)) => {
2012 0 : // This API is idempotent: a NotFound on a detach is fine.
2013 0 : }
2014 0 : _ => return Err(e.into()),
2015 : }
2016 0 : }
2017 0 : return json_response(StatusCode::OK, ());
2018 0 : }
2019 :
2020 0 : let location_conf =
2021 0 : LocationConf::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
2022 :
2023 : // lazy==true queues the tenant up for activation, or lets it jump the queue as usual when a
2024 : // compute connects, similar to the activation ordering at startup.
2025 0 : let spawn_mode = if lazy {
2026 0 : tenant::SpawnMode::Lazy
2027 : } else {
2028 0 : tenant::SpawnMode::Eager
2029 : };
2030 :
2031 0 : let tenant = state
2032 0 : .tenant_manager
2033 0 : .upsert_location(tenant_shard_id, location_conf, flush, spawn_mode, &ctx)
2034 0 : .await?;
2035 0 : let stripe_size = tenant.as_ref().map(|t| t.get_shard_stripe_size());
2036 0 : let attached = tenant.is_some();
2037 :
2038 0 : if let Some(_flush_ms) = flush {
2039 0 : match state
2040 0 : .secondary_controller
2041 0 : .upload_tenant(tenant_shard_id)
2042 0 : .await
2043 : {
2044 : Ok(()) => {
2045 0 : tracing::info!("Uploaded heatmap during flush");
2046 : }
2047 0 : Err(e) => {
2048 0 : tracing::warn!("Failed to flush heatmap: {e}");
2049 : }
2050 : }
2051 : } else {
2052 0 : tracing::info!("No flush requested when configuring");
2053 : }
2054 :
2055 : // This API returns a vector of pageservers where the tenant is attached: this is
2056 : // primarily for use in the sharding service. For compatibility, we also return this
2057 : // when called directly on a pageserver, but the payload always contains zero or one shards.
2058 0 : let mut response = TenantLocationConfigResponse {
2059 0 : shards: Vec::new(),
2060 0 : stripe_size: None,
2061 0 : };
2062 0 : if attached {
2063 0 : response.shards.push(TenantShardLocation {
2064 0 : shard_id: tenant_shard_id,
2065 0 : node_id: state.conf.id,
2066 0 : });
2067 0 : if tenant_shard_id.shard_count.count() > 1 {
2068 : // Stripe size should be set if we are attached
2069 0 : debug_assert!(stripe_size.is_some());
2070 0 : response.stripe_size = stripe_size;
2071 0 : }
2072 0 : }
2073 :
2074 0 : json_response(StatusCode::OK, response)
2075 0 : }
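
// An illustrative request for the `Detached` fast path above. The `mode`
// spelling follows `LocationConfigMode`; the remaining `LocationConfig`
// fields are abbreviated here and may be required in practice:
//
//   PUT .../tenant/<tenant_shard_id>/location_config
//   { "mode": "Detached", "generation": null, "tenant_conf": {} }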
2076 :
2077 0 : async fn list_location_config_handler(
2078 0 : request: Request<Body>,
2079 0 : _cancel: CancellationToken,
2080 0 : ) -> Result<Response<Body>, ApiError> {
2081 0 : let state = get_state(&request);
2082 0 : let slots = state.tenant_manager.list();
2083 0 : let result = LocationConfigListResponse {
2084 0 : tenant_shards: slots
2085 0 : .into_iter()
2086 0 : .map(|(tenant_shard_id, slot)| {
2087 0 : let v = match slot {
2088 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2089 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2090 0 : TenantSlot::InProgress(_) => None,
2091 : };
2092 0 : (tenant_shard_id, v)
2093 0 : })
2094 0 : .collect(),
2095 : };
2096 0 : json_response(StatusCode::OK, result)
2097 0 : }
2098 :
2099 0 : async fn get_location_config_handler(
2100 0 : request: Request<Body>,
2101 0 : _cancel: CancellationToken,
2102 0 : ) -> Result<Response<Body>, ApiError> {
2103 0 : let state = get_state(&request);
2104 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2105 0 : let slot = state.tenant_manager.get(tenant_shard_id);
2106 :
2107 0 : let Some(slot) = slot else {
2108 0 : return Err(ApiError::NotFound(
2109 0 : anyhow::anyhow!("Tenant shard not found").into(),
2110 0 : ));
2111 : };
2112 :
2113 0 : let result: Option<LocationConfig> = match slot {
2114 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2115 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2116 0 : TenantSlot::InProgress(_) => None,
2117 : };
2118 :
2119 0 : json_response(StatusCode::OK, result)
2120 0 : }
2121 :
2122 : // Perform a time-travel recovery on the given tenant/tenant shard. The tenant must be detached
2123 : // (from all pageservers) because the recovery invalidates consistency assumptions.
2124 0 : async fn tenant_time_travel_remote_storage_handler(
2125 0 : request: Request<Body>,
2126 0 : cancel: CancellationToken,
2127 0 : ) -> Result<Response<Body>, ApiError> {
2128 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2129 :
2130 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2131 :
2132 0 : let timestamp_raw = must_get_query_param(&request, "travel_to")?;
2133 0 : let timestamp = humantime::parse_rfc3339(&timestamp_raw)
2134 0 : .with_context(|| format!("Invalid time for travel_to: {timestamp_raw:?}"))
2135 0 : .map_err(ApiError::BadRequest)?;
2136 :
2137 0 : let done_if_after_raw = must_get_query_param(&request, "done_if_after")?;
2138 0 : let done_if_after = humantime::parse_rfc3339(&done_if_after_raw)
2139 0 : .with_context(|| format!("Invalid time for done_if_after: {done_if_after_raw:?}"))
2140 0 : .map_err(ApiError::BadRequest)?;
2141 :
2142 : // This is just a sanity check to fend off naive misuse of the API:
2143 : // the tenant needs to be detached *everywhere*
2144 0 : let state = get_state(&request);
2145 0 : let we_manage_tenant = state.tenant_manager.manages_tenant_shard(tenant_shard_id);
2146 0 : if we_manage_tenant {
2147 0 : return Err(ApiError::BadRequest(anyhow!(
2148 0 : "Tenant {tenant_shard_id} is already attached at this pageserver"
2149 0 : )));
2150 0 : }
2151 :
2152 0 : if timestamp > done_if_after {
2153 0 : return Err(ApiError::BadRequest(anyhow!(
2154 0 : "The done_if_after timestamp comes before the timestamp to recover to"
2155 0 : )));
2156 0 : }
2157 :
2158 0 : tracing::info!(
2159 0 : "Issuing time travel request internally. timestamp={timestamp_raw}, done_if_after={done_if_after_raw}"
2160 : );
2161 :
2162 0 : remote_timeline_client::upload::time_travel_recover_tenant(
2163 0 : &state.remote_storage,
2164 0 : &tenant_shard_id,
2165 0 : timestamp,
2166 0 : done_if_after,
2167 0 : &cancel,
2168 0 : )
2169 0 : .await
2170 0 : .map_err(|e| match e {
2171 0 : TimeTravelError::BadInput(e) => {
2172 0 : warn!("bad input error: {e}");
2173 0 : ApiError::BadRequest(anyhow!("bad input error"))
2174 : }
2175 : TimeTravelError::Unimplemented => {
2176 0 : ApiError::BadRequest(anyhow!("unimplemented for the configured remote storage"))
2177 : }
2178 0 : TimeTravelError::Cancelled => ApiError::InternalServerError(anyhow!("cancelled")),
2179 : TimeTravelError::TooManyVersions => {
2180 0 : ApiError::InternalServerError(anyhow!("too many versions in remote storage"))
2181 : }
2182 0 : TimeTravelError::Other(e) => {
2183 0 : warn!("internal error: {e}");
2184 0 : ApiError::InternalServerError(anyhow!("internal error"))
2185 : }
2186 0 : })?;
2187 :
2188 0 : json_response(StatusCode::OK, ())
2189 0 : }
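
// An illustrative invocation, with the route shape assumed from the
// parameters parsed above. Both timestamps are RFC 3339, and `travel_to`
// must not be later than `done_if_after`:
//
//   curl -X PUT ".../tenant/<tenant_shard_id>/time_travel_remote_storage?travel_to=2024-01-01T00:00:00Z&done_if_after=2024-01-02T00:00:00Z"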
2190 :
2191 : /// Testing helper to transition a tenant to [`crate::tenant::TenantState::Broken`].
2192 0 : async fn handle_tenant_break(
2193 0 : r: Request<Body>,
2194 0 : _cancel: CancellationToken,
2195 0 : ) -> Result<Response<Body>, ApiError> {
2196 0 : let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
2197 :
2198 0 : let state = get_state(&r);
2199 0 : state
2200 0 : .tenant_manager
2201 0 : .get_attached_tenant_shard(tenant_shard_id)?
2202 0 : .set_broken("broken from test".to_owned())
2203 0 : .await;
2204 :
2205 0 : json_response(StatusCode::OK, ())
2206 0 : }
2207 :
2208 : // Obtains an LSN lease on the given timeline.
2209 0 : async fn lsn_lease_handler(
2210 0 : mut request: Request<Body>,
2211 0 : _cancel: CancellationToken,
2212 0 : ) -> Result<Response<Body>, ApiError> {
2213 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2214 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2215 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2216 0 : let lsn = json_request::<LsnLeaseRequest>(&mut request).await?.lsn;
2217 :
2218 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2219 :
2220 0 : let state = get_state(&request);
2221 :
2222 0 : let timeline =
2223 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2224 0 : .await?;
2225 :
2226 0 : let result = async {
2227 0 : timeline
2228 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx)
2229 0 : .map_err(|e| {
2230 0 : ApiError::InternalServerError(
2231 0 : e.context(format!("invalid lsn lease request at {lsn}")),
2232 0 : )
2233 0 : })
2234 0 : }
2235 0 : .instrument(info_span!("init_lsn_lease", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2236 0 : .await?;
2237 :
2238 0 : json_response(StatusCode::OK, result)
2239 0 : }
2240 :
2241 : // Run GC immediately on given timeline.
2242 0 : async fn timeline_gc_handler(
2243 0 : mut request: Request<Body>,
2244 0 : cancel: CancellationToken,
2245 0 : ) -> Result<Response<Body>, ApiError> {
2246 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2247 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2248 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2249 :
2250 0 : let gc_req: TimelineGcRequest = json_request(&mut request).await?;
2251 :
2252 0 : let state = get_state(&request);
2253 :
2254 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2255 0 : let gc_result = state
2256 0 : .tenant_manager
2257 0 : .immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, &ctx)
2258 0 : .await?;
2259 :
2260 0 : json_response(StatusCode::OK, gc_result)
2261 0 : }
2262 :
2263 : // Cancel scheduled compaction tasks
2264 0 : async fn timeline_cancel_compact_handler(
2265 0 : request: Request<Body>,
2266 0 : _cancel: CancellationToken,
2267 0 : ) -> Result<Response<Body>, ApiError> {
2268 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2269 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2270 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2271 0 : let state = get_state(&request);
2272 0 : async {
2273 0 : let tenant = state
2274 0 : .tenant_manager
2275 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2276 0 : tenant.cancel_scheduled_compaction(timeline_id);
2277 0 : json_response(StatusCode::OK, ())
2278 0 : }
2279 0 : .instrument(info_span!("timeline_cancel_compact", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2280 0 : .await
2281 0 : }
2282 :
2283 : // Get compact info of a timeline
2284 0 : async fn timeline_compact_info_handler(
2285 0 : request: Request<Body>,
2286 0 : _cancel: CancellationToken,
2287 0 : ) -> Result<Response<Body>, ApiError> {
2288 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2289 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2290 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2291 0 : let state = get_state(&request);
2292 0 : async {
2293 0 : let tenant = state
2294 0 : .tenant_manager
2295 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2296 0 : let resp = tenant.get_scheduled_compaction_tasks(timeline_id);
2297 0 : json_response(StatusCode::OK, resp)
2298 0 : }
2299 0 : .instrument(info_span!("timeline_compact_info", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2300 0 : .await
2301 0 : }
2302 :
2303 : // Run compaction immediately on given timeline.
2304 0 : async fn timeline_compact_handler(
2305 0 : mut request: Request<Body>,
2306 0 : cancel: CancellationToken,
2307 0 : ) -> Result<Response<Body>, ApiError> {
2308 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2309 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2310 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2311 :
2312 0 : let compact_request = json_request_maybe::<Option<CompactRequest>>(&mut request).await?;
2313 :
2314 0 : let state = get_state(&request);
2315 :
2316 0 : let mut flags = EnumSet::empty();
2317 :
2318 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2319 0 : flags |= CompactFlags::ForceL0Compaction;
2320 0 : }
2321 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2322 0 : flags |= CompactFlags::ForceRepartition;
2323 0 : }
2324 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2325 0 : flags |= CompactFlags::ForceImageLayerCreation;
2326 0 : }
2327 0 : if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? {
2328 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
2329 0 : }
2330 0 : if Some(true) == parse_query_param::<_, bool>(&request, "dry_run")? {
2331 0 : flags |= CompactFlags::DryRun;
2332 0 : }
2333 : // Manual compaction does not yield for L0.
2334 :
2335 0 : let wait_until_uploaded =
2336 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2337 :
2338 0 : let wait_until_scheduled_compaction_done =
2339 0 : parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")?
2340 0 : .unwrap_or(false);
2341 :
2342 0 : let sub_compaction = compact_request
2343 0 : .as_ref()
2344 0 : .map(|r| r.sub_compaction)
2345 0 : .unwrap_or(false);
2346 0 : let sub_compaction_max_job_size_mb = compact_request
2347 0 : .as_ref()
2348 0 : .and_then(|r| r.sub_compaction_max_job_size_mb);
2349 :
2350 0 : let options = CompactOptions {
2351 0 : compact_key_range: compact_request
2352 0 : .as_ref()
2353 0 : .and_then(|r| r.compact_key_range.clone()),
2354 0 : compact_lsn_range: compact_request
2355 0 : .as_ref()
2356 0 : .and_then(|r| r.compact_lsn_range.clone()),
2357 0 : flags,
2358 0 : sub_compaction,
2359 0 : sub_compaction_max_job_size_mb,
2360 : };
2361 :
2362 0 : let scheduled = compact_request
2363 0 : .as_ref()
2364 0 : .map(|r| r.scheduled)
2365 0 : .unwrap_or(false);
2366 :
2367 0 : async {
2368 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2369 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2370 0 : if scheduled {
2371 0 : let tenant = state
2372 0 : .tenant_manager
2373 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2374 0 : let rx = tenant.schedule_compaction(timeline_id, options).await.map_err(ApiError::InternalServerError)?;
2375 0 : if wait_until_scheduled_compaction_done {
2376 : // This may take a long time; dropping the HTTP request will not cancel the compaction.
2377 0 : rx.await.ok();
2378 0 : }
2379 : } else {
2380 0 : timeline
2381 0 : .compact_with_options(&cancel, options, &ctx)
2382 0 : .await
2383 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2384 0 : if wait_until_uploaded {
2385 0 : timeline.remote_client.wait_completion().await
2386 : // XXX map to correct ApiError for the cases where it's due to shutdown
2387 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2388 0 : }
2389 : }
2390 0 : json_response(StatusCode::OK, ())
2391 0 : }
2392 0 : .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2393 0 : .await
2394 0 : }
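
// A hedged example of driving a manual compaction through this handler:
// boolean flags arrive as query parameters while the optional JSON body is a
// `CompactRequest`. Route shape and method are assumptions:
//
//   curl -X PUT ".../timeline/<timeline_id>/compact?force_repartition=true&wait_until_uploaded=true" \
//        -d '{ "scheduled": false, "sub_compaction": true, "sub_compaction_max_job_size_mb": 1024 }'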
2395 :
2396 0 : async fn timeline_mark_invisible_handler(
2397 0 : mut request: Request<Body>,
2398 0 : _cancel: CancellationToken,
2399 0 : ) -> Result<Response<Body>, ApiError> {
2400 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2401 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2402 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2403 :
2404 0 : let compact_request = json_request_maybe::<Option<MarkInvisibleRequest>>(&mut request).await?;
2405 :
2406 0 : let state = get_state(&request);
2407 :
2408 0 : let visibility = match compact_request {
2409 0 : Some(req) => match req.is_visible {
2410 0 : Some(true) => TimelineVisibilityState::Visible,
2411 0 : Some(false) | None => TimelineVisibilityState::Invisible,
2412 : },
2413 0 : None => TimelineVisibilityState::Invisible,
2414 : };
2415 :
2416 0 : async {
2417 0 : let tenant = state
2418 0 : .tenant_manager
2419 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2420 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2421 0 : timeline.remote_client.schedule_index_upload_for_timeline_invisible_state(visibility).map_err(ApiError::InternalServerError)?;
2422 0 : json_response(StatusCode::OK, ())
2423 0 : }
2424 0 : .instrument(info_span!("manual_timeline_mark_invisible", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2425 0 : .await
2426 0 : }
2427 :
2428 : // Run offload immediately on given timeline.
2429 0 : async fn timeline_offload_handler(
2430 0 : request: Request<Body>,
2431 0 : _cancel: CancellationToken,
2432 0 : ) -> Result<Response<Body>, ApiError> {
2433 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2434 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2435 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2436 :
2437 0 : let state = get_state(&request);
2438 :
2439 0 : async {
2440 0 : let tenant = state
2441 0 : .tenant_manager
2442 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2443 :
2444 0 : if tenant.get_offloaded_timeline(timeline_id).is_ok() {
2445 0 : return json_response(StatusCode::OK, ());
2446 0 : }
2447 0 : let timeline =
2448 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2449 0 : .await?;
2450 :
2451 0 : if !tenant.timeline_has_no_attached_children(timeline_id) {
2452 0 : return Err(ApiError::PreconditionFailed(
2453 0 : "timeline has attached children".into(),
2454 0 : ));
2455 0 : }
2456 0 : if let (false, reason) = timeline.can_offload() {
2457 0 : return Err(ApiError::PreconditionFailed(
2458 0 : format!("Timeline::can_offload() check failed: {reason}") .into(),
2459 0 : ));
2460 0 : }
2461 0 : offload_timeline(&tenant, &timeline)
2462 0 : .await
2463 0 : .map_err(|e| {
2464 0 : match e {
2465 0 : OffloadError::Cancelled => ApiError::ResourceUnavailable("Timeline shutting down".into()),
2466 0 : OffloadError::AlreadyInProgress => ApiError::Conflict("Timeline already being offloaded or deleted".into()),
2467 0 : _ => ApiError::InternalServerError(anyhow!(e))
2468 : }
2469 0 : })?;
2470 :
2471 0 : json_response(StatusCode::OK, ())
2472 0 : }
2473 0 : .instrument(info_span!("manual_timeline_offload", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2474 0 : .await
2475 0 : }
2476 :
2477 : // Run checkpoint immediately on given timeline.
2478 0 : async fn timeline_checkpoint_handler(
2479 0 : request: Request<Body>,
2480 0 : cancel: CancellationToken,
2481 0 : ) -> Result<Response<Body>, ApiError> {
2482 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2483 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2484 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2485 :
2486 0 : let state = get_state(&request);
2487 :
2488 0 : let mut flags = EnumSet::empty();
2489 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2490 0 : flags |= CompactFlags::ForceL0Compaction;
2491 0 : }
2492 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2493 0 : flags |= CompactFlags::ForceRepartition;
2494 0 : }
2495 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2496 0 : flags |= CompactFlags::ForceImageLayerCreation;
2497 0 : }
2498 :
2499 : // By default, checkpoints come with a compaction, but this may be disabled by tests that just want to flush + upload.
2500 0 : let compact = parse_query_param::<_, bool>(&request, "compact")?.unwrap_or(true);
2501 :
2502 0 : let wait_until_flushed: bool =
2503 0 : parse_query_param(&request, "wait_until_flushed")?.unwrap_or(true);
2504 :
2505 0 : let wait_until_uploaded =
2506 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2507 :
2508 0 : async {
2509 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2510 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2511 0 : if wait_until_flushed {
2512 0 : timeline.freeze_and_flush().await
2513 : } else {
2514 0 : timeline.freeze().await.and(Ok(()))
2515 0 : }.map_err(|e| {
2516 0 : match e {
2517 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
2518 0 : other => ApiError::InternalServerError(other.into()),
2519 :
2520 : }
2521 0 : })?;
2522 0 : if compact {
2523 0 : timeline
2524 0 : .compact(&cancel, flags, &ctx)
2525 0 : .await
2526 0 : .map_err(|e|
2527 0 : if e.is_cancel() {
2528 0 : ApiError::ShuttingDown
2529 : } else {
2530 0 : ApiError::InternalServerError(e.into_anyhow())
2531 0 : }
2532 0 : )?;
2533 0 : }
2534 :
2535 0 : if wait_until_uploaded {
2536 0 : tracing::info!("Waiting for uploads to complete...");
2537 0 : timeline.remote_client.wait_completion().await
2538 : // XXX map to correct ApiError for the cases where it's due to shutdown
2539 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2540 0 : tracing::info!("Uploads completed up to {}", timeline.get_remote_consistent_lsn_projected().unwrap_or(Lsn(0)));
2541 0 : }
2542 :
2543 0 : json_response(StatusCode::OK, ())
2544 0 : }
2545 0 : .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2546 0 : .await
2547 0 : }
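
// Illustrative checkpoint invocations (route shape and method are
// assumptions): a plain flush-and-compact, and a flush that skips compaction
// but waits for remote uploads:
//
//   curl -X PUT ".../timeline/<timeline_id>/checkpoint"
//   curl -X PUT ".../timeline/<timeline_id>/checkpoint?compact=false&wait_until_uploaded=true"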
2548 :
2549 0 : async fn timeline_download_remote_layers_handler_post(
2550 0 : mut request: Request<Body>,
2551 0 : _cancel: CancellationToken,
2552 0 : ) -> Result<Response<Body>, ApiError> {
2553 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2554 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2555 0 : let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
2556 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2557 :
2558 0 : let state = get_state(&request);
2559 :
2560 0 : let timeline =
2561 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2562 0 : .await?;
2563 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
2564 0 : .with_scope_timeline(&timeline);
2565 0 : match timeline.spawn_download_all_remote_layers(body, &ctx).await {
2566 0 : Ok(st) => json_response(StatusCode::ACCEPTED, st),
2567 0 : Err(st) => json_response(StatusCode::CONFLICT, st),
2568 : }
2569 0 : }
2570 :
2571 0 : async fn timeline_download_remote_layers_handler_get(
2572 0 : request: Request<Body>,
2573 0 : _cancel: CancellationToken,
2574 0 : ) -> Result<Response<Body>, ApiError> {
2575 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2576 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2577 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2578 0 : let state = get_state(&request);
2579 :
2580 0 : let timeline =
2581 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2582 0 : .await?;
2583 0 : let info = timeline
2584 0 : .get_download_all_remote_layers_task_info()
2585 0 : .context("task never started since last pageserver process start")
2586 0 : .map_err(|e| ApiError::NotFound(e.into()))?;
2587 0 : json_response(StatusCode::OK, info)
2588 0 : }
2589 :
2590 0 : async fn timeline_detach_ancestor_handler(
2591 0 : request: Request<Body>,
2592 0 : _cancel: CancellationToken,
2593 0 : ) -> Result<Response<Body>, ApiError> {
2594 : use pageserver_api::models::detach_ancestor::AncestorDetached;
2595 :
2596 : use crate::tenant::timeline::detach_ancestor;
2597 :
2598 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2599 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2600 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2601 0 : let behavior: Option<DetachBehavior> = parse_query_param(&request, "detach_behavior")?;
2602 :
2603 0 : let behavior = behavior.unwrap_or_default();
2604 :
2605 0 : let span = tracing::info_span!("detach_ancestor", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
2606 :
2607 0 : async move {
2608 0 : let mut options = detach_ancestor::Options::default();
2609 :
2610 0 : let rewrite_concurrency =
2611 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "rewrite_concurrency")?;
2612 0 : let copy_concurrency =
2613 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "copy_concurrency")?;
2614 :
2615 0 : [
2616 0 : (&mut options.rewrite_concurrency, rewrite_concurrency),
2617 0 : (&mut options.copy_concurrency, copy_concurrency),
2618 0 : ]
2619 0 : .into_iter()
2620 0 : .filter_map(|(target, val)| val.map(|val| (target, val)))
2621 0 : .for_each(|(target, val)| *target = val);
2622 :
2623 0 : let state = get_state(&request);
2624 :
2625 0 : let tenant = state
2626 0 : .tenant_manager
2627 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2628 :
2629 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2630 :
2631 0 : let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download);
2632 0 : let ctx = &ctx;
2633 :
2634 : // Flush the upload queues of all timelines before detaching the ancestor. We do the same thing
2635 : // again during shutdown. Uploading early keeps the backlog small, so the uploads at shutdown do
2636 : // not create downtime during timeline reloads.
2637 0 : for timeline in tenant.list_timelines() {
2638 0 : timeline
2639 0 : .remote_client
2640 0 : .wait_completion()
2641 0 : .await
2642 0 : .map_err(|e| {
2643 0 : ApiError::PreconditionFailed(format!("cannot drain upload queue: {e}").into())
2644 0 : })?;
2645 : }
2646 :
2647 0 : tracing::info!("all timeline upload queues are drained");
2648 :
2649 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2650 0 : let ctx = &ctx.with_scope_timeline(&timeline);
2651 :
2652 0 : let progress = timeline
2653 0 : .prepare_to_detach_from_ancestor(&tenant, options, behavior, ctx)
2654 0 : .await?;
2655 :
2656 : // uncomment to allow dropping the Tenant as early as possible
2657 : // drop(tenant);
2658 :
2659 0 : let resp = match progress {
2660 0 : detach_ancestor::Progress::Prepared(attempt, prepared) => {
2661 : // it would be great to tag the guard onto the tenant activation future
2662 0 : let reparented_timelines = state
2663 0 : .tenant_manager
2664 0 : .complete_detaching_timeline_ancestor(
2665 0 : tenant_shard_id,
2666 0 : timeline_id,
2667 0 : prepared,
2668 0 : behavior,
2669 0 : attempt,
2670 0 : ctx,
2671 0 : )
2672 0 : .await?;
2673 :
2674 0 : AncestorDetached {
2675 0 : reparented_timelines,
2676 0 : }
2677 : }
2678 0 : detach_ancestor::Progress::Done(resp) => resp,
2679 : };
2680 :
2681 0 : json_response(StatusCode::OK, resp)
2682 0 : }
2683 0 : .instrument(span)
2684 0 : .await
2685 0 : }
2686 :
2687 0 : async fn deletion_queue_flush(
2688 0 : r: Request<Body>,
2689 0 : cancel: CancellationToken,
2690 0 : ) -> Result<Response<Body>, ApiError> {
2691 0 : let state = get_state(&r);
2692 :
2693 0 : let execute = parse_query_param(&r, "execute")?.unwrap_or(false);
2694 :
2695 0 : let flush = async {
2696 0 : if execute {
2697 0 : state.deletion_queue_client.flush_execute().await
2698 : } else {
2699 0 : state.deletion_queue_client.flush().await
2700 : }
2701 0 : }
2702 : // DeletionQueueError's only case is shutting down.
2703 0 : .map_err(|_| ApiError::ShuttingDown);
2704 :
2705 0 : tokio::select! {
2706 0 : res = flush => {
2707 0 : res.map(|()| json_response(StatusCode::OK, ()))?
2708 : }
2709 0 : _ = cancel.cancelled() => {
2710 0 : Err(ApiError::ShuttingDown)
2711 : }
2712 : }
2713 0 : }
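
// As used above, `flush` waits until pending deletion lists are persisted,
// while `flush_execute` additionally waits for validated deletions to be
// executed against remote storage. An illustrative invocation of the
// stronger variant (route shape and method are assumptions):
//
//   curl -X PUT ".../deletion_queue/flush?execute=true"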
2714 :
2715 : /// Response body for the manual-debugging `GetPage@Lsn` endpoints below.
2716 0 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
2717 : struct GetPageResponse {
2718 : pub page: Bytes,
2719 : pub layers_visited: u32,
2720 : pub delta_layers_visited: u32,
2721 : pub records: Vec<(Lsn, NeonWalRecord)>,
2722 : pub img: Option<(Lsn, Bytes)>,
2723 : }
2724 :
2725 0 : async fn getpage_at_lsn_handler(
2726 0 : request: Request<Body>,
2727 0 : cancel: CancellationToken,
2728 0 : ) -> Result<Response<Body>, ApiError> {
2729 0 : getpage_at_lsn_handler_inner(false, request, cancel).await
2730 0 : }
2731 :
2732 0 : async fn touchpage_at_lsn_handler(
2733 0 : request: Request<Body>,
2734 0 : cancel: CancellationToken,
2735 0 : ) -> Result<Response<Body>, ApiError> {
2736 0 : getpage_at_lsn_handler_inner(true, request, cancel).await
2737 0 : }
2738 :
2739 : /// Tries a `GetPage@Lsn` request and reports whether it succeeds; useful for manual debugging.
2740 0 : async fn getpage_at_lsn_handler_inner(
2741 0 : touch: bool,
2742 0 : request: Request<Body>,
2743 0 : _cancel: CancellationToken,
2744 0 : ) -> Result<Response<Body>, ApiError> {
2745 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2746 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2747 : // Require pageserver admin permission for this API instead of only a tenant-level token.
2748 0 : check_permission(&request, None)?;
2749 0 : let state = get_state(&request);
2750 :
2751 : struct Key(pageserver_api::key::Key);
2752 :
2753 : impl std::str::FromStr for Key {
2754 : type Err = anyhow::Error;
2755 :
2756 0 : fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
2757 0 : pageserver_api::key::Key::from_hex(s).map(Key)
2758 0 : }
2759 : }
2760 :
2761 0 : let key: Key = parse_query_param(&request, "key")?
2762 0 : .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'key' query parameter")))?;
2763 0 : let lsn: Option<Lsn> = parse_query_param(&request, "lsn")?;
2764 :
2765 0 : async {
2766 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2767 0 : let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
2768 0 : .download_behavior(DownloadBehavior::Download)
2769 0 : .scope(context::Scope::new_timeline(&timeline))
2770 0 : .read_path_debug(true)
2771 0 : .root();
2772 :
2773 : // Use last_record_lsn if no lsn is provided
2774 0 : let lsn = lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2775 :
2776 0 : if touch {
2777 0 : json_response(StatusCode::OK, ())
2778 : } else {
2779 0 : let mut reconstruct_state = ValuesReconstructState::new_with_debug(IoConcurrency::sequential());
2780 0 : let page = timeline.debug_get(key.0, lsn, &ctx, &mut reconstruct_state).await?;
2781 0 : let response = GetPageResponse {
2782 0 : page,
2783 0 : layers_visited: reconstruct_state.get_layers_visited(),
2784 0 : delta_layers_visited: reconstruct_state.get_delta_layers_visited(),
2785 0 : records: reconstruct_state.debug_state.records.clone(),
2786 0 : img: reconstruct_state.debug_state.img.clone(),
2787 0 : };
2788 :
2789 0 : json_response(StatusCode::OK, response)
2790 : }
2791 0 : }
2792 0 : .instrument(info_span!("timeline_debug_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2793 0 : .await
2794 0 : }
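
// Illustrative debug reads (route shape assumed from the handlers above):
// `key` is a hex-encoded `pageserver_api::key::Key`, and `lsn` is optional,
// defaulting to the last record LSN:
//
//   curl ".../timeline/<timeline_id>/getpage?key=<hex key>&lsn=0/1C9D868"
//   curl ".../timeline/<timeline_id>/touchpage?key=<hex key>"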
2795 :
2796 0 : async fn timeline_collect_keyspace(
2797 0 : request: Request<Body>,
2798 0 : _cancel: CancellationToken,
2799 0 : ) -> Result<Response<Body>, ApiError> {
2800 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2801 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2802 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2803 0 : let state = get_state(&request);
2804 :
2805 0 : let at_lsn: Option<Lsn> = parse_query_param(&request, "at_lsn")?;
2806 :
2807 0 : async {
2808 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2809 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2810 0 : let at_lsn = at_lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2811 0 : let (dense_ks, sparse_ks) = timeline
2812 0 : .collect_keyspace(at_lsn, &ctx)
2813 0 : .await
2814 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2815 :
2816 : // This API is currently used by pagebench. Pagebench will iterate all keys within the keyspace.
2817 : // Therefore, we split dense/sparse keys in this API.
2818 0 : let res = pageserver_api::models::partitioning::Partitioning { keys: dense_ks, sparse_keys: sparse_ks, at_lsn };
2819 :
2820 0 : json_response(StatusCode::OK, res)
2821 0 : }
2822 0 : .instrument(info_span!("timeline_collect_keyspace", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2823 0 : .await
2824 0 : }
2825 :
2826 0 : async fn active_timeline_of_active_tenant(
2827 0 : tenant_manager: &TenantManager,
2828 0 : tenant_shard_id: TenantShardId,
2829 0 : timeline_id: TimelineId,
2830 0 : ) -> Result<Arc<Timeline>, ApiError> {
2831 0 : let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
2832 :
2833 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2834 :
2835 0 : Ok(tenant.get_timeline(timeline_id, true)?)
2836 0 : }
2837 :
2838 0 : async fn always_panic_handler(
2839 0 : req: Request<Body>,
2840 0 : _cancel: CancellationToken,
2841 0 : ) -> Result<Response<Body>, ApiError> {
2842 : // Deliberately cause a panic to exercise the panic hook registered via std::panic::set_hook().
2843 : // For pageserver, the relevant panic hook is `tracing_panic_hook`, and the `sentry` crate's wrapper around it.
2844 : // Use catch_unwind to ensure that neither tokio nor hyper is distracted by our panic.
2845 0 : let query = req.uri().query();
2846 0 : let _ = std::panic::catch_unwind(|| {
2847 0 : panic!("unconditional panic for testing panic hook integration; request query: {query:?}")
2848 : });
2849 0 : json_response(StatusCode::NO_CONTENT, ())
2850 0 : }
2851 :
2852 0 : async fn disk_usage_eviction_run(
2853 0 : mut r: Request<Body>,
2854 0 : cancel: CancellationToken,
2855 0 : ) -> Result<Response<Body>, ApiError> {
2856 0 : check_permission(&r, None)?;
2857 :
2858 0 : #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
2859 : struct Config {
2860 : /// How many bytes to evict before reporting that pressure is relieved.
2861 : evict_bytes: u64,
2862 :
2863 : #[serde(default)]
2864 : eviction_order: pageserver_api::config::EvictionOrder,
2865 : }
2866 :
2867 : #[derive(Debug, Clone, Copy, serde::Serialize)]
2868 : struct Usage {
2869 : // remains unchanged after instantiation of the struct
2870 : evict_bytes: u64,
2871 : // updated by `add_available_bytes`
2872 : freed_bytes: u64,
2873 : }
2874 :
2875 : impl crate::disk_usage_eviction_task::Usage for Usage {
2876 0 : fn has_pressure(&self) -> bool {
2877 0 : self.evict_bytes > self.freed_bytes
2878 0 : }
2879 :
2880 0 : fn add_available_bytes(&mut self, bytes: u64) {
2881 0 : self.freed_bytes += bytes;
2882 0 : }
2883 : }
2884 :
2885 0 : let config = json_request::<Config>(&mut r).await?;
2886 :
2887 0 : let usage = Usage {
2888 0 : evict_bytes: config.evict_bytes,
2889 0 : freed_bytes: 0,
2890 0 : };
2891 :
2892 0 : let state = get_state(&r);
2893 0 : let eviction_state = state.disk_usage_eviction_state.clone();
2894 :
2895 0 : let res = crate::disk_usage_eviction_task::disk_usage_eviction_task_iteration_impl(
2896 0 : &eviction_state,
2897 0 : &state.remote_storage,
2898 0 : usage,
2899 0 : &state.tenant_manager,
2900 0 : config.eviction_order.into(),
2901 0 : &cancel,
2902 0 : )
2903 0 : .await;
2904 :
2905 0 : info!(?res, "disk_usage_eviction_task_iteration_impl finished");
2906 :
2907 0 : let res = res.map_err(ApiError::InternalServerError)?;
2908 :
2909 0 : json_response(StatusCode::OK, res)
2910 0 : }
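// A minimal sketch (not part of the real eviction task) of the protocol the
// `Usage` trait above encodes: iteration keeps evicting until `has_pressure()`
// reports that the requested number of bytes has been freed. `candidate_sizes`
// is a hypothetical stand-in for the per-layer sizes the real task walks in
// eviction order.
#[allow(dead_code)]
fn usage_protocol_sketch(
    mut usage: impl crate::disk_usage_eviction_task::Usage,
    candidate_sizes: &[u64],
) -> bool {
    for &bytes in candidate_sizes {
        if !usage.has_pressure() {
            // Freed enough: pressure is relieved, stop early.
            return true;
        }
        // The real task would evict the layer here; we only account for
        // the bytes that eviction would free.
        usage.add_available_bytes(bytes);
    }
    !usage.has_pressure()
}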
2911 :
2912 0 : async fn secondary_upload_handler(
2913 0 : request: Request<Body>,
2914 0 : _cancel: CancellationToken,
2915 0 : ) -> Result<Response<Body>, ApiError> {
2916 0 : let state = get_state(&request);
2917 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2918 0 : state
2919 0 : .secondary_controller
2920 0 : .upload_tenant(tenant_shard_id)
2921 0 : .await?;
2922 :
2923 0 : json_response(StatusCode::OK, ())
2924 0 : }
2925 :
2926 0 : async fn tenant_scan_remote_handler(
2927 0 : request: Request<Body>,
2928 0 : cancel: CancellationToken,
2929 0 : ) -> Result<Response<Body>, ApiError> {
2930 0 : let state = get_state(&request);
2931 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
2932 :
2933 0 : let mut response = TenantScanRemoteStorageResponse::default();
2934 :
2935 0 : let (shards, _other_keys) =
2936 0 : list_remote_tenant_shards(&state.remote_storage, tenant_id, cancel.clone())
2937 0 : .await
2938 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2939 :
2940 0 : for tenant_shard_id in shards {
2941 0 : let (timeline_ids, _other_keys) =
2942 0 : list_remote_timelines(&state.remote_storage, tenant_shard_id, cancel.clone())
2943 0 : .await
2944 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2945 :
2946 0 : let mut generation = Generation::none();
2947 0 : for timeline_id in timeline_ids {
2948 0 : match download_index_part(
2949 0 : &state.remote_storage,
2950 0 : &tenant_shard_id,
2951 0 : &timeline_id,
2952 : Generation::MAX,
2953 0 : &cancel,
2954 : )
2955 0 : .instrument(info_span!("download_index_part",
2956 : tenant_id=%tenant_shard_id.tenant_id,
2957 0 : shard_id=%tenant_shard_id.shard_slug(),
2958 : %timeline_id))
2959 0 : .await
2960 : {
2961 0 : Ok((index_part, index_generation, _index_mtime)) => {
2962 0 : tracing::info!(
2963 0 : "Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, disk-consistent LSN {})",
2964 0 : index_part.layer_metadata.len(),
2965 0 : index_part.metadata.disk_consistent_lsn()
2966 : );
2967 0 : generation = std::cmp::max(generation, index_generation);
2968 : }
2969 : Err(DownloadError::NotFound) => {
2970 : // This is normal for tenants that were created with multiple shards: they have an unsharded path
2971 : // containing the timeline's initdb tarball but no index. Otherwise it is a bit strange.
2972 0 : tracing::info!(
2973 0 : "Timeline path {tenant_shard_id}/{timeline_id} exists in remote storage but has no index, skipping"
2974 : );
2975 0 : continue;
2976 : }
2977 0 : Err(e) => {
2978 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2979 : }
2980 : };
2981 : }
2982 :
2983 0 : let result =
2984 0 : download_tenant_manifest(&state.remote_storage, &tenant_shard_id, generation, &cancel)
2985 0 : .instrument(info_span!("download_tenant_manifest",
2986 : tenant_id=%tenant_shard_id.tenant_id,
2987 0 : shard_id=%tenant_shard_id.shard_slug()))
2988 0 : .await;
2989 0 : let stripe_size = match result {
2990 0 : Ok((manifest, _, _)) => manifest.stripe_size,
2991 0 : Err(DownloadError::NotFound) => None,
2992 0 : Err(err) => return Err(ApiError::InternalServerError(anyhow!(err))),
2993 : };
2994 :
2995 0 : response.shards.push(TenantScanRemoteStorageShard {
2996 0 : tenant_shard_id,
2997 0 : generation: generation.into(),
2998 0 : stripe_size,
2999 0 : });
3000 : }
3001 :
3002 0 : if response.shards.is_empty() {
3003 0 : return Err(ApiError::NotFound(
3004 0 : anyhow::anyhow!("No shards found for tenant ID {tenant_id}").into(),
3005 0 : ));
3006 0 : }
3007 :
3008 0 : json_response(StatusCode::OK, response)
3009 0 : }
3010 :
3011 0 : async fn secondary_download_handler(
3012 0 : request: Request<Body>,
3013 0 : _cancel: CancellationToken,
3014 0 : ) -> Result<Response<Body>, ApiError> {
3015 0 : let state = get_state(&request);
3016 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3017 0 : let wait = parse_query_param(&request, "wait_ms")?.map(Duration::from_millis);
3018 :
3019 : // We don't need this to issue the download request, but:
3020 : // - it enables us to cleanly return 404 if we get a request for an absent shard
3021 : // - we will use this to provide status feedback in the response
3022 0 : let Some(secondary_tenant) = state
3023 0 : .tenant_manager
3024 0 : .get_secondary_tenant_shard(tenant_shard_id)
3025 : else {
3026 0 : return Err(ApiError::NotFound(
3027 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
3028 0 : ));
3029 : };
3030 :
3031 0 : let timeout = wait.unwrap_or(Duration::MAX);
3032 :
3033 0 : let result = tokio::time::timeout(
3034 0 : timeout,
3035 0 : state.secondary_controller.download_tenant(tenant_shard_id),
3036 0 : )
3037 0 : .await;
3038 :
3039 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
3040 :
3041 0 : let status = match result {
3042 : Ok(Ok(())) => {
3043 0 : if progress.layers_downloaded >= progress.layers_total {
3044 : // Download job ran to completion
3045 0 : StatusCode::OK
3046 : } else {
3047 : // Download dropped out without errors because it ran out of time budget
3048 0 : StatusCode::ACCEPTED
3049 : }
3050 : }
3051 : // Edge case: downloads aren't usually fallible; things like a missing heatmap are considered
3052 : // okay. We could get an error here in the unlikely event that the tenant
3053 : // was detached between our check above and executing the download job.
3054 0 : Ok(Err(e)) => return Err(e.into()),
3055 : // A timeout is not an error: we have started the download, we're just not done
3056 : // yet. The caller will get a response body indicating status.
3057 0 : Err(_) => StatusCode::ACCEPTED,
3058 : };
3059 :
3060 0 : json_response(status, progress)
3061 0 : }
3062 :
3063 0 : async fn wait_lsn_handler(
3064 0 : mut request: Request<Body>,
3065 0 : cancel: CancellationToken,
3066 0 : ) -> Result<Response<Body>, ApiError> {
3067 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3068 0 : let wait_lsn_request: TenantWaitLsnRequest = json_request(&mut request).await?;
3069 :
3070 0 : let state = get_state(&request);
3071 0 : let tenant = state
3072 0 : .tenant_manager
3073 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3074 :
3075 0 : let mut wait_futures = Vec::default();
3076 0 : for timeline in tenant.list_timelines() {
3077 0 : let Some(lsn) = wait_lsn_request.timelines.get(&timeline.timeline_id) else {
3078 0 : continue;
3079 : };
3080 :
3081 0 : let fut = {
3082 0 : let timeline = timeline.clone();
3083 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
3084 0 : async move {
3085 0 : timeline
3086 0 : .wait_lsn(
3087 0 : *lsn,
3088 0 : WaitLsnWaiter::HttpEndpoint,
3089 0 : WaitLsnTimeout::Custom(wait_lsn_request.timeout),
3090 0 : &ctx,
3091 0 : )
3092 0 : .await
3093 0 : }
3094 : };
3095 0 : wait_futures.push(fut);
3096 : }
3097 :
3098 0 : if wait_futures.is_empty() {
3099 0 : return json_response(StatusCode::NOT_FOUND, ());
3100 0 : }
3101 :
3102 0 : let all_done = tokio::select! {
3103 0 : results = join_all(wait_futures) => {
3104 0 : results.iter().all(|res| res.is_ok())
3105 : },
3106 0 : _ = cancel.cancelled() => {
3107 0 : return Err(ApiError::Cancelled);
3108 : }
3109 : };
3110 :
3111 0 : let status = if all_done {
3112 0 : StatusCode::OK
3113 : } else {
3114 0 : StatusCode::ACCEPTED
3115 : };
3116 :
3117 0 : json_response(status, ())
3118 0 : }
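// A minimal sketch of the pattern used above: race a `join_all` over many
// futures against a `CancellationToken`, mapping "all futures succeeded"
// vs. "not all done" onto the OK/ACCEPTED split. The sleeps are hypothetical
// stand-ins for `Timeline::wait_lsn`.
#[allow(dead_code)]
async fn join_all_or_cancel_sketch(
    waits: Vec<Duration>,
    cancel: &CancellationToken,
) -> Result<bool, ApiError> {
    let futures = waits.into_iter().map(|wait| async move {
        tokio::time::sleep(wait).await;
        Ok::<(), ()>(())
    });
    tokio::select! {
        results = join_all(futures) => Ok(results.iter().all(|res| res.is_ok())),
        _ = cancel.cancelled() => Err(ApiError::Cancelled),
    }
}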
3119 :
3120 0 : async fn secondary_status_handler(
3121 0 : request: Request<Body>,
3122 0 : _cancel: CancellationToken,
3123 0 : ) -> Result<Response<Body>, ApiError> {
3124 0 : let state = get_state(&request);
3125 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3126 :
3127 0 : let Some(secondary_tenant) = state
3128 0 : .tenant_manager
3129 0 : .get_secondary_tenant_shard(tenant_shard_id)
3130 : else {
3131 0 : return Err(ApiError::NotFound(
3132 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
3133 0 : ));
3134 : };
3135 :
3136 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
3137 :
3138 0 : json_response(StatusCode::OK, progress)
3139 0 : }
3140 :
3141 0 : async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
3142 0 : json_response(
3143 : StatusCode::NOT_FOUND,
3144 0 : HttpErrorBody::from_msg("page not found".to_owned()),
3145 : )
3146 0 : }
3147 :
3148 0 : async fn post_tracing_event_handler(
3149 0 : mut r: Request<Body>,
3150 0 : _cancel: CancellationToken,
3151 0 : ) -> Result<Response<Body>, ApiError> {
3152 0 : #[derive(Debug, serde::Deserialize)]
3153 : #[serde(rename_all = "lowercase")]
3154 : enum Level {
3155 : Error,
3156 : Warn,
3157 : Info,
3158 : Debug,
3159 : Trace,
3160 : }
3161 0 : #[derive(Debug, serde::Deserialize)]
3162 : struct Request {
3163 : level: Level,
3164 : message: String,
3165 : }
3166 0 : let body: Request = json_request(&mut r)
3167 0 : .await
3168 0 : .map_err(|_| ApiError::BadRequest(anyhow::anyhow!("invalid JSON body")))?;
3169 :
3170 0 : match body.level {
3171 0 : Level::Error => tracing::error!(?body.message),
3172 0 : Level::Warn => tracing::warn!(?body.message),
3173 0 : Level::Info => tracing::info!(?body.message),
3174 0 : Level::Debug => tracing::debug!(?body.message),
3175 0 : Level::Trace => tracing::trace!(?body.message),
3176 : }
3177 :
3178 0 : json_response(StatusCode::OK, ())
3179 0 : }
3180 :
3181 0 : async fn put_io_engine_handler(
3182 0 : mut r: Request<Body>,
3183 0 : _cancel: CancellationToken,
3184 0 : ) -> Result<Response<Body>, ApiError> {
3185 0 : check_permission(&r, None)?;
3186 0 : let kind: crate::virtual_file::IoEngineKind = json_request(&mut r).await?;
3187 0 : crate::virtual_file::io_engine::set(kind);
3188 0 : json_response(StatusCode::OK, ())
3189 0 : }
3190 :
3191 0 : async fn put_io_mode_handler(
3192 0 : mut r: Request<Body>,
3193 0 : _cancel: CancellationToken,
3194 0 : ) -> Result<Response<Body>, ApiError> {
3195 0 : check_permission(&r, None)?;
3196 0 : let mode: IoMode = json_request(&mut r).await?;
3197 0 : crate::virtual_file::set_io_mode(mode);
3198 0 : json_response(StatusCode::OK, ())
3199 0 : }
3200 :
3201 : /// Polled by control plane.
3202 : ///
3203 : /// See [`crate::utilization`].
3204 0 : async fn get_utilization(
3205 0 : r: Request<Body>,
3206 0 : _cancel: CancellationToken,
3207 0 : ) -> Result<Response<Body>, ApiError> {
3208 0 : fail::fail_point!("get-utilization-http-handler", |_| {
3209 0 : Err(ApiError::ResourceUnavailable("failpoint".into()))
3210 0 : });
3211 :
3212 : // This probably could be completely public, but let's make that change later.
3213 0 : check_permission(&r, None)?;
3214 :
3215 0 : let state = get_state(&r);
3216 0 : let mut g = state.latest_utilization.lock().await;
3217 :
3218 0 : let regenerate_every = Duration::from_secs(1);
3219 0 : let still_valid = g
3220 0 : .as_ref()
3221 0 : .is_some_and(|(captured_at, _)| captured_at.elapsed() < regenerate_every);
3222 :
3223 : // Avoid needless statvfs calls, even though those should be non-blocking and fast.
3224 : // Regenerate at most at 1Hz, to allow polling at any rate.
3225 0 : if !still_valid {
3226 0 : let path = state.conf.tenants_path();
3227 0 : let doc =
3228 0 : crate::utilization::regenerate(state.conf, path.as_std_path(), &state.tenant_manager)
3229 0 : .map_err(ApiError::InternalServerError)?;
3230 :
3231 0 : let mut buf = Vec::new();
3232 0 : serde_json::to_writer(&mut buf, &doc)
3233 0 : .context("serialize")
3234 0 : .map_err(ApiError::InternalServerError)?;
3235 :
3236 0 : let body = bytes::Bytes::from(buf);
3237 :
3238 0 : *g = Some((std::time::Instant::now(), body));
3239 0 : }
3240 :
3241 : // hyper 0.14 doesn't yet have Response::clone so this is a bit of extra legwork
3242 0 : let cached = g.as_ref().expect("just set").1.clone();
3243 :
3244 0 : Response::builder()
3245 0 : .header(hyper::http::header::CONTENT_TYPE, "application/json")
3246 : // We thought of using the HTTP Date header, but that has only second precision, which
3247 : // does not give any debugging aid
3248 0 : .status(StatusCode::OK)
3249 0 : .body(hyper::Body::from(cached))
3250 0 : .context("build response")
3251 0 : .map_err(ApiError::InternalServerError)
3252 0 : }
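// A minimal sketch of the caching pattern above: keep the serialized response
// together with the `Instant` it was captured at, and regenerate only when it
// is older than the refresh interval. `regenerate` is a hypothetical closure
// standing in for `crate::utilization::regenerate` plus serialization.
#[allow(dead_code)]
async fn cached_at_1hz(
    cache: &tokio::sync::Mutex<Option<(std::time::Instant, Bytes)>>,
    regenerate: impl FnOnce() -> Bytes,
) -> Bytes {
    let mut guard = cache.lock().await;
    let still_valid = guard
        .as_ref()
        .is_some_and(|(captured_at, _)| captured_at.elapsed() < Duration::from_secs(1));
    if !still_valid {
        *guard = Some((std::time::Instant::now(), regenerate()));
    }
    // Callers always receive the freshest cached body.
    guard.as_ref().expect("just set").1.clone()
}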
3253 :
3254 : /// HADRON
3255 0 : async fn list_tenant_visible_size_handler(
3256 0 : request: Request<Body>,
3257 0 : _cancel: CancellationToken,
3258 0 : ) -> Result<Response<Body>, ApiError> {
3259 0 : check_permission(&request, None)?;
3260 0 : let state = get_state(&request);
3261 :
3262 0 : let mut map = BTreeMap::new();
3263 0 : for (tenant_shard_id, slot) in state.tenant_manager.list() {
3264 0 : match slot {
3265 0 : TenantSlot::Attached(tenant) => {
3266 0 : let visible_size = tenant.get_visible_size();
3267 0 : map.insert(tenant_shard_id, visible_size);
3268 0 : }
3269 : TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
3270 0 : continue;
3271 : }
3272 : }
3273 : }
3274 :
3275 0 : json_response(StatusCode::OK, map)
3276 0 : }
3277 :
3278 0 : async fn list_aux_files(
3279 0 : mut request: Request<Body>,
3280 0 : _cancel: CancellationToken,
3281 0 : ) -> Result<Response<Body>, ApiError> {
3282 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3283 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3284 0 : let body: ListAuxFilesRequest = json_request(&mut request).await?;
3285 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3286 :
3287 0 : let state = get_state(&request);
3288 :
3289 0 : let timeline =
3290 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3291 0 : .await?;
3292 :
3293 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3294 0 : state.conf.get_vectored_concurrent_io,
3295 0 : timeline.gate.enter().map_err(|_| ApiError::Cancelled)?,
3296 : );
3297 :
3298 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
3299 0 : .with_scope_timeline(&timeline);
3300 0 : let files = timeline
3301 0 : .list_aux_files(body.lsn, &ctx, io_concurrency)
3302 0 : .await?;
3303 0 : json_response(StatusCode::OK, files)
3304 0 : }
3305 :
3306 0 : async fn perf_info(
3307 0 : request: Request<Body>,
3308 0 : _cancel: CancellationToken,
3309 0 : ) -> Result<Response<Body>, ApiError> {
3310 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3311 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3312 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3313 :
3314 0 : let state = get_state(&request);
3315 :
3316 0 : let timeline =
3317 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3318 0 : .await?;
3319 :
3320 0 : let result = timeline.perf_info().await;
3321 :
3322 0 : json_response(StatusCode::OK, result)
3323 0 : }
3324 :
3325 0 : async fn ingest_aux_files(
3326 0 : mut request: Request<Body>,
3327 0 : _cancel: CancellationToken,
3328 0 : ) -> Result<Response<Body>, ApiError> {
3329 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3330 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3331 0 : let body: IngestAuxFilesRequest = json_request(&mut request).await?;
3332 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3333 :
3334 0 : let state = get_state(&request);
3335 :
3336 0 : let timeline =
3337 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3338 0 : .await?;
3339 :
3340 0 : let mut modification = timeline.begin_modification(
3341 0 : Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */
3342 0 : );
3343 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
3344 0 : for (fname, content) in body.aux_files {
3345 0 : modification
3346 0 : .put_file(&fname, content.as_bytes(), &ctx)
3347 0 : .await
3348 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
3349 : }
3350 0 : modification
3351 0 : .commit(&ctx)
3352 0 : .await
3353 0 : .map_err(ApiError::InternalServerError)?;
3354 :
3355 0 : json_response(StatusCode::OK, ())
3356 0 : }
3357 :
3358 : /// Report on the largest tenants on this pageserver, for the storage controller to identify
3359 : /// candidates for splitting
3360 0 : async fn post_top_tenants(
3361 0 : mut r: Request<Body>,
3362 0 : _cancel: CancellationToken,
3363 0 : ) -> Result<Response<Body>, ApiError> {
3364 0 : check_permission(&r, None)?;
3365 0 : let request: TopTenantShardsRequest = json_request(&mut r).await?;
3366 0 : let state = get_state(&r);
3367 :
3368 0 : fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 {
3369 0 : match order_by {
3370 0 : TenantSorting::ResidentSize => sizes.resident_size,
3371 0 : TenantSorting::MaxLogicalSize => sizes.max_logical_size,
3372 0 : TenantSorting::MaxLogicalSizePerShard => sizes.max_logical_size_per_shard,
3373 : }
3374 0 : }
3375 :
3376 : #[derive(Eq, PartialEq)]
3377 : struct HeapItem {
3378 : metric: u64,
3379 : sizes: TopTenantShardItem,
3380 : }
3381 :
3382 : impl PartialOrd for HeapItem {
3383 0 : fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
3384 0 : Some(self.cmp(other))
3385 0 : }
3386 : }
3387 :
3388 : /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which
3389 : /// supports popping the greatest item but not the smallest.
3390 : impl Ord for HeapItem {
3391 0 : fn cmp(&self, other: &Self) -> std::cmp::Ordering {
3392 0 : Reverse(self.metric).cmp(&Reverse(other.metric))
3393 0 : }
3394 : }
3395 :
3396 0 : let mut top_n: BinaryHeap<HeapItem> = BinaryHeap::with_capacity(request.limit);
3397 :
3398 : // FIXME: this is a lot of clones to take this tenant list
3399 0 : for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() {
3400 0 : if let Some(shards_lt) = request.where_shards_lt {
3401 : // Ignore tenants which already have >= this many shards
3402 0 : if tenant_shard_id.shard_count >= shards_lt {
3403 0 : continue;
3404 0 : }
3405 0 : }
3406 :
3407 0 : let sizes = match tenant_slot {
3408 0 : TenantSlot::Attached(tenant) => tenant.get_sizes(),
3409 : TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
3410 0 : continue;
3411 : }
3412 : };
3413 0 : let metric = get_size_metric(&sizes, &request.order_by);
3414 :
3415 0 : if let Some(gt) = request.where_gt {
3416 : // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work
3417 0 : if metric <= gt {
3418 0 : continue;
3419 0 : }
3420 0 : };
3421 :
3422 0 : match top_n.peek() {
3423 0 : None => {
3424 0 : // Top N list is empty: candidate becomes first member
3425 0 : top_n.push(HeapItem { metric, sizes });
3426 0 : }
3427 0 : Some(i) if i.metric > metric && top_n.len() < request.limit => {
3428 0 : // Lowest item in the heap is greater than our candidate, but we aren't at the limit yet: push it
3429 0 : top_n.push(HeapItem { metric, sizes });
3430 0 : }
3431 0 : Some(i) if i.metric > metric => {
3432 0 : // Heap is at the limit and its lowest value is greater than our candidate: drop the candidate.
3433 0 : }
3434 0 : Some(_) => top_n.push(HeapItem { metric, sizes }),
3435 : }
3436 :
3437 0 : while top_n.len() > request.limit {
3438 0 : top_n.pop();
3439 0 : }
3440 : }
3441 :
3442 0 : json_response(
3443 : StatusCode::OK,
3444 : TopTenantShardsResponse {
3445 0 : shards: top_n.into_iter().map(|i| i.sizes).collect(),
3446 : },
3447 : )
3448 0 : }
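// A minimal sketch of the `Reverse`-ordering trick used by `HeapItem` above:
// wrapping the metric in `Reverse` turns `BinaryHeap` (a max-heap) into a
// min-heap, so the heap's root is always the smallest retained metric and
// keeping the N largest values just means popping whenever we exceed N.
#[allow(dead_code)]
fn top_n_sketch(metrics: impl IntoIterator<Item = u64>, limit: usize) -> Vec<u64> {
    let mut top_n: BinaryHeap<Reverse<u64>> = BinaryHeap::with_capacity(limit + 1);
    for metric in metrics {
        top_n.push(Reverse(metric));
        if top_n.len() > limit {
            // Evict the current minimum; the `limit` largest metrics survive.
            top_n.pop();
        }
    }
    top_n.into_iter().map(|Reverse(metric)| metric).collect()
}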
3449 :
3450 0 : async fn put_tenant_timeline_import_basebackup(
3451 0 : request: Request<Body>,
3452 0 : _cancel: CancellationToken,
3453 0 : ) -> Result<Response<Body>, ApiError> {
3454 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3455 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3456 0 : let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
3457 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3458 0 : let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?;
3459 :
3460 0 : check_permission(&request, Some(tenant_id))?;
3461 :
3462 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3463 :
3464 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
3465 :
3466 0 : let span = info_span!("import_basebackup",
3467 0 : tenant_id=%tenant_id, timeline_id=%timeline_id, shard_id=%tenant_shard_id.shard_slug(),
3468 : base_lsn=%base_lsn, end_lsn=%end_lsn, pg_version=%pg_version);
3469 0 : async move {
3470 0 : let state = get_state(&request);
3471 0 : let tenant = state
3472 0 : .tenant_manager
3473 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3474 :
3475 0 : let broker_client = state.broker_client.clone();
3476 :
3477 0 : let mut body = StreamReader::new(
3478 0 : request
3479 0 : .into_body()
3480 0 : .map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
3481 : );
3482 :
3483 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
3484 :
3485 0 : let (timeline, timeline_ctx) = tenant
3486 0 : .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
3487 0 : .map_err(ApiError::InternalServerError)
3488 0 : .await?;
3489 :
3490 : // TODO mark timeline as not ready until it reaches end_lsn.
3491 : // We might have some wal to import as well, and we should prevent compute
3492 : // from connecting before that and writing conflicting wal.
3493 : //
3494 : // This is not relevant for pageserver->pageserver migrations, since there's
3495 : // no wal to import. But should be fixed if we want to import from postgres.
3496 :
3497 : // TODO leave clean state on error. For now you can use detach to clean
3498 : // up broken state from a failed import.
3499 :
3500 : // Import basebackup provided via CopyData
3501 0 : info!("importing basebackup");
3502 :
3503 0 : timeline
3504 0 : .import_basebackup_from_tar(
3505 0 : tenant.clone(),
3506 0 : &mut body,
3507 0 : base_lsn,
3508 0 : broker_client,
3509 0 : &timeline_ctx,
3510 0 : )
3511 0 : .await
3512 0 : .map_err(ApiError::InternalServerError)?;
3513 :
3514 : // Read the end of the tar archive.
3515 0 : read_tar_eof(body)
3516 0 : .await
3517 0 : .map_err(ApiError::InternalServerError)?;
3518 :
3519 : // TODO check checksum
3520 : // Meanwhile you can verify client-side by taking fullbackup
3521 : // and checking that it matches in size with what was imported.
3522 : // It wouldn't work if base came from vanilla postgres though,
3523 : // since we discard some log files.
3524 :
3525 0 : info!("done");
3526 0 : json_response(StatusCode::OK, ())
3527 0 : }
3528 0 : .instrument(span)
3529 0 : .await
3530 0 : }
3531 :
3532 0 : async fn put_tenant_timeline_import_wal(
3533 0 : request: Request<Body>,
3534 0 : _cancel: CancellationToken,
3535 0 : ) -> Result<Response<Body>, ApiError> {
3536 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3537 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3538 0 : let start_lsn: Lsn = must_parse_query_param(&request, "start_lsn")?;
3539 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3540 :
3541 0 : check_permission(&request, Some(tenant_id))?;
3542 :
3543 0 : let span = info_span!("import_wal", tenant_id=%tenant_id, timeline_id=%timeline_id, start_lsn=%start_lsn, end_lsn=%end_lsn);
3544 0 : async move {
3545 0 : let state = get_state(&request);
3546 :
3547 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, TenantShardId::unsharded(tenant_id), timeline_id).await?;
3548 0 : let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
3549 0 : .download_behavior(DownloadBehavior::Warn)
3550 0 : .scope(context::Scope::new_timeline(&timeline))
3551 0 : .root();
3552 :
3553 0 : let mut body = StreamReader::new(request.into_body().map(|res| {
3554 0 : res.map_err(|error| {
3555 0 : std::io::Error::other(anyhow::anyhow!(error))
3556 0 : })
3557 0 : }));
3558 :
3559 0 : let last_record_lsn = timeline.get_last_record_lsn();
3560 0 : if last_record_lsn != start_lsn {
3561 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL from Lsn {start_lsn} because timeline does not start from the same lsn: {last_record_lsn}")));
3562 0 : }
3563 :
3564 : // TODO leave clean state on error. For now you can use detach to clean
3565 : // up broken state from a failed import.
3566 :
3567 : // Import wal provided via CopyData
3568 0 : info!("importing wal");
3569 0 : crate::import_datadir::import_wal_from_tar(&timeline, &mut body, start_lsn, end_lsn, &ctx).await.map_err(ApiError::InternalServerError)?;
3570 0 : info!("wal import complete");
3571 :
3572 : // Read the end of the tar archive.
3573 0 : read_tar_eof(body).await.map_err(ApiError::InternalServerError)?;
3574 :
3575 : // TODO Does it make sense to overshoot?
3576 0 : if timeline.get_last_record_lsn() < end_lsn {
3577 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("WAL import did not reach the expected end LSN {end_lsn}: the timeline only advanced to {}", timeline.get_last_record_lsn())));
3578 0 : }
3579 :
3580 : // Flush data to disk, then upload to s3. No need for a forced checkpoint.
3581 : // We only want to persist the data, and it doesn't matter if it's in the
3582 : // shape of deltas or images.
3583 0 : info!("flushing layers");
3584 0 : timeline.freeze_and_flush().await.map_err(|e| match e {
3585 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
3586 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
3587 0 : })?;
3588 :
3589 0 : info!("done");
3590 :
3591 0 : json_response(StatusCode::OK, ())
3592 0 : }.instrument(span).await
3593 0 : }
3594 :
3595 : /// Activate a timeline after its import has completed
3596 : ///
3597 : /// The endpoint is idempotent and callers are expected to retry all
3598 : /// errors until a successful response.
3599 0 : async fn activate_post_import_handler(
3600 0 : request: Request<Body>,
3601 0 : _cancel: CancellationToken,
3602 0 : ) -> Result<Response<Body>, ApiError> {
3603 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3604 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3605 :
3606 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3607 : const DEFAULT_ACTIVATE_TIMEOUT: Duration = Duration::from_secs(1);
3608 0 : let activate_timeout = parse_query_param(&request, "timeline_activate_timeout_ms")?
3609 0 : .map(Duration::from_millis)
3610 0 : .unwrap_or(DEFAULT_ACTIVATE_TIMEOUT);
3611 :
3612 0 : let span = info_span!(
3613 : "activate_post_import_handler",
3614 : tenant_id=%tenant_shard_id.tenant_id,
3615 : timeline_id=%timeline_id,
3616 0 : shard_id=%tenant_shard_id.shard_slug()
3617 : );
3618 :
3619 0 : async move {
3620 0 : let state = get_state(&request);
3621 0 : let tenant = state
3622 0 : .tenant_manager
3623 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3624 :
3625 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
3626 :
3627 0 : tenant.finalize_importing_timeline(timeline_id).await?;
3628 :
3629 0 : match tenant.get_timeline(timeline_id, false) {
3630 0 : Ok(_timeline) => {
3631 0 : // Timeline is already visible. Reset not required: fall through.
3632 0 : }
3633 : Err(GetTimelineError::NotFound { .. }) => {
3634 : // This is crude: we reset the whole tenant such that the new timeline is detected
3635 : // and activated. We can come up with something more granular in the future.
3636 : //
3637 : // Note that we only reset the tenant if required: when the timeline is
3638 : // not present in [`Tenant::timelines`].
3639 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3640 0 : state
3641 0 : .tenant_manager
3642 0 : .reset_tenant(tenant_shard_id, false, &ctx)
3643 0 : .await
3644 0 : .map_err(ApiError::InternalServerError)?;
3645 : }
3646 : Err(GetTimelineError::ShuttingDown) => {
3647 0 : return Err(ApiError::ShuttingDown);
3648 : }
3649 : Err(GetTimelineError::NotActive { .. }) => {
3650 0 : unreachable!("Called get_timeline with active_only=false");
3651 : }
3652 : }
3653 :
3654 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
3655 :
3656 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn)
3657 0 : .with_scope_timeline(&timeline);
3658 :
3659 0 : let result =
3660 0 : tokio::time::timeout(activate_timeout, timeline.wait_to_become_active(&ctx)).await;
3661 0 : match result {
3662 0 : Ok(Ok(())) => {
3663 0 : // fallthrough
3664 0 : }
3665 : // Timeline reached some other state that's not active
3666 : // TODO(vlad): if the tenant is broken, return a permanent error
3667 0 : Ok(Err(_timeline_state)) => {
3668 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3669 0 : "Timeline activation failed"
3670 0 : )));
3671 : }
3672 : // Activation timed out
3673 : Err(_) => {
3674 0 : return Err(ApiError::Timeout("Timeline activation timed out".into()));
3675 : }
3676 : }
3677 :
3678 0 : let timeline_info = build_timeline_info(
3679 0 : &timeline, false, // include_non_incremental_logical_size,
3680 0 : false, // force_await_initial_logical_size
3681 0 : false, // include_image_consistent_lsn
3682 0 : &ctx,
3683 0 : )
3684 0 : .await
3685 0 : .context("get local timeline info")
3686 0 : .map_err(ApiError::InternalServerError)?;
3687 :
3688 0 : json_response(StatusCode::OK, timeline_info)
3689 0 : }
3690 0 : .instrument(span)
3691 0 : .await
3692 0 : }
3693 :
3694 : // [Hadron] Reset gauge metrics that are used to raise alerts. We need this API as a stop-gap measure to reset alerts
3695 : // after we manually rectify situations such as local SSD data loss. We will eventually automate this.
3696 0 : async fn hadron_reset_alert_gauges(
3697 0 : request: Request<Body>,
3698 0 : _cancel: CancellationToken,
3699 0 : ) -> Result<Response<Body>, ApiError> {
3700 0 : check_permission(&request, None)?;
3701 0 : LOCAL_DATA_LOSS_SUSPECTED.set(0);
3702 0 : json_response(StatusCode::OK, ())
3703 0 : }
3704 :
3705 : /// Read the end of a tar archive.
3706 : ///
3707 : /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
3708 : /// `tokio_tar` already read the first such block. Read the second all-zeros block,
3709 : /// and check that there is no more data after the EOF marker.
3710 : ///
3711 : /// The 'tar' command can also write extra blocks of zeros, up to a record
3712 : /// size, controlled by the --record-size argument. Ignore them too.
3713 0 : async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow::Result<()> {
3714 : use tokio::io::AsyncReadExt;
3715 0 : let mut buf = [0u8; 512];
3716 :
3717 : // Read the all-zeros block, and verify it
3718 0 : let mut total_bytes = 0;
3719 0 : while total_bytes < 512 {
3720 0 : let nbytes = reader.read(&mut buf[total_bytes..]).await?;
3721 0 : total_bytes += nbytes;
3722 0 : if nbytes == 0 {
3723 0 : break;
3724 0 : }
3725 : }
3726 0 : if total_bytes < 512 {
3727 0 : anyhow::bail!("incomplete or invalid tar EOF marker");
3728 0 : }
3729 0 : if !buf.iter().all(|&x| x == 0) {
3730 0 : anyhow::bail!("invalid tar EOF marker");
3731 0 : }
3732 :
3733 : // Drain any extra zero-blocks after the EOF marker
3734 0 : let mut trailing_bytes = 0;
3735 0 : let mut seen_nonzero_bytes = false;
3736 : loop {
3737 0 : let nbytes = reader.read(&mut buf).await?;
3738 0 : trailing_bytes += nbytes;
3739 0 : if !buf.iter().all(|&x| x == 0) {
3740 0 : seen_nonzero_bytes = true;
3741 0 : }
3742 0 : if nbytes == 0 {
3743 0 : break;
3744 0 : }
3745 : }
3746 0 : if seen_nonzero_bytes {
3747 0 : anyhow::bail!("unexpected non-zero bytes after the tar archive");
3748 0 : }
3749 0 : if trailing_bytes % 512 != 0 {
3750 0 : anyhow::bail!(
3751 0 : "unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive"
3752 : );
3753 0 : }
3754 0 : Ok(())
3755 0 : }
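// A test sketch for `read_tar_eof`, relying on tokio's `AsyncRead`
// implementation for byte slices. `read_tar_eof` is called after `tokio_tar`
// has consumed the first all-zeros block, so one remaining 512-byte zero
// block is the minimal valid input.
#[cfg(test)]
#[tokio::test]
async fn read_tar_eof_sketch() {
    // The second all-zeros block alone is accepted.
    read_tar_eof(&[0u8; 512][..]).await.unwrap();
    // Extra zero padding in multiples of the 512-byte block size is fine too.
    read_tar_eof(&[0u8; 2048][..]).await.unwrap();
    // A non-zero "EOF marker" is rejected.
    assert!(read_tar_eof(&[1u8; 512][..]).await.is_err());
}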
3756 :
3757 0 : async fn force_refresh_feature_flag(
3758 0 : request: Request<Body>,
3759 0 : _cancel: CancellationToken,
3760 0 : ) -> Result<Response<Body>, ApiError> {
3761 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3762 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3763 :
3764 0 : let state = get_state(&request);
3765 0 : let tenant = state
3766 0 : .tenant_manager
3767 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3768 0 : tenant
3769 0 : .feature_resolver
3770 0 : .refresh_properties_and_flags(&tenant);
3771 0 : json_response(StatusCode::OK, ())
3772 0 : }
3773 :
3774 0 : async fn tenant_evaluate_feature_flag(
3775 0 : request: Request<Body>,
3776 0 : _cancel: CancellationToken,
3777 0 : ) -> Result<Response<Body>, ApiError> {
3778 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3779 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3780 :
3781 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3782 0 : let as_type: Option<String> = parse_query_param(&request, "as")?;
3783 :
3784 0 : let state = get_state(&request);
3785 :
3786 0 : async {
3787 0 : let tenant = state
3788 0 : .tenant_manager
3789 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3790 : // TODO: the properties we get here might be stale right after they are collected. But such races are rare (properties are
3791 : // updated every 10s) and we don't need to worry about them for now.
3792 0 : let properties = tenant.feature_resolver.collect_properties();
3793 0 : if as_type.as_deref() == Some("boolean") {
3794 0 : let result = tenant.feature_resolver.evaluate_boolean(&flag);
3795 0 : let result = result.map(|_| true).map_err(|e| e.to_string());
3796 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3797 0 : } else if as_type.as_deref() == Some("multivariate") {
3798 0 : let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
3799 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3800 : } else {
3801 : // Auto-infer the type of the feature flag.
3802 0 : let is_boolean = tenant.feature_resolver.is_feature_flag_boolean(&flag).map_err(|e| ApiError::InternalServerError(anyhow::anyhow!("{e}")))?;
3803 0 : if is_boolean {
3804 0 : let result = tenant.feature_resolver.evaluate_boolean(&flag);
3805 0 : let result = result.map(|_| true).map_err(|e| e.to_string());
3806 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3807 : } else {
3808 0 : let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
3809 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3810 : }
3811 : }
3812 0 : }
3813 0 : .instrument(info_span!("tenant_evaluate_feature_flag", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()))
3814 0 : .await
3815 0 : }
3816 :
3817 0 : async fn force_override_feature_flag_for_testing_put(
3818 0 : request: Request<Body>,
3819 0 : _cancel: CancellationToken,
3820 0 : ) -> Result<Response<Body>, ApiError> {
3821 0 : check_permission(&request, None)?;
3822 :
3823 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3824 0 : let value: String = must_parse_query_param(&request, "value")?;
3825 0 : let state = get_state(&request);
3826 0 : state
3827 0 : .feature_resolver
3828 0 : .force_override_for_testing(&flag, Some(&value));
3829 0 : json_response(StatusCode::OK, ())
3830 0 : }
3831 :
3832 0 : async fn force_override_feature_flag_for_testing_delete(
3833 0 : request: Request<Body>,
3834 0 : _cancel: CancellationToken,
3835 0 : ) -> Result<Response<Body>, ApiError> {
3836 0 : check_permission(&request, None)?;
3837 :
3838 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3839 0 : let state = get_state(&request);
3840 0 : state
3841 0 : .feature_resolver
3842 0 : .force_override_for_testing(&flag, None);
3843 0 : json_response(StatusCode::OK, ())
3844 0 : }
3845 :
3846 0 : async fn update_feature_flag_spec(
3847 0 : mut request: Request<Body>,
3848 0 : _cancel: CancellationToken,
3849 0 : ) -> Result<Response<Body>, ApiError> {
3850 0 : check_permission(&request, None)?;
3851 0 : let body = json_request(&mut request).await?;
3852 0 : let state = get_state(&request);
3853 0 : state
3854 0 : .feature_resolver
3855 0 : .update(body)
3856 0 : .map_err(ApiError::InternalServerError)?;
3857 0 : json_response(StatusCode::OK, ())
3858 0 : }
3859 :
3860 : /// Common functionality of all the HTTP API handlers.
3861 : ///
3862 : /// - Adds a tracing span to each request (by `request_span`)
3863 : /// - Logs the request depending on the request method (by `request_span`)
3864 : /// - Logs the response if it was not successful (by `request_span`)
3865 : /// - Shields the handler function from async cancellations. Hyper can drop the handler
3866 : /// Future if the connection to the client is lost, but most of the pageserver code is
3867 : /// not async cancellation safe. This converts the dropped future into a graceful cancellation
3868 : /// request with a CancellationToken.
3869 0 : async fn api_handler<R, H>(request: Request<Body>, handler: H) -> Result<Response<Body>, ApiError>
3870 0 : where
3871 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3872 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3873 0 : {
3874 0 : if request.uri() != &"/v1/failpoints".parse::<Uri>().unwrap() {
3875 0 : fail::fail_point!("api-503", |_| Err(ApiError::ResourceUnavailable(
3876 0 : "failpoint".into()
3877 0 : )));
3878 :
3879 0 : fail::fail_point!("api-500", |_| Err(ApiError::InternalServerError(
3880 0 : anyhow::anyhow!("failpoint")
3881 0 : )));
3882 0 : }
3883 :
3884 : // Spawn a new task to handle the request, to protect the handler from unexpected
3885 : // async cancellations. Most pageserver functions are not async cancellation safe.
3886 : // We arm a drop-guard, so that if Hyper drops the Future, we signal the task
3887 : // with the cancellation token.
3888 0 : let token = CancellationToken::new();
3889 0 : let cancel_guard = token.clone().drop_guard();
3890 0 : let result = request_span(request, move |r| async {
3891 0 : let handle = tokio::spawn(
3892 0 : async {
3893 0 : let token_cloned = token.clone();
3894 0 : let result = handler(r, token).await;
3895 0 : if token_cloned.is_cancelled() {
3896 : // dropguard has executed: we will never turn this result into response.
3897 : // at least temporarily do {:?} logging; these failures are rare enough, but
3898 : // could hide difficult errors.
3899 : // could hide difficult errors.
3900 0 : match &result {
3901 0 : Ok(response) => {
3902 0 : let status = response.status();
3903 0 : info!(%status, "Cancelled request finished successfully")
3904 : }
3905 0 : Err(e) => match e {
3906 : ApiError::ShuttingDown | ApiError::ResourceUnavailable(_) => {
3907 : // Don't log this at error severity: they are normal during lifecycle of tenants/process
3908 0 : info!("Cancelled request aborted for shutdown")
3909 : }
3910 : _ => {
3911 : // Log these in a highly visible way, because we have no client to send the response to, but
3912 : // would like to know that something went wrong.
3913 0 : error!("Cancelled request finished with an error: {e:?}")
3914 : }
3915 : },
3916 : }
3917 0 : }
3918 : // the only logging for cancelled, panicked request handlers is the tracing_panic_hook,
3919 : // which should suffice.
3920 : //
3921 : // there is still a chance to lose the result due to a race between
3922 : // returning from here and the actual connection closing happening
3923 : // before the outer task gets to execute. Leaving that for #5815.
3924 0 : result
3925 0 : }
3926 0 : .in_current_span(),
3927 : );
3928 :
3929 0 : match handle.await {
3930 : // TODO: never actually return Err from here, always Ok(...) so that we can log
3931 : // spanned errors. Call api_error_handler instead and return appropriate Body.
3932 0 : Ok(result) => result,
3933 0 : Err(e) => {
3934 : // The handler task panicked. We have a global panic handler that logs the
3935 : // panic with its backtrace, so no need to log that here. Only log a brief
3936 : // message to make it clear that we returned the error to the client.
3937 0 : error!("HTTP request handler task panicked: {e:#}");
3938 :
3939 : // Don't return an Error here, because then fallback error handler that was
3940 : // installed in make_router() will print the error. Instead, construct the
3941 : // HTTP error response and return that.
3942 0 : Ok(
3943 0 : ApiError::InternalServerError(anyhow!("HTTP request handler task panicked"))
3944 0 : .into_response(),
3945 0 : )
3946 : }
3947 : }
3948 0 : })
3949 0 : .await;
3950 :
3951 0 : cancel_guard.disarm();
3952 :
3953 0 : result
3954 0 : }
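// A minimal sketch of the drop-guard shielding performed by `api_handler`
// above: work runs in a spawned task that outlives the caller's future, and
// an armed `DropGuard` cancels the token if that future is dropped, turning
// an abrupt hyper disconnect into a graceful cancellation request.
#[allow(dead_code)]
async fn shielded_sketch<F, T>(work: F) -> Option<T>
where
    F: std::future::Future<Output = T> + Send + 'static,
    T: Send + 'static,
{
    let token = CancellationToken::new();
    let cancel_guard = token.clone().drop_guard();
    let handle = tokio::spawn(async move {
        // A real handler would periodically check `token.is_cancelled()`
        // (or select on `token.cancelled()`) and wind down gracefully.
        let _token = token;
        work.await
    });
    let result = handle.await.ok();
    // Normal completion: disarm so the guard does not cancel the token.
    cancel_guard.disarm();
    result
}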
3955 :
3956 : /// Like api_handler, but returns an error response if the server is built without
3957 : /// the 'testing' feature.
3958 0 : async fn testing_api_handler<R, H>(
3959 0 : desc: &str,
3960 0 : request: Request<Body>,
3961 0 : handler: H,
3962 0 : ) -> Result<Response<Body>, ApiError>
3963 0 : where
3964 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3965 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3966 0 : {
3967 0 : if cfg!(feature = "testing") {
3968 0 : api_handler(request, handler).await
3969 : } else {
3970 0 : std::future::ready(Err(ApiError::BadRequest(anyhow!(
3971 0 : "Cannot {desc} because pageserver was compiled without testing APIs",
3972 0 : ))))
3973 0 : .await
3974 : }
3975 0 : }
3976 :
3977 0 : pub fn make_router(
3978 0 : state: Arc<State>,
3979 0 : launch_ts: &'static LaunchTimestamp,
3980 0 : auth: Option<Arc<SwappableJwtAuth>>,
3981 0 : ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
3982 0 : let spec = include_bytes!("openapi_spec.yml");
3983 0 : let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
3984 0 : if auth.is_some() {
3985 0 : router = router.middleware(auth_middleware(|request| {
3986 0 : let state = get_state(request);
3987 0 : if state.allowlist_routes.contains(&request.uri().path()) {
3988 0 : None
3989 : } else {
3990 0 : state.auth.as_deref()
3991 : }
3992 0 : }))
3993 0 : }
3994 :
3995 0 : router = router.middleware(
3996 0 : endpoint::add_response_header_middleware(
3997 0 : "PAGESERVER_LAUNCH_TIMESTAMP",
3998 0 : &launch_ts.to_string(),
3999 : )
4000 0 : .expect("construct launch timestamp header middleware"),
4001 : );
4002 :
4003 0 : let force_metric_collection_on_scrape = state.conf.force_metric_collection_on_scrape;
4004 :
4005 0 : let prometheus_metrics_handler_wrapper =
4006 0 : move |req| prometheus_metrics_handler(req, force_metric_collection_on_scrape);
4007 :
4008 0 : Ok(router
4009 0 : .data(state)
4010 0 : .get("/metrics", move |r| request_span(r, prometheus_metrics_handler_wrapper))
4011 0 : .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
4012 0 : .get("/profile/heap", |r| request_span(r, profile_heap_handler))
4013 0 : .get("/v1/status", |r| api_handler(r, status_handler))
4014 0 : .put("/v1/failpoints", |r| {
4015 0 : testing_api_handler("manage failpoints", r, failpoints_handler)
4016 0 : })
4017 0 : .post("/v1/reload_auth_validation_keys", |r| {
4018 0 : api_handler(r, reload_auth_validation_keys_handler)
4019 0 : })
4020 0 : .get("/v1/tenant", |r| api_handler(r, tenant_list_handler))
4021 0 : .get("/v1/tenant/:tenant_shard_id", |r| {
4022 0 : api_handler(r, tenant_status)
4023 0 : })
4024 0 : .delete("/v1/tenant/:tenant_shard_id", |r| {
4025 0 : api_handler(r, tenant_delete_handler)
4026 0 : })
4027 0 : .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
4028 0 : api_handler(r, tenant_size_handler)
4029 0 : })
4030 0 : .patch("/v1/tenant/config", |r| {
4031 0 : api_handler(r, patch_tenant_config_handler)
4032 0 : })
4033 0 : .put("/v1/tenant/config", |r| {
4034 0 : api_handler(r, update_tenant_config_handler)
4035 0 : })
4036 0 : .put("/v1/tenant/:tenant_shard_id/shard_split", |r| {
4037 0 : api_handler(r, tenant_shard_split_handler)
4038 0 : })
4039 0 : .get("/v1/tenant/:tenant_shard_id/config", |r| {
4040 0 : api_handler(r, get_tenant_config_handler)
4041 0 : })
4042 0 : .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
4043 0 : api_handler(r, put_tenant_location_config_handler)
4044 0 : })
4045 0 : .get("/v1/location_config", |r| {
4046 0 : api_handler(r, list_location_config_handler)
4047 0 : })
4048 0 : .get("/v1/location_config/:tenant_shard_id", |r| {
4049 0 : api_handler(r, get_location_config_handler)
4050 0 : })
4051 0 : .put(
4052 : "/v1/tenant/:tenant_shard_id/time_travel_remote_storage",
4053 0 : |r| api_handler(r, tenant_time_travel_remote_storage_handler),
4054 : )
4055 0 : .get("/v1/tenant/:tenant_shard_id/timeline", |r| {
4056 0 : api_handler(r, timeline_list_handler)
4057 0 : })
4058 0 : .get("/v1/tenant/:tenant_shard_id/timeline_and_offloaded", |r| {
4059 0 : api_handler(r, timeline_and_offloaded_list_handler)
4060 0 : })
4061 0 : .post("/v1/tenant/:tenant_shard_id/timeline", |r| {
4062 0 : api_handler(r, timeline_create_handler)
4063 0 : })
4064 0 : .post("/v1/tenant/:tenant_shard_id/reset", |r| {
4065 0 : api_handler(r, tenant_reset_handler)
4066 0 : })
4067 0 : .post(
4068 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive",
4069 0 : |r| api_handler(r, timeline_preserve_initdb_handler),
4070 : )
4071 0 : .put(
4072 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config",
4073 0 : |r| api_handler(r, timeline_archival_config_handler),
4074 : )
4075 0 : .get("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
4076 0 : api_handler(r, timeline_detail_handler)
4077 0 : })
4078 0 : .get(
4079 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_lsn_by_timestamp",
4080 0 : |r| api_handler(r, get_lsn_by_timestamp_handler),
4081 : )
4082 0 : .get(
4083 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn",
4084 0 : |r| api_handler(r, get_timestamp_of_lsn_handler),
4085 : )
4086 0 : .post(
4087 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/patch_index_part",
4088 0 : |r| api_handler(r, timeline_patch_index_part_handler),
4089 : )
4090 0 : .post(
4091 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease",
4092 0 : |r| api_handler(r, lsn_lease_handler),
4093 : )
4094 0 : .put(
4095 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc",
4096 0 : |r| api_handler(r, timeline_gc_handler),
4097 : )
4098 0 : .get(
4099 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4100 0 : |r| api_handler(r, timeline_compact_info_handler),
4101 : )
4102 0 : .put(
4103 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4104 0 : |r| api_handler(r, timeline_compact_handler),
4105 : )
4106 0 : .delete(
4107 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4108 0 : |r| api_handler(r, timeline_cancel_compact_handler),
4109 : )
4110 0 : .put(
4111 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/offload",
4112 0 : |r| testing_api_handler("attempt timeline offload", r, timeline_offload_handler),
4113 : )
4114 0 : .put(
4115 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/mark_invisible",
4116 0 : |r| api_handler(r, timeline_mark_invisible_handler),
4117 : )
4118 0 : .put(
4119 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/checkpoint",
4120 0 : |r| testing_api_handler("run timeline checkpoint", r, timeline_checkpoint_handler),
4121 : )
4122 0 : .post(
4123 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
4124 0 : |r| api_handler(r, timeline_download_remote_layers_handler_post),
4125 : )
4126 0 : .get(
4127 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
4128 0 : |r| api_handler(r, timeline_download_remote_layers_handler_get),
4129 : )
4130 0 : .put(
4131 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/detach_ancestor",
4132 0 : |r| api_handler(r, timeline_detach_ancestor_handler),
4133 : )
4134 0 : .delete("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
4135 0 : api_handler(r, timeline_delete_handler)
4136 0 : })
4137 0 : .get(
4138 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer",
4139 0 : |r| api_handler(r, layer_map_info_handler),
4140 : )
4141 0 : .post(
4142 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
4143 0 : |r| api_handler(r, timeline_download_heatmap_layers_handler),
4144 : )
4145 0 : .delete(
4146 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
4147 0 : |r| api_handler(r, timeline_shutdown_download_heatmap_layers_handler),
4148 : )
4149 0 : .get(
4150 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
4151 0 : |r| api_handler(r, layer_download_handler),
4152 : )
4153 0 : .delete(
4154 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
4155 0 : |r| api_handler(r, evict_timeline_layer_handler),
4156 : )
4157 0 : .post(
4158 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_name/scan_disposable_keys",
4159 0 : |r| testing_api_handler("timeline_layer_scan_disposable_keys", r, timeline_layer_scan_disposable_keys),
4160 : )
4161 0 : .post(
4162 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/block_gc",
4163 0 : |r| api_handler(r, timeline_gc_blocking_handler),
4164 : )
4165 0 : .post(
4166 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/unblock_gc",
4167 0 : |r| api_handler(r, timeline_gc_unblocking_handler),
4168 : )
4169 0 : .get(
4170 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/page_trace",
4171 0 : |r| api_handler(r, timeline_page_trace_handler),
4172 : )
4173 0 : .post("/v1/tenant/:tenant_shard_id/heatmap_upload", |r| {
4174 0 : api_handler(r, secondary_upload_handler)
4175 0 : })
4176 0 : .get("/v1/tenant/:tenant_id/scan_remote_storage", |r| {
4177 0 : api_handler(r, tenant_scan_remote_handler)
4178 0 : })
4179 0 : .put("/v1/disk_usage_eviction/run", |r| {
4180 0 : api_handler(r, disk_usage_eviction_run)
4181 0 : })
4182 0 : .put("/v1/deletion_queue/flush", |r| {
4183 0 : api_handler(r, deletion_queue_flush)
4184 0 : })
4185 0 : .get("/v1/tenant/:tenant_shard_id/secondary/status", |r| {
4186 0 : api_handler(r, secondary_status_handler)
4187 0 : })
4188 0 : .post("/v1/tenant/:tenant_shard_id/secondary/download", |r| {
4189 0 : api_handler(r, secondary_download_handler)
4190 0 : })
4191 0 : .post("/v1/tenant/:tenant_shard_id/wait_lsn", |r| {
4192 0 : api_handler(r, wait_lsn_handler)
4193 0 : })
4194 0 : .put("/v1/tenant/:tenant_shard_id/break", |r| {
4195 0 : testing_api_handler("set tenant state to broken", r, handle_tenant_break)
4196 0 : })
4197 0 : .get("/v1/panic", |r| api_handler(r, always_panic_handler))
4198 0 : .post("/v1/tracing/event", |r| {
4199 0 : testing_api_handler("emit a tracing event", r, post_tracing_event_handler)
4200 0 : })
4201 0 : .get(
4202 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage",
4203 0 : |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler),
4204 : )
4205 0 : .get(
4206 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/touchpage",
4207 0 : |r| api_handler(r, touchpage_at_lsn_handler),
4208 : )
4209 0 : .get(
4210 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/keyspace",
4211 0 : |r| api_handler(r, timeline_collect_keyspace),
4212 : )
4213 0 : .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler))
4214 0 : .put("/v1/io_mode", |r| api_handler(r, put_io_mode_handler))
4215 0 : .get("/v1/utilization", |r| api_handler(r, get_utilization))
4216 0 : .get("/v1/list_tenant_visible_size", |r| api_handler(r, list_tenant_visible_size_handler))
4217 0 : .post(
4218 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files",
4219 0 : |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files),
4220 : )
4221 0 : .post(
4222 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files",
4223 0 : |r| testing_api_handler("list_aux_files", r, list_aux_files),
4224 : )
4225 0 : .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
4226 0 : .post(
4227 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
4228 0 : |r| testing_api_handler("perf_info", r, perf_info),
4229 : )
4230 0 : .put(
4231 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_basebackup",
4232 0 : |r| api_handler(r, put_tenant_timeline_import_basebackup),
4233 : )
4234 0 : .put(
4235 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_wal",
4236 0 : |r| api_handler(r, put_tenant_timeline_import_wal),
4237 : )
4238 0 : .put(
4239 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/activate_post_import",
4240 0 : |r| api_handler(r, activate_post_import_handler),
4241 : )
4242 0 : .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| {
4243 0 : api_handler(r, tenant_evaluate_feature_flag)
4244 0 : })
4245 0 : .post("/v1/tenant/:tenant_shard_id/force_refresh_feature_flag", |r| {
4246 0 : api_handler(r, force_refresh_feature_flag)
4247 0 : })
4248 0 : .put("/v1/feature_flag/:flag_key", |r| {
4249 0 : testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put)
4250 0 : })
4251 0 : .delete("/v1/feature_flag/:flag_key", |r| {
4252 0 : testing_api_handler("force override feature flag - delete", r, force_override_feature_flag_for_testing_delete)
4253 0 : })
4254 0 : .post("/v1/feature_flag_spec", |r| {
4255 0 : api_handler(r, update_feature_flag_spec)
4256 0 : })
4257 0 : .post("/hadron-internal/reset_alert_gauges", |r| {
4258 0 : api_handler(r, hadron_reset_alert_gauges)
4259 0 : })
4260 0 : .any(handler_404))
4261 0 : }