Line data Source code
1 : //!
2 : //! Management HTTP API
3 : //!
4 : use std::cmp::Reverse;
5 : use std::collections::BTreeMap;
6 : use std::collections::BinaryHeap;
7 : use std::collections::HashMap;
8 : use std::str::FromStr;
9 : use std::sync::Arc;
10 : use std::time::Duration;
11 :
12 : use anyhow::{Context, Result, anyhow};
13 : use bytes::Bytes;
14 : use enumset::EnumSet;
15 : use futures::future::join_all;
16 : use futures::{StreamExt, TryFutureExt};
17 : use http_utils::endpoint::{
18 : self, attach_openapi_ui, auth_middleware, check_permission_with, profile_cpu_handler,
19 : profile_heap_handler, prometheus_metrics_handler, request_span,
20 : };
21 : use http_utils::error::{ApiError, HttpErrorBody};
22 : use http_utils::failpoints::failpoints_handler;
23 : use http_utils::json::{json_request, json_request_maybe, json_response};
24 : use http_utils::request::{
25 : get_request_param, must_get_query_param, must_parse_query_param, parse_query_param,
26 : parse_request_param,
27 : };
28 : use http_utils::{RequestExt, RouterBuilder};
29 : use humantime::format_rfc3339;
30 : use hyper::{Body, Request, Response, StatusCode, Uri, header};
31 : use metrics::launch_timestamp::LaunchTimestamp;
32 : use pageserver_api::models::virtual_file::IoMode;
33 : use pageserver_api::models::{
34 : DetachBehavior, DownloadRemoteLayersTaskSpawnRequest, IngestAuxFilesRequest,
35 : ListAuxFilesRequest, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
36 : LsnLeaseRequest, OffloadedTimelineInfo, PageTraceEvent, ShardParameters, StatusResponse,
37 : TenantConfigPatchRequest, TenantConfigRequest, TenantDetails, TenantInfo,
38 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantScanRemoteStorageResponse,
39 : TenantScanRemoteStorageShard, TenantShardLocation, TenantShardSplitRequest,
40 : TenantShardSplitResponse, TenantSorting, TenantState, TenantWaitLsnRequest,
41 : TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateRequestMode,
42 : TimelineCreateRequestModeImportPgdata, TimelineGcRequest, TimelineInfo,
43 : TimelinePatchIndexPartRequest, TimelineVisibilityState, TimelinesInfoAndOffloaded,
44 : TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
45 : };
46 : use pageserver_api::shard::{ShardCount, TenantShardId};
47 : use postgres_ffi::PgMajorVersion;
48 : use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
49 : use scopeguard::defer;
50 : use serde::{Deserialize, Serialize};
51 : use serde_json::json;
52 : use tenant_size_model::svg::SvgBranchKind;
53 : use tenant_size_model::{SizeResult, StorageModel};
54 : use tokio::time::Instant;
55 : use tokio_util::io::StreamReader;
56 : use tokio_util::sync::CancellationToken;
57 : use tracing::*;
58 : use utils::auth::SwappableJwtAuth;
59 : use utils::generation::Generation;
60 : use utils::id::{TenantId, TimelineId};
61 : use utils::lsn::Lsn;
62 : use wal_decoder::models::record::NeonWalRecord;
63 :
64 : use crate::config::PageServerConf;
65 : use crate::context;
66 : use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder};
67 : use crate::deletion_queue::DeletionQueueClient;
68 : use crate::feature_resolver::FeatureResolver;
69 : use crate::metrics::LOCAL_DATA_LOSS_SUSPECTED;
70 : use crate::pgdatadir_mapping::LsnForTimestamp;
71 : use crate::task_mgr::TaskKind;
72 : use crate::tenant::config::LocationConf;
73 : use crate::tenant::mgr::{
74 : GetActiveTenantError, GetTenantError, TenantManager, TenantMapError, TenantMapInsertError,
75 : TenantSlot, TenantSlotError, TenantSlotUpsertError, TenantStateError, UpsertLocationError,
76 : };
77 : use crate::tenant::remote_timeline_client::index::GcCompactionState;
78 : use crate::tenant::remote_timeline_client::{
79 : download_index_part, download_tenant_manifest, list_remote_tenant_shards, list_remote_timelines,
80 : };
81 : use crate::tenant::secondary::SecondaryController;
82 : use crate::tenant::size::ModelInputs;
83 : use crate::tenant::storage_layer::ValuesReconstructState;
84 : use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName};
85 : use crate::tenant::timeline::layer_manager::LayerManagerLockHolder;
86 : use crate::tenant::timeline::offload::{OffloadError, offload_timeline};
87 : use crate::tenant::timeline::{
88 : CompactFlags, CompactOptions, CompactRequest, MarkInvisibleRequest, Timeline, WaitLsnTimeout,
89 : WaitLsnWaiter, import_pgdata,
90 : };
91 : use crate::tenant::{
92 : GetTimelineError, LogicalSizeCalculationCause, OffloadedTimeline, PageReconstructError,
93 : remote_timeline_client,
94 : };
95 : use crate::{DEFAULT_PG_VERSION, disk_usage_eviction_task, tenant};
96 :
97 : // For APIs that require an Active tenant, how long should we block waiting for that state?
98 : // This is not functionally necessary (clients will retry), but avoids generating a lot of
99 : // failed API calls while tenants are activating.
100 : #[cfg(not(feature = "testing"))]
101 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);
102 :
103 : // Tests run on slow/oversubscribed nodes, and may need to wait much longer for tenants to
104 : // finish attaching, if calls to remote storage are slow.
105 : #[cfg(feature = "testing")]
106 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
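// Illustrative usage only: the handlers below gate on tenant activation with
// this timeout before serving, e.g.
//
//     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
//
// On timeout this surfaces as a retryable `ApiError::ResourceUnavailable`
// (see the `GetActiveTenantError` conversion further down) rather than a hard
// failure.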
107 :
108 : pub struct State {
109 : conf: &'static PageServerConf,
110 : tenant_manager: Arc<TenantManager>,
111 : auth: Option<Arc<SwappableJwtAuth>>,
112 : allowlist_routes: &'static [&'static str],
113 : remote_storage: GenericRemoteStorage,
114 : broker_client: storage_broker::BrokerClientChannel,
115 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
116 : deletion_queue_client: DeletionQueueClient,
117 : secondary_controller: SecondaryController,
118 : latest_utilization: tokio::sync::Mutex<Option<(std::time::Instant, bytes::Bytes)>>,
119 : feature_resolver: FeatureResolver,
120 : }
121 :
122 : impl State {
123 : #[allow(clippy::too_many_arguments)]
124 0 : pub fn new(
125 0 : conf: &'static PageServerConf,
126 0 : tenant_manager: Arc<TenantManager>,
127 0 : auth: Option<Arc<SwappableJwtAuth>>,
128 0 : remote_storage: GenericRemoteStorage,
129 0 : broker_client: storage_broker::BrokerClientChannel,
130 0 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
131 0 : deletion_queue_client: DeletionQueueClient,
132 0 : secondary_controller: SecondaryController,
133 0 : feature_resolver: FeatureResolver,
134 0 : ) -> anyhow::Result<Self> {
135 0 : let allowlist_routes = &[
136 0 : "/v1/status",
137 0 : "/v1/doc",
138 0 : "/swagger.yml",
139 0 : "/metrics",
140 0 : "/profile/cpu",
141 0 : "/profile/heap",
142 0 : ];
143 0 : Ok(Self {
144 0 : conf,
145 0 : tenant_manager,
146 0 : auth,
147 0 : allowlist_routes,
148 0 : remote_storage,
149 0 : broker_client,
150 0 : disk_usage_eviction_state,
151 0 : deletion_queue_client,
152 0 : secondary_controller,
153 0 : latest_utilization: Default::default(),
154 0 : feature_resolver,
155 0 : })
156 0 : }
157 : }
158 :
159 : #[inline(always)]
160 0 : fn get_state(request: &Request<Body>) -> &State {
161 0 : request
162 0 : .data::<Arc<State>>()
163 0 : .expect("unknown state type")
164 0 : .as_ref()
165 0 : }
166 :
167 : #[inline(always)]
168 0 : fn get_config(request: &Request<Body>) -> &'static PageServerConf {
169 0 : get_state(request).conf
170 0 : }
171 :
172 : /// Check that the requester is authorized to operate on given tenant
173 0 : fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
174 0 : check_permission_with(request, |claims| {
175 0 : crate::auth::check_permission(claims, tenant_id)
176 0 : })
177 0 : }
178 :
179 : impl From<PageReconstructError> for ApiError {
180 0 : fn from(pre: PageReconstructError) -> ApiError {
181 0 : match pre {
182 0 : PageReconstructError::Other(other) => ApiError::InternalServerError(other),
183 0 : PageReconstructError::MissingKey(e) => ApiError::InternalServerError(e.into()),
184 0 : PageReconstructError::Cancelled => ApiError::Cancelled,
185 0 : PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()),
186 0 : PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre),
187 : }
188 0 : }
189 : }
190 :
191 : impl From<TenantMapInsertError> for ApiError {
192 0 : fn from(tmie: TenantMapInsertError) -> ApiError {
193 0 : match tmie {
194 0 : TenantMapInsertError::SlotError(e) => e.into(),
195 0 : TenantMapInsertError::SlotUpsertError(e) => e.into(),
196 0 : TenantMapInsertError::Other(e) => ApiError::InternalServerError(e),
197 : }
198 0 : }
199 : }
200 :
201 : impl From<TenantSlotError> for ApiError {
202 0 : fn from(e: TenantSlotError) -> ApiError {
203 : use TenantSlotError::*;
204 0 : match e {
205 0 : NotFound(tenant_id) => {
206 0 : ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into())
207 : }
208 : InProgress => {
209 0 : ApiError::ResourceUnavailable("Tenant is being modified concurrently".into())
210 : }
211 0 : MapState(e) => e.into(),
212 : }
213 0 : }
214 : }
215 :
216 : impl From<TenantSlotUpsertError> for ApiError {
217 0 : fn from(e: TenantSlotUpsertError) -> ApiError {
218 : use TenantSlotUpsertError::*;
219 0 : match e {
220 0 : InternalError(e) => ApiError::InternalServerError(anyhow::anyhow!("{e}")),
221 0 : MapState(e) => e.into(),
222 0 : ShuttingDown(_) => ApiError::ShuttingDown,
223 : }
224 0 : }
225 : }
226 :
227 : impl From<UpsertLocationError> for ApiError {
228 0 : fn from(e: UpsertLocationError) -> ApiError {
229 : use UpsertLocationError::*;
230 0 : match e {
231 0 : BadRequest(e) => ApiError::BadRequest(e),
232 0 : Unavailable(_) => ApiError::ShuttingDown,
233 0 : e @ InProgress => ApiError::Conflict(format!("{e}")),
234 0 : Flush(e) | InternalError(e) => ApiError::InternalServerError(e),
235 : }
236 0 : }
237 : }
238 :
239 : impl From<TenantMapError> for ApiError {
240 0 : fn from(e: TenantMapError) -> ApiError {
241 : use TenantMapError::*;
242 0 : match e {
243 : StillInitializing | ShuttingDown => {
244 0 : ApiError::ResourceUnavailable(format!("{e}").into())
245 : }
246 : }
247 0 : }
248 : }
249 :
250 : impl From<TenantStateError> for ApiError {
251 0 : fn from(tse: TenantStateError) -> ApiError {
252 0 : match tse {
253 : TenantStateError::IsStopping(_) => {
254 0 : ApiError::ResourceUnavailable("Tenant is stopping".into())
255 : }
256 0 : TenantStateError::SlotError(e) => e.into(),
257 0 : TenantStateError::SlotUpsertError(e) => e.into(),
258 0 : TenantStateError::Other(e) => ApiError::InternalServerError(anyhow!(e)),
259 : }
260 0 : }
261 : }
262 :
263 : impl From<GetTenantError> for ApiError {
264 0 : fn from(tse: GetTenantError) -> ApiError {
265 0 : match tse {
266 0 : GetTenantError::NotFound(tid) => ApiError::NotFound(anyhow!("tenant {tid}").into()),
267 0 : GetTenantError::ShardNotFound(tid) => {
268 0 : ApiError::NotFound(anyhow!("tenant {tid}").into())
269 : }
270 : GetTenantError::NotActive(_) => {
271 : // Why is this not `ApiError::NotFound`?
272 : // Because we must be careful to never return 404 for a tenant if it does
273 : // in fact exist locally. If we did, the caller could draw the conclusion
274 : // that it can attach the tenant to another PS and we'd be in split-brain.
275 0 : ApiError::ResourceUnavailable("Tenant not yet active".into())
276 : }
277 0 : GetTenantError::MapState(e) => ApiError::ResourceUnavailable(format!("{e}").into()),
278 : }
279 0 : }
280 : }
281 :
282 : impl From<GetTimelineError> for ApiError {
283 0 : fn from(gte: GetTimelineError) -> Self {
284 : // Rationale: tenant is activated only after eligible timelines activate
285 0 : ApiError::NotFound(gte.into())
286 0 : }
287 : }
288 :
289 : impl From<GetActiveTenantError> for ApiError {
290 0 : fn from(e: GetActiveTenantError) -> ApiError {
291 0 : match e {
292 0 : GetActiveTenantError::Broken(reason) => {
293 0 : ApiError::InternalServerError(anyhow!("tenant is broken: {}", reason))
294 : }
295 : GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
296 0 : ApiError::ShuttingDown
297 : }
298 0 : GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{e}")),
299 0 : GetActiveTenantError::Cancelled => ApiError::ShuttingDown,
300 0 : GetActiveTenantError::NotFound(gte) => gte.into(),
301 : GetActiveTenantError::WaitForActiveTimeout { .. } => {
302 0 : ApiError::ResourceUnavailable(format!("{e}").into())
303 : }
304 : GetActiveTenantError::SwitchedTenant => {
305 : // in our HTTP handlers, this error doesn't happen
306 : // TODO: separate error types
307 0 : ApiError::ResourceUnavailable("switched tenant".into())
308 : }
309 : }
310 0 : }
311 : }
312 :
313 : impl From<crate::tenant::DeleteTimelineError> for ApiError {
314 0 : fn from(value: crate::tenant::DeleteTimelineError) -> Self {
315 : use crate::tenant::DeleteTimelineError::*;
316 0 : match value {
317 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
318 0 : HasChildren(children) => ApiError::PreconditionFailed(
319 0 : format!("Cannot delete timeline which has child timelines: {children:?}")
320 0 : .into_boxed_str(),
321 0 : ),
322 0 : a @ AlreadyInProgress(_) => ApiError::Conflict(a.to_string()),
323 0 : Cancelled => ApiError::ResourceUnavailable("shutting down".into()),
324 0 : Other(e) => ApiError::InternalServerError(e),
325 : }
326 0 : }
327 : }
328 :
329 : impl From<crate::tenant::TimelineArchivalError> for ApiError {
330 0 : fn from(value: crate::tenant::TimelineArchivalError) -> Self {
331 : use crate::tenant::TimelineArchivalError::*;
332 0 : match value {
333 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
334 0 : Timeout => ApiError::Timeout("hit pageserver internal timeout".into()),
335 0 : Cancelled => ApiError::ShuttingDown,
336 0 : e @ HasArchivedParent(_) => {
337 0 : ApiError::PreconditionFailed(e.to_string().into_boxed_str())
338 : }
339 0 : HasUnarchivedChildren(children) => ApiError::PreconditionFailed(
340 0 : format!(
341 0 : "Cannot archive timeline which has non-archived child timelines: {children:?}"
342 0 : )
343 0 : .into_boxed_str(),
344 0 : ),
345 0 : a @ AlreadyInProgress => ApiError::Conflict(a.to_string()),
346 0 : Other(e) => ApiError::InternalServerError(e),
347 : }
348 0 : }
349 : }
350 :
351 : impl From<crate::tenant::mgr::DeleteTimelineError> for ApiError {
352 0 : fn from(value: crate::tenant::mgr::DeleteTimelineError) -> Self {
353 : use crate::tenant::mgr::DeleteTimelineError::*;
354 0 : match value {
355 : // Report Precondition failed so the client can distinguish the
356 : // "tenant is missing" case from the "timeline is missing" case
357 0 : Tenant(GetTenantError::NotFound(..)) => ApiError::PreconditionFailed(
358 0 : "Requested tenant is missing".to_owned().into_boxed_str(),
359 0 : ),
360 0 : Tenant(t) => ApiError::from(t),
361 0 : Timeline(t) => ApiError::from(t),
362 : }
363 0 : }
364 : }
365 :
366 : impl From<crate::tenant::mgr::DeleteTenantError> for ApiError {
367 0 : fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self {
368 : use crate::tenant::mgr::DeleteTenantError::*;
369 0 : match value {
370 0 : SlotError(e) => e.into(),
371 0 : Other(o) => ApiError::InternalServerError(o),
372 0 : Cancelled => ApiError::ShuttingDown,
373 : }
374 0 : }
375 : }
376 :
377 : impl From<crate::tenant::secondary::SecondaryTenantError> for ApiError {
378 0 : fn from(ste: crate::tenant::secondary::SecondaryTenantError) -> ApiError {
379 : use crate::tenant::secondary::SecondaryTenantError;
380 0 : match ste {
381 0 : SecondaryTenantError::GetTenant(gte) => gte.into(),
382 0 : SecondaryTenantError::ShuttingDown => ApiError::ShuttingDown,
383 : }
384 0 : }
385 : }
386 :
387 : impl From<crate::tenant::FinalizeTimelineImportError> for ApiError {
388 0 : fn from(err: crate::tenant::FinalizeTimelineImportError) -> ApiError {
389 : use crate::tenant::FinalizeTimelineImportError::*;
390 0 : match err {
391 : ImportTaskStillRunning => {
392 0 : ApiError::ResourceUnavailable("Import task still running".into())
393 : }
394 0 : ShuttingDown => ApiError::ShuttingDown,
395 : }
396 0 : }
397 : }
398 :
399 : // Helper function to construct a TimelineInfo struct for a timeline
400 0 : async fn build_timeline_info(
401 0 : timeline: &Arc<Timeline>,
402 0 : include_non_incremental_logical_size: bool,
403 0 : force_await_initial_logical_size: bool,
404 0 : include_image_consistent_lsn: bool,
405 0 : ctx: &RequestContext,
406 0 : ) -> anyhow::Result<TimelineInfo> {
407 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
408 :
409 0 : if force_await_initial_logical_size {
410 0 : timeline.clone().await_initial_logical_size().await
411 0 : }
412 :
413 0 : let mut info = build_timeline_info_common(
414 0 : timeline,
415 0 : ctx,
416 0 : tenant::timeline::GetLogicalSizePriority::Background,
417 0 : )
418 0 : .await?;
419 0 : if include_non_incremental_logical_size {
420 : // XXX we should be using spawn_ondemand_logical_size_calculation here.
421 : // Otherwise, if someone deletes the timeline / detaches the tenant while
422 : // we're executing this function, we will outlive the timeline on-disk state.
423 : info.current_logical_size_non_incremental = Some(
424 0 : timeline
425 0 : .get_current_logical_size_non_incremental(info.last_record_lsn, ctx)
426 0 : .await?,
427 : );
428 0 : }
429 : // HADRON
430 0 : if include_image_consistent_lsn {
431 0 : info.image_consistent_lsn = Some(timeline.compute_image_consistent_lsn().await?);
432 0 : }
433 0 : Ok(info)
434 0 : }
435 :
436 0 : async fn build_timeline_info_common(
437 0 : timeline: &Arc<Timeline>,
438 0 : ctx: &RequestContext,
439 0 : logical_size_task_priority: tenant::timeline::GetLogicalSizePriority,
440 0 : ) -> anyhow::Result<TimelineInfo> {
441 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
442 0 : let initdb_lsn = timeline.initdb_lsn;
443 0 : let last_record_lsn = timeline.get_last_record_lsn();
444 0 : let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
445 0 : let guard = timeline.last_received_wal.lock().unwrap();
446 0 : if let Some(info) = guard.as_ref() {
447 0 : (
448 0 : Some(format!("{}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only.
449 0 : Some(info.last_received_msg_lsn),
450 0 : Some(info.last_received_msg_ts),
451 0 : )
452 : } else {
453 0 : (None, None, None)
454 : }
455 : };
456 :
457 0 : let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
458 0 : let ancestor_lsn = match timeline.get_ancestor_lsn() {
459 0 : Lsn(0) => None,
460 0 : lsn @ Lsn(_) => Some(lsn),
461 : };
462 0 : let current_logical_size = timeline.get_current_logical_size(logical_size_task_priority, ctx);
463 0 : let current_physical_size = Some(timeline.layer_size_sum().await);
464 0 : let state = timeline.current_state();
465 : // Report is_archived = false if the timeline is still loading
466 0 : let is_archived = timeline.is_archived().unwrap_or(false);
467 0 : let remote_consistent_lsn_projected = timeline
468 0 : .get_remote_consistent_lsn_projected()
469 0 : .unwrap_or(Lsn(0));
470 0 : let remote_consistent_lsn_visible = timeline
471 0 : .get_remote_consistent_lsn_visible()
472 0 : .unwrap_or(Lsn(0));
473 0 : let is_invisible = timeline.remote_client.is_invisible().unwrap_or(false);
474 :
475 0 : let walreceiver_status = timeline.walreceiver_status();
476 :
477 0 : let (pitr_history_size, within_ancestor_pitr) = timeline.get_pitr_history_stats();
478 :
479 : // Externally, expose the lowest LSN that can be used to create a branch.
480 : // Internally we distinguish between the planned GC cutoff (PITR point) and the "applied" GC cutoff (where we
481 : // actually trimmed data to), which can pass each other when PITR is changed.
482 0 : let min_readable_lsn = std::cmp::max(
483 0 : timeline.get_gc_cutoff_lsn().unwrap_or_default(),
484 0 : *timeline.get_applied_gc_cutoff_lsn(),
485 : );
486 :
487 0 : let (rel_size_migration, rel_size_migrated_at) = timeline.get_rel_size_v2_status();
488 :
489 0 : let info = TimelineInfo {
490 0 : tenant_id: timeline.tenant_shard_id,
491 0 : timeline_id: timeline.timeline_id,
492 0 : ancestor_timeline_id,
493 0 : ancestor_lsn,
494 0 : disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
495 0 : remote_consistent_lsn: remote_consistent_lsn_projected,
496 0 : remote_consistent_lsn_visible,
497 0 : initdb_lsn,
498 0 : last_record_lsn,
499 0 : prev_record_lsn: Some(timeline.get_prev_record_lsn()),
500 0 : min_readable_lsn,
501 0 : applied_gc_cutoff_lsn: *timeline.get_applied_gc_cutoff_lsn(),
502 0 : current_logical_size: current_logical_size.size_dont_care_about_accuracy(),
503 0 : current_logical_size_is_accurate: match current_logical_size.accuracy() {
504 0 : tenant::timeline::logical_size::Accuracy::Approximate => false,
505 0 : tenant::timeline::logical_size::Accuracy::Exact => true,
506 : },
507 0 : directory_entries_counts: timeline.get_directory_metrics().to_vec(),
508 0 : current_physical_size,
509 0 : current_logical_size_non_incremental: None,
510 0 : pitr_history_size,
511 0 : within_ancestor_pitr,
512 0 : timeline_dir_layer_file_size_sum: None,
513 0 : wal_source_connstr,
514 0 : last_received_msg_lsn,
515 0 : last_received_msg_ts,
516 0 : pg_version: timeline.pg_version,
517 :
518 0 : state,
519 0 : is_archived: Some(is_archived),
520 0 : rel_size_migration: Some(rel_size_migration),
521 0 : rel_size_migrated_at,
522 0 : is_invisible: Some(is_invisible),
523 :
524 0 : walreceiver_status,
525 : // HADRON
526 0 : image_consistent_lsn: None,
527 : };
528 0 : Ok(info)
529 0 : }
530 :
531 0 : fn build_timeline_offloaded_info(offloaded: &Arc<OffloadedTimeline>) -> OffloadedTimelineInfo {
532 : let &OffloadedTimeline {
533 0 : tenant_shard_id,
534 0 : timeline_id,
535 0 : ancestor_retain_lsn,
536 0 : ancestor_timeline_id,
537 0 : archived_at,
538 : ..
539 0 : } = offloaded.as_ref();
540 0 : OffloadedTimelineInfo {
541 0 : tenant_id: tenant_shard_id,
542 0 : timeline_id,
543 0 : ancestor_retain_lsn,
544 0 : ancestor_timeline_id,
545 0 : archived_at: archived_at.and_utc(),
546 0 : }
547 0 : }
548 :
549 : // healthcheck handler
550 0 : async fn status_handler(
551 0 : request: Request<Body>,
552 0 : _cancel: CancellationToken,
553 0 : ) -> Result<Response<Body>, ApiError> {
554 0 : check_permission(&request, None)?;
555 0 : let config = get_config(&request);
556 0 : json_response(StatusCode::OK, StatusResponse { id: config.id })
557 0 : }
558 :
559 0 : async fn reload_auth_validation_keys_handler(
560 0 : request: Request<Body>,
561 0 : _cancel: CancellationToken,
562 0 : ) -> Result<Response<Body>, ApiError> {
563 0 : check_permission(&request, None)?;
564 0 : let config = get_config(&request);
565 0 : let state = get_state(&request);
566 0 : let Some(shared_auth) = &state.auth else {
567 0 : return json_response(StatusCode::BAD_REQUEST, ());
568 : };
569 : // unwrap is ok because check is performed when creating config, so path is set and exists
570 0 : let key_path = config.auth_validation_public_key_path.as_ref().unwrap();
571 0 : info!("Reloading public key(s) for verifying JWT tokens from {key_path:?}");
572 :
573 0 : match utils::auth::JwtAuth::from_key_path(key_path) {
574 0 : Ok(new_auth) => {
575 0 : shared_auth.swap(new_auth);
576 0 : json_response(StatusCode::OK, ())
577 : }
578 0 : Err(e) => {
579 0 : let err_msg = "Error reloading public keys";
580 0 : warn!("Error reloading public keys from {key_path:?}: {e:}");
581 0 : json_response(
582 : StatusCode::INTERNAL_SERVER_ERROR,
583 0 : HttpErrorBody::from_msg(err_msg.to_string()),
584 : )
585 : }
586 : }
587 0 : }
588 :
589 0 : async fn timeline_create_handler(
590 0 : mut request: Request<Body>,
591 0 : _cancel: CancellationToken,
592 0 : ) -> Result<Response<Body>, ApiError> {
593 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
594 0 : let request_data: TimelineCreateRequest = json_request(&mut request).await?;
595 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
596 :
597 0 : let new_timeline_id = request_data.new_timeline_id;
598 : // fill in the default pg_version if not provided & convert request into domain model
599 0 : let params: tenant::CreateTimelineParams = match request_data.mode {
600 : TimelineCreateRequestMode::Bootstrap {
601 0 : existing_initdb_timeline_id,
602 0 : pg_version,
603 0 : } => tenant::CreateTimelineParams::Bootstrap(tenant::CreateTimelineParamsBootstrap {
604 0 : new_timeline_id,
605 0 : existing_initdb_timeline_id,
606 0 : pg_version: pg_version.unwrap_or(DEFAULT_PG_VERSION),
607 0 : }),
608 : TimelineCreateRequestMode::Branch {
609 0 : ancestor_timeline_id,
610 0 : ancestor_start_lsn,
611 : read_only: _,
612 : pg_version: _,
613 0 : } => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
614 0 : new_timeline_id,
615 0 : ancestor_timeline_id,
616 0 : ancestor_start_lsn,
617 0 : }),
618 : TimelineCreateRequestMode::ImportPgdata {
619 : import_pgdata:
620 : TimelineCreateRequestModeImportPgdata {
621 0 : location,
622 0 : idempotency_key,
623 : },
624 : } => tenant::CreateTimelineParams::ImportPgdata(tenant::CreateTimelineParamsImportPgdata {
625 0 : idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new(
626 0 : idempotency_key.0,
627 : ),
628 0 : new_timeline_id,
629 : location: {
630 : use import_pgdata::index_part_format::Location;
631 : use pageserver_api::models::ImportPgdataLocation;
632 0 : match location {
633 : #[cfg(feature = "testing")]
634 0 : ImportPgdataLocation::LocalFs { path } => Location::LocalFs { path },
635 : ImportPgdataLocation::AwsS3 {
636 0 : region,
637 0 : bucket,
638 0 : key,
639 0 : } => Location::AwsS3 {
640 0 : region,
641 0 : bucket,
642 0 : key,
643 0 : },
644 : }
645 : },
646 : }),
647 : };
648 :
649 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
650 :
651 0 : let state = get_state(&request);
652 :
653 0 : async {
654 0 : let tenant = state
655 0 : .tenant_manager
656 0 : .get_attached_tenant_shard(tenant_shard_id)?;
657 :
658 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
659 :
660 : // earlier versions of the code had pg_version and ancestor_lsn in the span
661 : // => continue to provide that information, but, through a log message that doesn't require us to destructure
662 0 : tracing::info!(?params, "creating timeline");
663 :
664 0 : match tenant
665 0 : .create_timeline(params, state.broker_client.clone(), &ctx)
666 0 : .await
667 : {
668 0 : Ok(new_timeline) => {
669 : // Created. Construct a TimelineInfo for it.
670 0 : let timeline_info = build_timeline_info_common(
671 0 : &new_timeline,
672 0 : &ctx,
673 0 : tenant::timeline::GetLogicalSizePriority::User,
674 0 : )
675 0 : .await
676 0 : .map_err(ApiError::InternalServerError)?;
677 0 : json_response(StatusCode::CREATED, timeline_info)
678 : }
679 0 : Err(_) if tenant.cancel.is_cancelled() => {
680 : // In case we get some ugly error type during shutdown, cast it into a clean 503.
681 0 : json_response(
682 : StatusCode::SERVICE_UNAVAILABLE,
683 0 : HttpErrorBody::from_msg("Tenant shutting down".to_string()),
684 : )
685 : }
686 0 : Err(e @ tenant::CreateTimelineError::Conflict) => {
687 0 : json_response(StatusCode::CONFLICT, HttpErrorBody::from_msg(e.to_string()))
688 : }
689 0 : Err(e @ tenant::CreateTimelineError::AlreadyCreating) => json_response(
690 : StatusCode::TOO_MANY_REQUESTS,
691 0 : HttpErrorBody::from_msg(e.to_string()),
692 : ),
693 0 : Err(tenant::CreateTimelineError::AncestorLsn(err)) => json_response(
694 : StatusCode::NOT_ACCEPTABLE,
695 0 : HttpErrorBody::from_msg(format!("{err:#}")),
696 : ),
697 0 : Err(e @ tenant::CreateTimelineError::AncestorNotActive) => json_response(
698 : StatusCode::SERVICE_UNAVAILABLE,
699 0 : HttpErrorBody::from_msg(e.to_string()),
700 : ),
701 0 : Err(e @ tenant::CreateTimelineError::AncestorArchived) => json_response(
702 : StatusCode::NOT_ACCEPTABLE,
703 0 : HttpErrorBody::from_msg(e.to_string()),
704 : ),
705 0 : Err(tenant::CreateTimelineError::ShuttingDown) => json_response(
706 : StatusCode::SERVICE_UNAVAILABLE,
707 0 : HttpErrorBody::from_msg("tenant shutting down".to_string()),
708 : ),
709 0 : Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
710 : }
711 0 : }
712 0 : .instrument(info_span!("timeline_create",
713 : tenant_id = %tenant_shard_id.tenant_id,
714 0 : shard_id = %tenant_shard_id.shard_slug(),
715 : timeline_id = %new_timeline_id,
716 : ))
717 0 : .await
718 0 : }
719 :
720 0 : async fn timeline_list_handler(
721 0 : request: Request<Body>,
722 0 : _cancel: CancellationToken,
723 0 : ) -> Result<Response<Body>, ApiError> {
724 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
725 0 : let include_non_incremental_logical_size: Option<bool> =
726 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
727 0 : let force_await_initial_logical_size: Option<bool> =
728 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
729 0 : let include_image_consistent_lsn: Option<bool> =
730 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
731 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
732 :
733 0 : let state = get_state(&request);
734 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
735 :
736 0 : let response_data = async {
737 0 : let tenant = state
738 0 : .tenant_manager
739 0 : .get_attached_tenant_shard(tenant_shard_id)?;
740 :
741 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
742 :
743 0 : let timelines = tenant.list_timelines();
744 :
745 0 : let mut response_data = Vec::with_capacity(timelines.len());
746 0 : for timeline in timelines {
747 0 : let timeline_info = build_timeline_info(
748 0 : &timeline,
749 0 : include_non_incremental_logical_size.unwrap_or(false),
750 0 : force_await_initial_logical_size.unwrap_or(false),
751 0 : include_image_consistent_lsn.unwrap_or(false),
752 0 : &ctx,
753 : )
754 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
755 0 : .await
756 0 : .context("Failed to build timeline info")
757 0 : .map_err(ApiError::InternalServerError)?;
758 :
759 0 : response_data.push(timeline_info);
760 : }
761 0 : Ok::<Vec<TimelineInfo>, ApiError>(response_data)
762 0 : }
763 0 : .instrument(info_span!("timeline_list",
764 : tenant_id = %tenant_shard_id.tenant_id,
765 0 : shard_id = %tenant_shard_id.shard_slug()))
766 0 : .await?;
767 :
768 0 : json_response(StatusCode::OK, response_data)
769 0 : }
770 :
771 0 : async fn timeline_and_offloaded_list_handler(
772 0 : request: Request<Body>,
773 0 : _cancel: CancellationToken,
774 0 : ) -> Result<Response<Body>, ApiError> {
775 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
776 0 : let include_non_incremental_logical_size: Option<bool> =
777 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
778 0 : let force_await_initial_logical_size: Option<bool> =
779 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
780 0 : let include_image_consistent_lsn: Option<bool> =
781 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
782 :
783 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
784 :
785 0 : let state = get_state(&request);
786 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
787 :
788 0 : let response_data = async {
789 0 : let tenant = state
790 0 : .tenant_manager
791 0 : .get_attached_tenant_shard(tenant_shard_id)?;
792 :
793 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
794 :
795 0 : let (timelines, offloadeds) = tenant.list_timelines_and_offloaded();
796 :
797 0 : let mut timeline_infos = Vec::with_capacity(timelines.len());
798 0 : for timeline in timelines {
799 0 : let timeline_info = build_timeline_info(
800 0 : &timeline,
801 0 : include_non_incremental_logical_size.unwrap_or(false),
802 0 : force_await_initial_logical_size.unwrap_or(false),
803 0 : include_image_consistent_lsn.unwrap_or(false),
804 0 : &ctx,
805 : )
806 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
807 0 : .await
808 0 : .context("Failed to build timeline info")
809 0 : .map_err(ApiError::InternalServerError)?;
810 :
811 0 : timeline_infos.push(timeline_info);
812 : }
813 0 : let offloaded_infos = offloadeds
814 0 : .into_iter()
815 0 : .map(|offloaded| build_timeline_offloaded_info(&offloaded))
816 0 : .collect::<Vec<_>>();
817 0 : let res = TimelinesInfoAndOffloaded {
818 0 : timelines: timeline_infos,
819 0 : offloaded: offloaded_infos,
820 0 : };
821 0 : Ok::<TimelinesInfoAndOffloaded, ApiError>(res)
822 0 : }
823 0 : .instrument(info_span!("timeline_and_offloaded_list",
824 : tenant_id = %tenant_shard_id.tenant_id,
825 0 : shard_id = %tenant_shard_id.shard_slug()))
826 0 : .await?;
827 :
828 0 : json_response(StatusCode::OK, response_data)
829 0 : }
830 :
831 0 : async fn timeline_preserve_initdb_handler(
832 0 : request: Request<Body>,
833 0 : _cancel: CancellationToken,
834 0 : ) -> Result<Response<Body>, ApiError> {
835 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
836 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
837 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
838 0 : let state = get_state(&request);
839 :
840 : // Part of the process for disaster recovery from safekeeper-stored WAL:
841 : // If we don't recover into a new timeline but want to keep the timeline ID,
842 : // then the initdb archive is deleted. This endpoint copies it to a different
843 : // location where timeline recreation cand find it.
844 :
845 0 : async {
846 0 : let tenant = state
847 0 : .tenant_manager
848 0 : .get_attached_tenant_shard(tenant_shard_id)?;
849 :
850 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
851 :
852 0 : timeline
853 0 : .preserve_initdb_archive()
854 0 : .await
855 0 : .context("preserving initdb archive")
856 0 : .map_err(ApiError::InternalServerError)?;
857 :
858 0 : Ok::<_, ApiError>(())
859 0 : }
860 0 : .instrument(info_span!("timeline_preserve_initdb_archive",
861 : tenant_id = %tenant_shard_id.tenant_id,
862 0 : shard_id = %tenant_shard_id.shard_slug(),
863 : %timeline_id))
864 0 : .await?;
865 :
866 0 : json_response(StatusCode::OK, ())
867 0 : }
868 :
869 0 : async fn timeline_archival_config_handler(
870 0 : mut request: Request<Body>,
871 0 : _cancel: CancellationToken,
872 0 : ) -> Result<Response<Body>, ApiError> {
873 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
874 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
875 :
876 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
877 :
878 0 : let request_data: TimelineArchivalConfigRequest = json_request(&mut request).await?;
879 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
880 0 : let state = get_state(&request);
881 :
882 0 : async {
883 0 : let tenant = state
884 0 : .tenant_manager
885 0 : .get_attached_tenant_shard(tenant_shard_id)?;
886 :
887 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
888 :
889 0 : tenant
890 0 : .apply_timeline_archival_config(
891 0 : timeline_id,
892 0 : request_data.state,
893 0 : state.broker_client.clone(),
894 0 : ctx,
895 0 : )
896 0 : .await?;
897 0 : Ok::<_, ApiError>(())
898 0 : }
899 0 : .instrument(info_span!("timeline_archival_config",
900 : tenant_id = %tenant_shard_id.tenant_id,
901 0 : shard_id = %tenant_shard_id.shard_slug(),
902 : state = ?request_data.state,
903 : %timeline_id))
904 0 : .await?;
905 :
906 0 : json_response(StatusCode::OK, ())
907 0 : }
908 :
909 : /// This API is used to patch the index part of a timeline. You must ensure such patches are safe to apply. Use this API as an emergency
910 : /// measure only.
911 : ///
912 : /// Some examples of safe patches:
913 : /// - Increase the gc_cutoff and gc_compaction_cutoff to a larger value in case a bug failed to bump the cutoff and caused read errors.
914 : /// - Force set the index part to use reldir v2 (migrating/migrated).
915 : ///
916 : /// Some examples of unsafe patches:
917 : /// - Force set the index part from v2 to v1 (legacy). This will cause the code path to ignore anything written to the new keyspace and cause
918 : /// errors.
919 : /// - Decrease the gc_cutoff without validating the data really exists. It will cause read errors in the background.
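/// A hedged request-body sketch: the field names mirror how the handler below
/// reads `TimelinePatchIndexPartRequest`; the values (and the string spelling
/// of `rel_size_migration`) are illustrative assumptions, not a recipe.
///
/// ```json
/// {
///   "rel_size_migration": "migrated",
///   "rel_size_migrated_at": "0/42",
///   "gc_compaction_last_completed_lsn": "0/42",
///   "applied_gc_cutoff_lsn": "0/42",
///   "force_index_update": true
/// }
/// ```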
920 0 : async fn timeline_patch_index_part_handler(
921 0 : mut request: Request<Body>,
922 0 : _cancel: CancellationToken,
923 0 : ) -> Result<Response<Body>, ApiError> {
924 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
925 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
926 :
927 0 : let request_data: TimelinePatchIndexPartRequest = json_request(&mut request).await?;
928 0 : check_permission(&request, None)?; // require global permission for this request
929 0 : let state = get_state(&request);
930 :
931 0 : async {
932 0 : let timeline =
933 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
934 0 : .await?;
935 :
936 0 : if request_data.rel_size_migration.is_none() && request_data.rel_size_migrated_at.is_some()
937 : {
938 0 : return Err(ApiError::BadRequest(anyhow!(
939 0 : "updating rel_size_migrated_at without rel_size_migration is not allowed"
940 0 : )));
941 0 : }
942 :
943 0 : if let Some(rel_size_migration) = request_data.rel_size_migration {
944 0 : timeline
945 0 : .update_rel_size_v2_status(rel_size_migration, request_data.rel_size_migrated_at)
946 0 : .map_err(ApiError::InternalServerError)?;
947 0 : }
948 :
949 0 : if let Some(gc_compaction_last_completed_lsn) =
950 0 : request_data.gc_compaction_last_completed_lsn
951 : {
952 0 : timeline
953 0 : .update_gc_compaction_state(GcCompactionState {
954 0 : last_completed_lsn: gc_compaction_last_completed_lsn,
955 0 : })
956 0 : .map_err(ApiError::InternalServerError)?;
957 0 : }
958 :
959 0 : if let Some(applied_gc_cutoff_lsn) = request_data.applied_gc_cutoff_lsn {
960 0 : {
961 0 : let guard = timeline.applied_gc_cutoff_lsn.lock_for_write();
962 0 : guard.store_and_unlock(applied_gc_cutoff_lsn);
963 0 : }
964 0 : }
965 :
966 0 : if request_data.force_index_update {
967 0 : timeline
968 0 : .remote_client
969 0 : .force_schedule_index_upload()
970 0 : .context("force schedule index upload")
971 0 : .map_err(ApiError::InternalServerError)?;
972 0 : }
973 :
974 0 : Ok::<_, ApiError>(())
975 0 : }
976 0 : .instrument(info_span!("timeline_patch_index_part",
977 : tenant_id = %tenant_shard_id.tenant_id,
978 0 : shard_id = %tenant_shard_id.shard_slug(),
979 : %timeline_id))
980 0 : .await?;
981 :
982 0 : json_response(StatusCode::OK, ())
983 0 : }
984 :
985 0 : async fn timeline_detail_handler(
986 0 : request: Request<Body>,
987 0 : _cancel: CancellationToken,
988 0 : ) -> Result<Response<Body>, ApiError> {
989 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
990 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
991 0 : let include_non_incremental_logical_size: Option<bool> =
992 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
993 0 : let force_await_initial_logical_size: Option<bool> =
994 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
995 : // HADRON
996 0 : let include_image_consistent_lsn: Option<bool> =
997 0 : parse_query_param(&request, "include-image-consistent-lsn")?;
998 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
999 :
1000 : // Logical size calculation needs downloading.
1001 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
1002 0 : let state = get_state(&request);
1003 :
1004 0 : let timeline_info = async {
1005 0 : let tenant = state
1006 0 : .tenant_manager
1007 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1008 :
1009 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1010 :
1011 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
1012 0 : let ctx = &ctx.with_scope_timeline(&timeline);
1013 :
1014 0 : let timeline_info = build_timeline_info(
1015 0 : &timeline,
1016 0 : include_non_incremental_logical_size.unwrap_or(false),
1017 0 : force_await_initial_logical_size.unwrap_or(false),
1018 0 : include_image_consistent_lsn.unwrap_or(false),
1019 0 : ctx,
1020 0 : )
1021 0 : .await
1022 0 : .context("get local timeline info")
1023 0 : .map_err(ApiError::InternalServerError)?;
1024 :
1025 0 : Ok::<_, ApiError>(timeline_info)
1026 0 : }
1027 0 : .instrument(info_span!("timeline_detail",
1028 : tenant_id = %tenant_shard_id.tenant_id,
1029 0 : shard_id = %tenant_shard_id.shard_slug(),
1030 : %timeline_id))
1031 0 : .await?;
1032 :
1033 0 : json_response(StatusCode::OK, timeline_info)
1034 0 : }
1035 :
1036 0 : async fn get_lsn_by_timestamp_handler(
1037 0 : request: Request<Body>,
1038 0 : cancel: CancellationToken,
1039 0 : ) -> Result<Response<Body>, ApiError> {
1040 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1041 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1042 0 : let state = get_state(&request);
1043 :
1044 0 : if !tenant_shard_id.is_shard_zero() {
1045 : // Requires SLRU contents, which are only stored on shard zero
1046 0 : return Err(ApiError::BadRequest(anyhow!(
1047 0 : "Lsn calculations by timestamp are only available on shard zero"
1048 0 : )));
1049 0 : }
1050 :
1051 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1052 0 : let timestamp_raw = must_get_query_param(&request, "timestamp")?;
1053 0 : let timestamp = humantime::parse_rfc3339(&timestamp_raw)
1054 0 : .with_context(|| format!("Invalid time: {timestamp_raw:?}"))
1055 0 : .map_err(ApiError::BadRequest)?;
1056 0 : let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);
1057 :
1058 0 : let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false);
1059 :
1060 0 : let timeline =
1061 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1062 0 : .await?;
1063 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1064 0 : .with_scope_timeline(&timeline);
1065 0 : let result = timeline
1066 0 : .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx)
1067 0 : .await?;
1068 :
1069 : #[derive(serde::Serialize, Debug)]
1070 : struct Result {
1071 : lsn: Lsn,
1072 : kind: &'static str,
1073 : #[serde(default)]
1074 : #[serde(skip_serializing_if = "Option::is_none")]
1075 : #[serde(flatten)]
1076 : lease: Option<LsnLease>,
1077 : }
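// Illustrative response shape only (values invented): when a lease was
// granted, its `LsnLease` fields are flattened into the same JSON object via
// the `#[serde(flatten)]` attribute above, roughly:
//   { "lsn": "0/15D3DD8", "kind": "present", /* ...lease fields... */ }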
1078 0 : let (lsn, kind) = match result {
1079 0 : LsnForTimestamp::Present(lsn) => (lsn, "present"),
1080 0 : LsnForTimestamp::Future(lsn) => (lsn, "future"),
1081 0 : LsnForTimestamp::Past(lsn) => (lsn, "past"),
1082 0 : LsnForTimestamp::NoData(lsn) => (lsn, "nodata"),
1083 : };
1084 :
1085 0 : let lease = if with_lease {
1086 0 : timeline
1087 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx)
1088 0 : .inspect_err(|_| {
1089 0 : warn!("fail to grant a lease to {}", lsn);
1090 0 : })
1091 0 : .ok()
1092 : } else {
1093 0 : None
1094 : };
1095 :
1096 0 : let result = Result { lsn, kind, lease };
1097 0 : let valid_until = result
1098 0 : .lease
1099 0 : .as_ref()
1100 0 : .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string());
1101 0 : tracing::info!(
1102 : lsn=?result.lsn,
1103 : kind=%result.kind,
1104 : timestamp=%timestamp_raw,
1105 : valid_until=?valid_until,
1106 0 : "lsn_by_timestamp finished"
1107 : );
1108 0 : json_response(StatusCode::OK, result)
1109 0 : }
1110 :
1111 0 : async fn get_timestamp_of_lsn_handler(
1112 0 : request: Request<Body>,
1113 0 : _cancel: CancellationToken,
1114 0 : ) -> Result<Response<Body>, ApiError> {
1115 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1116 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1117 0 : let state = get_state(&request);
1118 :
1119 0 : if !tenant_shard_id.is_shard_zero() {
1120 : // Requires SLRU contents, which are only stored on shard zero
1121 0 : return Err(ApiError::BadRequest(anyhow!(
1122 0 : "Timestamp calculations by lsn are only available on shard zero"
1123 0 : )));
1124 0 : }
1125 :
1126 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1127 :
1128 0 : let lsn_str = must_get_query_param(&request, "lsn")?;
1129 0 : let lsn = Lsn::from_str(&lsn_str)
1130 0 : .with_context(|| format!("Invalid LSN: {lsn_str:?}"))
1131 0 : .map_err(ApiError::BadRequest)?;
1132 :
1133 0 : let timeline =
1134 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1135 0 : .await?;
1136 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1137 0 : .with_scope_timeline(&timeline);
1138 0 : let result = timeline.get_timestamp_for_lsn(lsn, &ctx).await?;
1139 :
1140 0 : match result {
1141 0 : Some(time) => {
1142 0 : let time = format_rfc3339(
1143 0 : postgres_ffi::try_from_pg_timestamp(time).map_err(ApiError::InternalServerError)?,
1144 : )
1145 0 : .to_string();
1146 0 : json_response(StatusCode::OK, time)
1147 : }
1148 0 : None => Err(ApiError::PreconditionFailed(
1149 0 : format!("Timestamp for lsn {lsn} not found").into(),
1150 0 : )),
1151 : }
1152 0 : }
1153 :
1154 0 : async fn timeline_delete_handler(
1155 0 : request: Request<Body>,
1156 0 : _cancel: CancellationToken,
1157 0 : ) -> Result<Response<Body>, ApiError> {
1158 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1159 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1160 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1161 :
1162 0 : let state = get_state(&request);
1163 :
1164 0 : let tenant = state
1165 0 : .tenant_manager
1166 0 : .get_attached_tenant_shard(tenant_shard_id)
1167 0 : .map_err(|e| {
1168 0 : match e {
1169 : // GetTenantError has a built-in conversion to ApiError, but in this context we don't
1170 : // want to treat missing tenants as 404, to avoid ambiguity with successful deletions.
1171 : GetTenantError::NotFound(_) | GetTenantError::ShardNotFound(_) => {
1172 0 : ApiError::PreconditionFailed(
1173 0 : "Requested tenant is missing".to_string().into_boxed_str(),
1174 0 : )
1175 : }
1176 0 : e => e.into(),
1177 : }
1178 0 : })?;
1179 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1180 0 : tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))
1181 0 : .await?;
1182 :
1183 0 : json_response(StatusCode::ACCEPTED, ())
1184 0 : }
1185 :
1186 0 : async fn tenant_reset_handler(
1187 0 : request: Request<Body>,
1188 0 : _cancel: CancellationToken,
1189 0 : ) -> Result<Response<Body>, ApiError> {
1190 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1191 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1192 :
1193 0 : let drop_cache: Option<bool> = parse_query_param(&request, "drop_cache")?;
1194 :
1195 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1196 0 : let state = get_state(&request);
1197 0 : state
1198 0 : .tenant_manager
1199 0 : .reset_tenant(tenant_shard_id, drop_cache.unwrap_or(false), &ctx)
1200 0 : .await
1201 0 : .map_err(ApiError::InternalServerError)?;
1202 :
1203 0 : json_response(StatusCode::OK, ())
1204 0 : }
1205 :
1206 0 : async fn tenant_list_handler(
1207 0 : request: Request<Body>,
1208 0 : _cancel: CancellationToken,
1209 0 : ) -> Result<Response<Body>, ApiError> {
1210 0 : check_permission(&request, None)?;
1211 0 : let state = get_state(&request);
1212 :
1213 0 : let response_data = state
1214 0 : .tenant_manager
1215 0 : .list_tenants()
1216 0 : .map_err(|_| {
1217 0 : ApiError::ResourceUnavailable("Tenant map is initializing or shutting down".into())
1218 0 : })?
1219 0 : .iter()
1220 0 : .map(|(id, state, gen_)| TenantInfo {
1221 0 : id: *id,
1222 0 : state: state.clone(),
1223 0 : current_physical_size: None,
1224 0 : attachment_status: state.attachment_status(),
1225 0 : generation: (*gen_)
1226 0 : .into()
1227 0 : .expect("Tenants are always attached with a generation"),
1228 0 : gc_blocking: None,
1229 0 : })
1230 0 : .collect::<Vec<TenantInfo>>();
1231 :
1232 0 : json_response(StatusCode::OK, response_data)
1233 0 : }
1234 :
1235 0 : async fn tenant_status(
1236 0 : request: Request<Body>,
1237 0 : _cancel: CancellationToken,
1238 0 : ) -> Result<Response<Body>, ApiError> {
1239 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1240 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1241 0 : let state = get_state(&request);
1242 :
1243 : // In tests, sometimes we want to query the state of a tenant without auto-activating it if it's currently waiting.
1244 0 : let activate = true;
1245 : #[cfg(feature = "testing")]
1246 0 : let activate = parse_query_param(&request, "activate")?.unwrap_or(activate);
1247 :
1248 0 : let tenant_info = async {
1249 0 : let tenant = state
1250 0 : .tenant_manager
1251 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1252 :
1253 0 : if activate {
1254 : // This is advisory: we prefer to let the tenant activate on-demand when this function is
1255 : // called, but it is still valid to return 200 and describe the current state of the tenant
1256 : // if it doesn't make it into an active state.
1257 0 : tenant
1258 0 : .wait_to_become_active(ACTIVE_TENANT_TIMEOUT)
1259 0 : .await
1260 0 : .ok();
1261 0 : }
1262 :
1263 : // Calculate total physical size of all timelines
1264 0 : let mut current_physical_size = 0;
1265 0 : for timeline in tenant.list_timelines().iter() {
1266 0 : current_physical_size += timeline.layer_size_sum().await;
1267 : }
1268 :
1269 0 : let state = tenant.current_state();
1270 : Result::<_, ApiError>::Ok(TenantDetails {
1271 : tenant_info: TenantInfo {
1272 0 : id: tenant_shard_id,
1273 0 : state: state.clone(),
1274 0 : current_physical_size: Some(current_physical_size),
1275 0 : attachment_status: state.attachment_status(),
1276 0 : generation: tenant
1277 0 : .generation()
1278 0 : .into()
1279 0 : .expect("Tenants are always attached with a generation"),
1280 0 : gc_blocking: tenant.gc_block.summary().map(|x| format!("{x:?}")),
1281 : },
1282 0 : walredo: tenant.wal_redo_manager_status(),
1283 0 : timelines: tenant.list_timeline_ids(),
1284 : })
1285 0 : }
1286 0 : .instrument(info_span!("tenant_status_handler",
1287 : tenant_id = %tenant_shard_id.tenant_id,
1288 0 : shard_id = %tenant_shard_id.shard_slug()))
1289 0 : .await?;
1290 :
1291 0 : json_response(StatusCode::OK, tenant_info)
1292 0 : }
1293 :
1294 0 : async fn tenant_delete_handler(
1295 0 : request: Request<Body>,
1296 0 : _cancel: CancellationToken,
1297 0 : ) -> Result<Response<Body>, ApiError> {
1298 : // TODO openapi spec
1299 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1300 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1301 :
1302 0 : let state = get_state(&request);
1303 :
1304 0 : state
1305 0 : .tenant_manager
1306 0 : .delete_tenant(tenant_shard_id)
1307 0 : .instrument(info_span!("tenant_delete_handler",
1308 : tenant_id = %tenant_shard_id.tenant_id,
1309 0 : shard_id = %tenant_shard_id.shard_slug()
1310 : ))
1311 0 : .await?;
1312 :
1313 0 : json_response(StatusCode::OK, ())
1314 0 : }
1315 :
1316 : /// HTTP endpoint to query the current tenant_size of a tenant.
1317 : ///
1318 : /// This is not used by consumption metrics under [`crate::consumption_metrics`], but can be used
1319 : /// to debug any of the calculations. Requires `tenant_id` request parameter, supports
1320 : /// `inputs_only=true|false` (default false) which supports debugging failure to calculate model
1321 : /// values.
1322 : ///
1323 : /// 'retention_period' query parameter overrides the cutoff that is used to calculate the size
1324 : /// (only if it is shorter than the real cutoff).
1325 : ///
1326 : /// Note: we don't update the cached size and prometheus metric here.
1327 : /// The retention period might be different, and it's nice to have a method to just calculate it
1328 : /// without modifying anything anyway.
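///
/// Hypothetical query-string sketch (the concrete route is registered in this
/// module's router, outside this excerpt):
///
/// - `?inputs_only=true` skips model evaluation and returns only the gathered inputs
/// - `?retention_period=<u64>` overrides (shortens) the cutoff used for the calculation
///
/// Sending `Accept: text/html` renders an HTML report via
/// `synthetic_size_html_response` instead of returning JSON.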
1329 0 : async fn tenant_size_handler(
1330 0 : request: Request<Body>,
1331 0 : cancel: CancellationToken,
1332 0 : ) -> Result<Response<Body>, ApiError> {
1333 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1334 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1335 0 : let inputs_only: Option<bool> = parse_query_param(&request, "inputs_only")?;
1336 0 : let retention_period: Option<u64> = parse_query_param(&request, "retention_period")?;
1337 0 : let headers = request.headers();
1338 0 : let state = get_state(&request);
1339 :
1340 0 : if !tenant_shard_id.is_shard_zero() {
1341 0 : return Err(ApiError::BadRequest(anyhow!(
1342 0 : "Size calculations are only available on shard zero"
1343 0 : )));
1344 0 : }
1345 :
1346 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
1347 0 : let tenant = state
1348 0 : .tenant_manager
1349 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1350 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1351 :
1352 : // this can be a long operation
1353 0 : let inputs = tenant
1354 0 : .gather_size_inputs(
1355 0 : retention_period,
1356 0 : LogicalSizeCalculationCause::TenantSizeHandler,
1357 0 : &cancel,
1358 0 : &ctx,
1359 0 : )
1360 0 : .await
1361 0 : .map_err(|e| match e {
1362 0 : crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown,
1363 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
1364 0 : })?;
1365 :
1366 0 : let mut sizes = None;
1367 0 : let accepts_html = headers
1368 0 : .get(header::ACCEPT)
1369 0 : .map(|v| v == "text/html")
1370 0 : .unwrap_or_default();
1371 0 : if !inputs_only.unwrap_or(false) {
1372 0 : let storage_model = inputs.calculate_model();
1373 0 : let size = storage_model.calculate();
1374 :
1375 : // If request header expects html, return html
1376 0 : if accepts_html {
1377 0 : return synthetic_size_html_response(inputs, storage_model, size);
1378 0 : }
1379 0 : sizes = Some(size);
1380 0 : } else if accepts_html {
1381 0 : return Err(ApiError::BadRequest(anyhow!(
1382 0 : "inputs_only parameter is incompatible with html output request"
1383 0 : )));
1384 0 : }
1385 :
1386 : /// The type resides in the pageserver so as not to expose `ModelInputs`.
1387 : #[derive(serde::Serialize)]
1388 : struct TenantHistorySize {
1389 : id: TenantId,
1390 : /// Size is a mixture of WAL and logical size, so the unit is bytes.
1391 : ///
1392 : /// Will be null if `?inputs_only=true` was given.
1393 : size: Option<u64>,
1394 : /// Size of each segment used in the model.
1395 : /// Will be null if `?inputs_only=true` was given.
1396 : segment_sizes: Option<Vec<tenant_size_model::SegmentSizeResult>>,
1397 : inputs: crate::tenant::size::ModelInputs,
1398 : }
1399 :
1400 0 : json_response(
1401 : StatusCode::OK,
1402 : TenantHistorySize {
1403 0 : id: tenant_shard_id.tenant_id,
1404 0 : size: sizes.as_ref().map(|x| x.total_size),
1405 0 : segment_sizes: sizes.map(|x| x.segments),
1406 0 : inputs,
1407 : },
1408 : )
1409 0 : }
1410 :
1411 0 : async fn tenant_shard_split_handler(
1412 0 : mut request: Request<Body>,
1413 0 : _cancel: CancellationToken,
1414 0 : ) -> Result<Response<Body>, ApiError> {
1415 0 : let req: TenantShardSplitRequest = json_request(&mut request).await?;
1416 :
1417 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1418 0 : let state = get_state(&request);
1419 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1420 :
1421 0 : let tenant = state
1422 0 : .tenant_manager
1423 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1424 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1425 :
1426 0 : let new_shards = state
1427 0 : .tenant_manager
1428 0 : .shard_split(
1429 0 : tenant,
1430 0 : ShardCount::new(req.new_shard_count),
1431 0 : req.new_stripe_size,
1432 0 : &ctx,
1433 0 : )
1434 0 : .await
1435 0 : .map_err(ApiError::InternalServerError)?;
1436 :
1437 0 : json_response(StatusCode::OK, TenantShardSplitResponse { new_shards })
1438 0 : }
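// Illustrative sketch (not part of the original source): the JSON body this
// handler expects, using the two fields the code above reads. `new_shard_count`
// is the total shard count after the split; the values here are made up.
#[cfg(test)]
mod shard_split_request_example {
    use pageserver_api::models::TenantShardSplitRequest;

    #[test]
    fn request_parses_from_json() {
        let req: TenantShardSplitRequest =
            serde_json::from_str(r#"{"new_shard_count": 4, "new_stripe_size": null}"#).unwrap();
        assert_eq!(req.new_shard_count, 4);
        assert!(req.new_stripe_size.is_none());
    }
}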
1439 :
1440 0 : async fn layer_map_info_handler(
1441 0 : request: Request<Body>,
1442 0 : _cancel: CancellationToken,
1443 0 : ) -> Result<Response<Body>, ApiError> {
1444 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1445 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1446 0 : let reset: LayerAccessStatsReset =
1447 0 : parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
1448 0 : let state = get_state(&request);
1449 :
1450 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1451 :
1452 0 : let timeline =
1453 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1454 0 : .await?;
1455 0 : let layer_map_info = timeline
1456 0 : .layer_map_info(reset)
1457 0 : .await
1458 0 : .map_err(|_shutdown| ApiError::ShuttingDown)?;
1459 :
1460 0 : json_response(StatusCode::OK, layer_map_info)
1461 0 : }
1462 :
1463 : #[instrument(skip_all, fields(tenant_id, shard_id, timeline_id, layer_name))]
1464 : async fn timeline_layer_scan_disposable_keys(
1465 : request: Request<Body>,
1466 : cancel: CancellationToken,
1467 : ) -> Result<Response<Body>, ApiError> {
1468 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1469 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1470 : let layer_name: LayerName = parse_request_param(&request, "layer_name")?;
1471 :
1472 : tracing::Span::current().record(
1473 : "tenant_id",
1474 : tracing::field::display(&tenant_shard_id.tenant_id),
1475 : );
1476 : tracing::Span::current().record(
1477 : "shard_id",
1478 : tracing::field::display(tenant_shard_id.shard_slug()),
1479 : );
1480 : tracing::Span::current().record("timeline_id", tracing::field::display(&timeline_id));
1481 : tracing::Span::current().record("layer_name", tracing::field::display(&layer_name));
1482 :
1483 : let state = get_state(&request);
1484 :
1485 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1486 :
1487 : // technically the timeline need not be active for this scan to complete
1488 : let timeline =
1489 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1490 : .await?;
1491 :
1492 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1493 : .with_scope_timeline(&timeline);
1494 :
1495 : let guard = timeline
1496 : .layers
1497 : .read(LayerManagerLockHolder::GetLayerMapInfo)
1498 : .await;
1499 : let Some(layer) = guard.try_get_from_key(&layer_name.clone().into()) else {
1500 : return Err(ApiError::NotFound(
1501 : anyhow::anyhow!("Layer {tenant_shard_id}/{timeline_id}/{layer_name} not found").into(),
1502 : ));
1503 : };
1504 :
1505 : let resident_layer = layer
1506 : .download_and_keep_resident(&ctx)
1507 : .await
1508 0 : .map_err(|err| match err {
1509 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1510 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1511 0 : ApiError::ShuttingDown
1512 : }
1513 : tenant::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
1514 : | tenant::storage_layer::layer::DownloadError::DownloadRequired
1515 : | tenant::storage_layer::layer::DownloadError::NotFile(_)
1516 : | tenant::storage_layer::layer::DownloadError::DownloadFailed
1517 : | tenant::storage_layer::layer::DownloadError::PreStatFailed(_) => {
1518 0 : ApiError::InternalServerError(err.into())
1519 : }
1520 : #[cfg(test)]
1521 : tenant::storage_layer::layer::DownloadError::Failpoint(_) => {
1522 0 : ApiError::InternalServerError(err.into())
1523 : }
1524 0 : })?;
1525 :
1526 : let keys = resident_layer
1527 : .load_keys(&ctx)
1528 : .await
1529 : .map_err(ApiError::InternalServerError)?;
1530 :
1531 : let shard_identity = timeline.get_shard_identity();
1532 :
1533 : let mut disposable_count = 0;
1534 : let mut not_disposable_count = 0;
1535 : let cancel = cancel.clone();
1536 : for (i, key) in keys.into_iter().enumerate() {
1537 : if shard_identity.is_key_disposable(&key) {
1538 : disposable_count += 1;
1539 : tracing::debug!(key = %key, key.dbg=?key, "disposable key");
1540 : } else {
1541 : not_disposable_count += 1;
1542 : }
1543 : #[allow(clippy::collapsible_if)]
1544 : if i % 10000 == 0 {
1545 : if cancel.is_cancelled() || timeline.cancel.is_cancelled() || timeline.is_stopping() {
1546 : return Err(ApiError::ShuttingDown);
1547 : }
1548 : }
1549 : }
1550 :
1551 : json_response(
1552 : StatusCode::OK,
1553 : pageserver_api::models::ScanDisposableKeysResponse {
1554 : disposable_count,
1555 : not_disposable_count,
1556 : },
1557 : )
1558 : }
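// Illustrative sketch (not part of the original source) of the cooperative
// cancellation pattern used in the loop above: poll the `CancellationToken`
// only every N iterations, so the check adds negligible overhead to a long,
// CPU-bound scan.
#[allow(dead_code)]
fn count_matching_with_cancellation<T>(
    items: impl IntoIterator<Item = T>,
    mut is_match: impl FnMut(&T) -> bool,
    cancel: &CancellationToken,
) -> Result<usize, ApiError> {
    let mut count = 0;
    for (i, item) in items.into_iter().enumerate() {
        if is_match(&item) {
            count += 1;
        }
        // Same cadence as the handler above: one cancellation check per 10k items.
        if i % 10_000 == 0 && cancel.is_cancelled() {
            return Err(ApiError::ShuttingDown);
        }
    }
    Ok(count)
}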
1559 :
1560 0 : async fn timeline_download_heatmap_layers_handler(
1561 0 : request: Request<Body>,
1562 0 : _cancel: CancellationToken,
1563 0 : ) -> Result<Response<Body>, ApiError> {
1564 : // Only used in the case where remote storage is not configured.
1565 : const DEFAULT_MAX_CONCURRENCY: usize = 100;
1566 : // A conservative default.
1567 : const DEFAULT_CONCURRENCY: usize = 16;
1568 :
1569 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1570 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1571 :
1572 0 : let desired_concurrency =
1573 0 : parse_query_param(&request, "concurrency")?.unwrap_or(DEFAULT_CONCURRENCY);
1574 0 : let recurse = parse_query_param(&request, "recurse")?.unwrap_or(false);
1575 :
1576 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1577 :
1578 0 : let state = get_state(&request);
1579 0 : let timeline =
1580 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1581 0 : .await?;
1582 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1583 0 : .with_scope_timeline(&timeline);
1584 :
1585 0 : let max_concurrency = get_config(&request)
1586 0 : .remote_storage_config
1587 0 : .as_ref()
1588 0 : .map(|c| c.concurrency_limit())
1589 0 : .unwrap_or(DEFAULT_MAX_CONCURRENCY);
1590 0 : let concurrency = std::cmp::min(max_concurrency, desired_concurrency);
1591 :
1592 0 : timeline.start_heatmap_layers_download(concurrency, recurse, &ctx)?;
1593 :
1594 0 : json_response(StatusCode::ACCEPTED, ())
1595 0 : }
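// Sketch (not part of the original source): the clamp above never lets the
// caller-requested concurrency exceed the remote storage client's own limit.
#[cfg(test)]
mod heatmap_concurrency_clamp_example {
    #[test]
    fn requested_concurrency_is_clamped() {
        let max_concurrency = 100; // e.g. the remote storage concurrency limit
        assert_eq!(std::cmp::min(max_concurrency, 16), 16); // default fits
        assert_eq!(std::cmp::min(8, 16), 8); // tighter limit wins
    }
}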
1596 :
1597 0 : async fn timeline_shutdown_download_heatmap_layers_handler(
1598 0 : request: Request<Body>,
1599 0 : _cancel: CancellationToken,
1600 0 : ) -> Result<Response<Body>, ApiError> {
1601 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1602 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1603 :
1604 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1605 :
1606 0 : let state = get_state(&request);
1607 0 : let timeline =
1608 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1609 0 : .await?;
1610 :
1611 0 : timeline.stop_and_drain_heatmap_layers_download().await;
1612 :
1613 0 : json_response(StatusCode::OK, ())
1614 0 : }
1615 :
1616 0 : async fn layer_download_handler(
1617 0 : request: Request<Body>,
1618 0 : _cancel: CancellationToken,
1619 0 : ) -> Result<Response<Body>, ApiError> {
1620 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1621 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1622 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1623 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1624 0 : let layer_name = LayerName::from_str(layer_file_name)
1625 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1626 0 : let state = get_state(&request);
1627 :
1628 0 : let timeline =
1629 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1630 0 : .await?;
1631 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1632 0 : .with_scope_timeline(&timeline);
1633 0 : let downloaded = timeline
1634 0 : .download_layer(&layer_name, &ctx)
1635 0 : .await
1636 0 : .map_err(|e| match e {
1637 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1638 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1639 0 : ApiError::ShuttingDown
1640 : }
1641 0 : other => ApiError::InternalServerError(other.into()),
1642 0 : })?;
1643 :
1644 0 : match downloaded {
1645 0 : Some(true) => json_response(StatusCode::OK, ()),
1646 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1647 0 : None => json_response(
1648 : StatusCode::BAD_REQUEST,
1649 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1650 : ),
1651 : }
1652 0 : }
1653 :
1654 0 : async fn evict_timeline_layer_handler(
1655 0 : request: Request<Body>,
1656 0 : _cancel: CancellationToken,
1657 0 : ) -> Result<Response<Body>, ApiError> {
1658 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1659 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1660 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1661 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1662 0 : let state = get_state(&request);
1663 :
1664 0 : let layer_name = LayerName::from_str(layer_file_name)
1665 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1666 :
1667 0 : let timeline =
1668 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1669 0 : .await?;
1670 0 : let evicted = timeline
1671 0 : .evict_layer(&layer_name)
1672 0 : .await
1673 0 : .map_err(ApiError::InternalServerError)?;
1674 :
1675 0 : match evicted {
1676 0 : Some(true) => json_response(StatusCode::OK, ()),
1677 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1678 0 : None => json_response(
1679 : StatusCode::BAD_REQUEST,
1680 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1681 : ),
1682 : }
1683 0 : }
1684 :
1685 0 : async fn timeline_gc_blocking_handler(
1686 0 : request: Request<Body>,
1687 0 : _cancel: CancellationToken,
1688 0 : ) -> Result<Response<Body>, ApiError> {
1689 0 : block_or_unblock_gc(request, true).await
1690 0 : }
1691 :
1692 0 : async fn timeline_gc_unblocking_handler(
1693 0 : request: Request<Body>,
1694 0 : _cancel: CancellationToken,
1695 0 : ) -> Result<Response<Body>, ApiError> {
1696 0 : block_or_unblock_gc(request, false).await
1697 0 : }
1698 :
1699 : /// Traces GetPage@LSN requests for a timeline, and emits metadata in an efficient binary encoding.
1700 : /// Use the `pagectl page-trace` command to decode and analyze the output.
1701 0 : async fn timeline_page_trace_handler(
1702 0 : request: Request<Body>,
1703 0 : cancel: CancellationToken,
1704 0 : ) -> Result<Response<Body>, ApiError> {
1705 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1706 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1707 0 : let state = get_state(&request);
1708 0 : check_permission(&request, None)?;
1709 :
1710 0 : let size_limit: usize = parse_query_param(&request, "size_limit_bytes")?.unwrap_or(1024 * 1024);
1711 0 : let time_limit_secs: u64 = parse_query_param(&request, "time_limit_secs")?.unwrap_or(5);
1712 :
1713 : // Convert size limit to event limit based on the serialized size of an event. The event size is
1714 : // fixed, as the default bincode serializer uses fixed-width integer encoding.
1715 0 : let event_size = bincode::serialize(&PageTraceEvent::default())
1716 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?
1717 0 : .len();
1718 0 : let event_limit = size_limit / event_size;
1719 :
1720 0 : let timeline =
1721 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1722 0 : .await?;
1723 :
1724 : // Install a page trace, unless one is already in progress. We just use a buffered channel,
1725 : // which may double the memory usage in the worst case, but it's still bounded.
1726 0 : let (trace_tx, mut trace_rx) = tokio::sync::mpsc::channel(event_limit);
1727 0 : let cur = timeline.page_trace.load();
1728 0 : let installed = cur.is_none()
1729 0 : && timeline
1730 0 : .page_trace
1731 0 : .compare_and_swap(cur, Some(Arc::new(trace_tx)))
1732 0 : .is_none();
1733 0 : if !installed {
1734 0 : return Err(ApiError::Conflict("page trace already active".to_string()));
1735 0 : }
1736 0 : defer!(timeline.page_trace.store(None)); // uninstall on return
1737 :
1738 : // Collect the trace and return it to the client. We could stream the response, but this is
1739 : // simple and fine.
1740 0 : let mut body = Vec::with_capacity(size_limit);
1741 0 : let deadline = Instant::now() + Duration::from_secs(time_limit_secs);
1742 :
1743 0 : while body.len() < size_limit {
1744 0 : tokio::select! {
1745 0 : event = trace_rx.recv() => {
1746 0 : let Some(event) = event else {
1747 0 : break; // shouldn't happen (sender doesn't close, unless timeline dropped)
1748 : };
1749 0 : bincode::serialize_into(&mut body, &event)
1750 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?;
1751 : }
1752 0 : _ = tokio::time::sleep_until(deadline) => break, // time limit reached
1753 0 : _ = cancel.cancelled() => return Err(ApiError::Cancelled),
1754 : }
1755 : }
1756 :
1757 0 : Ok(Response::builder()
1758 0 : .status(StatusCode::OK)
1759 0 : .header(header::CONTENT_TYPE, "application/octet-stream")
1760 0 : .body(hyper::Body::from(body))
1761 0 : .unwrap())
1762 0 : }
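// Illustrative sketch (not part of the original source): the size-to-event-limit
// conversion above is exact because bincode's default options use fixed-width
// integer encoding, so every `PageTraceEvent` serializes to the same length.
#[cfg(test)]
mod page_trace_event_size_example {
    use pageserver_api::models::PageTraceEvent;

    #[test]
    fn event_limit_from_size_limit() {
        let event_size = bincode::serialize(&PageTraceEvent::default())
            .unwrap()
            .len();
        let size_limit = 1024 * 1024; // the default `size_limit_bytes`
        let event_limit = size_limit / event_size;
        assert!(event_limit > 0);
    }
}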
1763 :
1764 : /// Adding a block is `POST ../block_gc`, removing a block is `POST ../unblock_gc`.
1765 : ///
1766 : /// Both are technically unsafe because they might fire off index uploads, thus they are POST.
1767 0 : async fn block_or_unblock_gc(
1768 0 : request: Request<Body>,
1769 0 : block: bool,
1770 0 : ) -> Result<Response<Body>, ApiError> {
1771 : use crate::tenant::remote_timeline_client::WaitCompletionError;
1772 : use crate::tenant::upload_queue::NotInitialized;
1773 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1774 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1775 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1776 0 : let state = get_state(&request);
1777 :
1778 0 : let tenant = state
1779 0 : .tenant_manager
1780 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1781 :
1782 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1783 :
1784 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
1785 :
1786 0 : let fut = async {
1787 0 : if block {
1788 0 : timeline.block_gc(&tenant).await.map(|_| ())
1789 : } else {
1790 0 : timeline.unblock_gc(&tenant).await
1791 : }
1792 0 : };
1793 :
1794 0 : let span = tracing::info_span!(
1795 : "block_or_unblock_gc",
1796 : tenant_id = %tenant_shard_id.tenant_id,
1797 0 : shard_id = %tenant_shard_id.shard_slug(),
1798 : timeline_id = %timeline_id,
1799 : block = block,
1800 : );
1801 :
1802 0 : let res = fut.instrument(span).await;
1803 :
1804 0 : res.map_err(|e| {
1805 0 : if e.is::<NotInitialized>() || e.is::<WaitCompletionError>() {
1806 0 : ApiError::ShuttingDown
1807 : } else {
1808 0 : ApiError::InternalServerError(e)
1809 : }
1810 0 : })?;
1811 :
1812 0 : json_response(StatusCode::OK, ())
1813 0 : }
1814 :
1815 : /// Get tenant_size SVG graph along with the JSON data.
1816 0 : fn synthetic_size_html_response(
1817 0 : inputs: ModelInputs,
1818 0 : storage_model: StorageModel,
1819 0 : sizes: SizeResult,
1820 0 : ) -> Result<Response<Body>, ApiError> {
1821 0 : let mut timeline_ids: Vec<String> = Vec::new();
1822 0 : let mut timeline_map: HashMap<TimelineId, usize> = HashMap::new();
1823 0 : for (index, ti) in inputs.timeline_inputs.iter().enumerate() {
1824 0 : timeline_map.insert(ti.timeline_id, index);
1825 0 : timeline_ids.push(ti.timeline_id.to_string());
1826 0 : }
1827 0 : let seg_to_branch: Vec<(usize, SvgBranchKind)> = inputs
1828 0 : .segments
1829 0 : .iter()
1830 0 : .map(|seg| {
1831 0 : (
1832 0 : *timeline_map.get(&seg.timeline_id).unwrap(),
1833 0 : seg.kind.into(),
1834 0 : )
1835 0 : })
1836 0 : .collect();
1837 :
1838 0 : let svg =
1839 0 : tenant_size_model::svg::draw_svg(&storage_model, &timeline_ids, &seg_to_branch, &sizes)
1840 0 : .map_err(ApiError::InternalServerError)?;
1841 :
1842 0 : let mut response = String::new();
1843 :
1844 : use std::fmt::Write;
1845 0 : write!(response, "<html>\n<body>\n").unwrap();
1846 0 : write!(response, "<div>\n{svg}\n</div>").unwrap();
1847 0 : writeln!(response, "Project size: {}", sizes.total_size).unwrap();
1848 0 : writeln!(response, "<pre>").unwrap();
1849 0 : writeln!(
1850 0 : response,
1851 0 : "{}",
1852 0 : serde_json::to_string_pretty(&inputs).unwrap()
1853 : )
1854 0 : .unwrap();
1855 0 : writeln!(
1856 0 : response,
1857 0 : "{}",
1858 0 : serde_json::to_string_pretty(&sizes.segments).unwrap()
1859 : )
1860 0 : .unwrap();
1861 0 : writeln!(response, "</pre>").unwrap();
1862 0 : write!(response, "</body>\n</html>\n").unwrap();
1863 :
1864 0 : html_response(StatusCode::OK, response)
1865 0 : }
1866 :
1867 0 : pub fn html_response(status: StatusCode, data: String) -> Result<Response<Body>, ApiError> {
1868 0 : let response = Response::builder()
1869 0 : .status(status)
1870 0 : .header(header::CONTENT_TYPE, "text/html")
1871 0 : .body(Body::from(data.as_bytes().to_vec()))
1872 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
1873 0 : Ok(response)
1874 0 : }
1875 :
1876 0 : async fn get_tenant_config_handler(
1877 0 : request: Request<Body>,
1878 0 : _cancel: CancellationToken,
1879 0 : ) -> Result<Response<Body>, ApiError> {
1880 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1881 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1882 0 : let state = get_state(&request);
1883 :
1884 0 : let tenant = state
1885 0 : .tenant_manager
1886 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1887 :
1888 0 : let response = HashMap::from([
1889 : (
1890 : "tenant_specific_overrides",
1891 0 : serde_json::to_value(tenant.tenant_specific_overrides())
1892 0 : .context("serializing tenant specific overrides")
1893 0 : .map_err(ApiError::InternalServerError)?,
1894 : ),
1895 : (
1896 0 : "effective_config",
1897 0 : serde_json::to_value(tenant.effective_config())
1898 0 : .context("serializing effective config")
1899 0 : .map_err(ApiError::InternalServerError)?,
1900 : ),
1901 : ]);
1902 :
1903 0 : json_response(StatusCode::OK, response)
1904 0 : }
1905 :
1906 0 : async fn update_tenant_config_handler(
1907 0 : mut request: Request<Body>,
1908 0 : _cancel: CancellationToken,
1909 0 : ) -> Result<Response<Body>, ApiError> {
1910 0 : let request_data: TenantConfigRequest = json_request(&mut request).await?;
1911 0 : let tenant_id = request_data.tenant_id;
1912 0 : check_permission(&request, Some(tenant_id))?;
1913 :
1914 0 : let new_tenant_conf = request_data.config;
1915 :
1916 0 : let state = get_state(&request);
1917 :
1918 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1919 :
1920 0 : let tenant = state
1921 0 : .tenant_manager
1922 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1923 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1924 :
1925 : // This is a legacy API that only operates on attached tenants: the preferred
1926 : // API to use is the location_config/ endpoint, which lets the caller provide
1927 : // the full LocationConf.
1928 0 : let location_conf = LocationConf::attached_single(
1929 0 : new_tenant_conf.clone(),
1930 0 : tenant.get_generation(),
1931 0 : ShardParameters::from(tenant.get_shard_identity()),
1932 : );
1933 :
1934 0 : tenant
1935 0 : .get_shard_identity()
1936 0 : .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
1937 :
1938 0 : crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1939 0 : .await
1940 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1941 :
1942 0 : let _ = tenant
1943 0 : .update_tenant_config(|_crnt| Ok(new_tenant_conf.clone()))
1944 0 : .expect("Closure returns Ok()");
1945 :
1946 0 : json_response(StatusCode::OK, ())
1947 0 : }
1948 :
1949 0 : async fn patch_tenant_config_handler(
1950 0 : mut request: Request<Body>,
1951 0 : _cancel: CancellationToken,
1952 0 : ) -> Result<Response<Body>, ApiError> {
1953 0 : let request_data: TenantConfigPatchRequest = json_request(&mut request).await?;
1954 0 : let tenant_id = request_data.tenant_id;
1955 0 : check_permission(&request, Some(tenant_id))?;
1956 :
1957 0 : let state = get_state(&request);
1958 :
1959 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1960 :
1961 0 : let tenant = state
1962 0 : .tenant_manager
1963 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1964 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1965 :
1966 0 : let updated = tenant
1967 0 : .update_tenant_config(|crnt| {
1968 0 : crnt.apply_patch(request_data.config.clone())
1969 0 : .map_err(anyhow::Error::new)
1970 0 : })
1971 0 : .map_err(ApiError::BadRequest)?;
1972 :
1973 : // This is a legacy API that only operates on attached tenants: the preferred
1974 : // API to use is the location_config/ endpoint, which lets the caller provide
1975 : // the full LocationConf.
1976 0 : let location_conf = LocationConf::attached_single(
1977 0 : updated,
1978 0 : tenant.get_generation(),
1979 0 : ShardParameters::from(tenant.get_shard_identity()),
1980 : );
1981 :
1982 0 : tenant
1983 0 : .get_shard_identity()
1984 0 : .assert_equal(location_conf.shard); // not strictly necessary since we construct it above
1985 :
1986 0 : crate::tenant::TenantShard::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1987 0 : .await
1988 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1989 :
1990 0 : json_response(StatusCode::OK, ())
1991 0 : }
1992 :
1993 0 : async fn put_tenant_location_config_handler(
1994 0 : mut request: Request<Body>,
1995 0 : _cancel: CancellationToken,
1996 0 : ) -> Result<Response<Body>, ApiError> {
1997 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1998 :
1999 0 : let request_data: TenantLocationConfigRequest = json_request(&mut request).await?;
2000 0 : let flush = parse_query_param(&request, "flush_ms")?.map(Duration::from_millis);
2001 0 : let lazy = parse_query_param(&request, "lazy")?.unwrap_or(false);
2002 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2003 :
2004 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
2005 0 : let state = get_state(&request);
2006 0 : let conf = state.conf;
2007 :
2008 : // The `Detached` state is special: it doesn't upsert a tenant; instead, it removes
2009 : // the tenant's local disk content and drops it from memory.
2010 0 : if let LocationConfigMode::Detached = request_data.config.mode {
2011 0 : if let Err(e) = state
2012 0 : .tenant_manager
2013 0 : .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client)
2014 0 : .instrument(info_span!("tenant_detach",
2015 : tenant_id = %tenant_shard_id.tenant_id,
2016 0 : shard_id = %tenant_shard_id.shard_slug()
2017 : ))
2018 0 : .await
2019 : {
2020 0 : match e {
2021 0 : TenantStateError::SlotError(TenantSlotError::NotFound(_)) => {
2022 0 : // This API is idempotent: a NotFound on a detach is fine.
2023 0 : }
2024 0 : _ => return Err(e.into()),
2025 : }
2026 0 : }
2027 0 : return json_response(StatusCode::OK, ());
2028 0 : }
2029 :
2030 0 : let location_conf =
2031 0 : LocationConf::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
2032 :
2033 : // lazy==true queues the tenant up for activation (it can still jump the queue, as usual, when a
2034 : // compute connects), similar to the activation ordering at startup.
2035 0 : let spawn_mode = if lazy {
2036 0 : tenant::SpawnMode::Lazy
2037 : } else {
2038 0 : tenant::SpawnMode::Eager
2039 : };
2040 :
2041 0 : let tenant = state
2042 0 : .tenant_manager
2043 0 : .upsert_location(tenant_shard_id, location_conf, flush, spawn_mode, &ctx)
2044 0 : .await?;
2045 0 : let stripe_size = tenant.as_ref().map(|t| t.get_shard_stripe_size());
2046 0 : let attached = tenant.is_some();
2047 :
2048 0 : if let Some(_flush_ms) = flush {
2049 0 : match state
2050 0 : .secondary_controller
2051 0 : .upload_tenant(tenant_shard_id)
2052 0 : .await
2053 : {
2054 : Ok(()) => {
2055 0 : tracing::info!("Uploaded heatmap during flush");
2056 : }
2057 0 : Err(e) => {
2058 0 : tracing::warn!("Failed to flush heatmap: {e}");
2059 : }
2060 : }
2061 : } else {
2062 0 : tracing::info!("No flush requested when configuring");
2063 : }
2064 :
2065 : // This API returns a vector of pageservers where the tenant is attached: this is
2066 : // primarily for use in the sharding service. For compatibility, we also return this
2067 : // when called directly on a pageserver, but the payload is always zero or one shards.
2068 0 : let mut response = TenantLocationConfigResponse {
2069 0 : shards: Vec::new(),
2070 0 : stripe_size: None,
2071 0 : };
2072 0 : if attached {
2073 0 : response.shards.push(TenantShardLocation {
2074 0 : shard_id: tenant_shard_id,
2075 0 : node_id: state.conf.id,
2076 0 : });
2077 0 : if tenant_shard_id.shard_count.count() > 1 {
2078 : // Stripe size should be set if we are attached
2079 0 : debug_assert!(stripe_size.is_some());
2080 0 : response.stripe_size = stripe_size;
2081 0 : }
2082 0 : }
2083 :
2084 0 : json_response(StatusCode::OK, response)
2085 0 : }
2086 :
2087 0 : async fn list_location_config_handler(
2088 0 : request: Request<Body>,
2089 0 : _cancel: CancellationToken,
2090 0 : ) -> Result<Response<Body>, ApiError> {
2091 0 : let state = get_state(&request);
2092 0 : let slots = state.tenant_manager.list();
2093 0 : let result = LocationConfigListResponse {
2094 0 : tenant_shards: slots
2095 0 : .into_iter()
2096 0 : .map(|(tenant_shard_id, slot)| {
2097 0 : let v = match slot {
2098 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2099 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2100 0 : TenantSlot::InProgress(_) => None,
2101 : };
2102 0 : (tenant_shard_id, v)
2103 0 : })
2104 0 : .collect(),
2105 : };
2106 0 : json_response(StatusCode::OK, result)
2107 0 : }
2108 :
2109 0 : async fn get_location_config_handler(
2110 0 : request: Request<Body>,
2111 0 : _cancel: CancellationToken,
2112 0 : ) -> Result<Response<Body>, ApiError> {
2113 0 : let state = get_state(&request);
2114 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2115 0 : let slot = state.tenant_manager.get(tenant_shard_id);
2116 :
2117 0 : let Some(slot) = slot else {
2118 0 : return Err(ApiError::NotFound(
2119 0 : anyhow::anyhow!("Tenant shard not found").into(),
2120 0 : ));
2121 : };
2122 :
2123 0 : let result: Option<LocationConfig> = match slot {
2124 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2125 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2126 0 : TenantSlot::InProgress(_) => None,
2127 : };
2128 :
2129 0 : json_response(StatusCode::OK, result)
2130 0 : }
2131 :
2132 : // Do a time travel recovery on the given tenant/tenant shard. The tenant needs to be detached
2133 : // (from all pageservers), as the recovery invalidates consistency assumptions.
2134 0 : async fn tenant_time_travel_remote_storage_handler(
2135 0 : request: Request<Body>,
2136 0 : cancel: CancellationToken,
2137 0 : ) -> Result<Response<Body>, ApiError> {
2138 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2139 :
2140 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2141 :
2142 0 : let timestamp_raw = must_get_query_param(&request, "travel_to")?;
2143 0 : let timestamp = humantime::parse_rfc3339(&timestamp_raw)
2144 0 : .with_context(|| format!("Invalid time for travel_to: {timestamp_raw:?}"))
2145 0 : .map_err(ApiError::BadRequest)?;
2146 :
2147 0 : let done_if_after_raw = must_get_query_param(&request, "done_if_after")?;
2148 0 : let done_if_after = humantime::parse_rfc3339(&done_if_after_raw)
2149 0 : .with_context(|| format!("Invalid time for done_if_after: {done_if_after_raw:?}"))
2150 0 : .map_err(ApiError::BadRequest)?;
2151 :
2152 : // This is just a sanity check to fend off naive misuse of the API:
2153 : // the tenant needs to be detached *everywhere*
2154 0 : let state = get_state(&request);
2155 0 : let we_manage_tenant = state.tenant_manager.manages_tenant_shard(tenant_shard_id);
2156 0 : if we_manage_tenant {
2157 0 : return Err(ApiError::BadRequest(anyhow!(
2158 0 : "Tenant {tenant_shard_id} is already attached at this pageserver"
2159 0 : )));
2160 0 : }
2161 :
2162 0 : if timestamp > done_if_after {
2163 0 : return Err(ApiError::BadRequest(anyhow!(
2164 0 : "The done_if_after timestamp comes before the timestamp to recover to"
2165 0 : )));
2166 0 : }
2167 :
2168 0 : tracing::info!(
2169 0 : "Issuing time travel request internally. timestamp={timestamp_raw}, done_if_after={done_if_after_raw}"
2170 : );
2171 :
2172 0 : remote_timeline_client::upload::time_travel_recover_tenant(
2173 0 : &state.remote_storage,
2174 0 : &tenant_shard_id,
2175 0 : timestamp,
2176 0 : done_if_after,
2177 0 : &cancel,
2178 0 : )
2179 0 : .await
2180 0 : .map_err(|e| match e {
2181 0 : TimeTravelError::BadInput(e) => {
2182 0 : warn!("bad input error: {e}");
2183 0 : ApiError::BadRequest(anyhow!("bad input error"))
2184 : }
2185 : TimeTravelError::Unimplemented => {
2186 0 : ApiError::BadRequest(anyhow!("unimplemented for the configured remote storage"))
2187 : }
2188 0 : TimeTravelError::Cancelled => ApiError::InternalServerError(anyhow!("cancelled")),
2189 : TimeTravelError::TooManyVersions => {
2190 0 : ApiError::InternalServerError(anyhow!("too many versions in remote storage"))
2191 : }
2192 0 : TimeTravelError::Other(e) => {
2193 0 : warn!("internal error: {e}");
2194 0 : ApiError::InternalServerError(anyhow!("internal error"))
2195 : }
2196 0 : })?;
2197 :
2198 0 : json_response(StatusCode::OK, ())
2199 0 : }
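// Illustrative sketch (not part of the original source): both query parameters
// are RFC 3339 timestamps parsed with `humantime`, and `travel_to` must not be
// later than `done_if_after`, mirroring the validation above.
#[cfg(test)]
mod time_travel_params_example {
    #[test]
    fn travel_to_must_not_exceed_done_if_after() {
        let travel_to = humantime::parse_rfc3339("2024-01-01T00:00:00Z").unwrap();
        let done_if_after = humantime::parse_rfc3339("2024-01-02T00:00:00Z").unwrap();
        // The handler rejects the request with 400 Bad Request otherwise.
        assert!(travel_to <= done_if_after);
    }
}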
2200 :
2201 : /// Testing helper to transition a tenant to [`crate::tenant::TenantState::Broken`].
2202 0 : async fn handle_tenant_break(
2203 0 : r: Request<Body>,
2204 0 : _cancel: CancellationToken,
2205 0 : ) -> Result<Response<Body>, ApiError> {
2206 0 : let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
2207 :
2208 0 : let state = get_state(&r);
2209 0 : state
2210 0 : .tenant_manager
2211 0 : .get_attached_tenant_shard(tenant_shard_id)?
2212 0 : .set_broken("broken from test".to_owned())
2213 0 : .await;
2214 :
2215 0 : json_response(StatusCode::OK, ())
2216 0 : }
2217 :
2218 : // Obtains an LSN lease on the given timeline.
2219 0 : async fn lsn_lease_handler(
2220 0 : mut request: Request<Body>,
2221 0 : _cancel: CancellationToken,
2222 0 : ) -> Result<Response<Body>, ApiError> {
2223 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2224 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2225 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2226 0 : let lsn = json_request::<LsnLeaseRequest>(&mut request).await?.lsn;
2227 :
2228 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2229 :
2230 0 : let state = get_state(&request);
2231 :
2232 0 : let timeline =
2233 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2234 0 : .await?;
2235 :
2236 0 : let result = async {
2237 0 : timeline
2238 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx)
2239 0 : .map_err(|e| {
2240 0 : ApiError::InternalServerError(
2241 0 : e.context(format!("invalid lsn lease request at {lsn}")),
2242 0 : )
2243 0 : })
2244 0 : }
2245 0 : .instrument(info_span!("init_lsn_lease", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2246 0 : .await?;
2247 :
2248 0 : json_response(StatusCode::OK, result)
2249 0 : }
2250 :
2251 : // Run GC immediately on given timeline.
2252 0 : async fn timeline_gc_handler(
2253 0 : mut request: Request<Body>,
2254 0 : cancel: CancellationToken,
2255 0 : ) -> Result<Response<Body>, ApiError> {
2256 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2257 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2258 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2259 :
2260 0 : let gc_req: TimelineGcRequest = json_request(&mut request).await?;
2261 :
2262 0 : let state = get_state(&request);
2263 :
2264 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2265 0 : let gc_result = state
2266 0 : .tenant_manager
2267 0 : .immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, &ctx)
2268 0 : .await?;
2269 :
2270 0 : json_response(StatusCode::OK, gc_result)
2271 0 : }
2272 :
2273 : // Cancel scheduled compaction tasks
2274 0 : async fn timeline_cancel_compact_handler(
2275 0 : request: Request<Body>,
2276 0 : _cancel: CancellationToken,
2277 0 : ) -> Result<Response<Body>, ApiError> {
2278 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2279 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2280 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2281 0 : let state = get_state(&request);
2282 0 : async {
2283 0 : let tenant = state
2284 0 : .tenant_manager
2285 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2286 0 : tenant.cancel_scheduled_compaction(timeline_id);
2287 0 : json_response(StatusCode::OK, ())
2288 0 : }
2289 0 : .instrument(info_span!("timeline_cancel_compact", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2290 0 : .await
2291 0 : }
2292 :
2293 : // Get compact info of a timeline
2294 0 : async fn timeline_compact_info_handler(
2295 0 : request: Request<Body>,
2296 0 : _cancel: CancellationToken,
2297 0 : ) -> Result<Response<Body>, ApiError> {
2298 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2299 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2300 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2301 0 : let state = get_state(&request);
2302 0 : async {
2303 0 : let tenant = state
2304 0 : .tenant_manager
2305 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2306 0 : let resp = tenant.get_scheduled_compaction_tasks(timeline_id);
2307 0 : json_response(StatusCode::OK, resp)
2308 0 : }
2309 0 : .instrument(info_span!("timeline_compact_info", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2310 0 : .await
2311 0 : }
2312 :
2313 : // Run compaction immediately on given timeline.
2314 0 : async fn timeline_compact_handler(
2315 0 : mut request: Request<Body>,
2316 0 : cancel: CancellationToken,
2317 0 : ) -> Result<Response<Body>, ApiError> {
2318 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2319 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2320 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2321 :
2322 0 : let compact_request = json_request_maybe::<Option<CompactRequest>>(&mut request).await?;
2323 :
2324 0 : let state = get_state(&request);
2325 :
2326 0 : let mut flags = EnumSet::empty();
2327 :
2328 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2329 0 : flags |= CompactFlags::ForceL0Compaction;
2330 0 : }
2331 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2332 0 : flags |= CompactFlags::ForceRepartition;
2333 0 : }
2334 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2335 0 : flags |= CompactFlags::ForceImageLayerCreation;
2336 0 : }
2337 0 : if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? {
2338 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
2339 0 : }
2340 0 : if Some(true) == parse_query_param::<_, bool>(&request, "dry_run")? {
2341 0 : flags |= CompactFlags::DryRun;
2342 0 : }
2343 : // Manual compaction does not yield for L0.
2344 :
2345 0 : let wait_until_uploaded =
2346 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2347 :
2348 0 : let wait_until_scheduled_compaction_done =
2349 0 : parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")?
2350 0 : .unwrap_or(false);
2351 :
2352 0 : let sub_compaction = compact_request
2353 0 : .as_ref()
2354 0 : .map(|r| r.sub_compaction)
2355 0 : .unwrap_or(false);
2356 0 : let sub_compaction_max_job_size_mb = compact_request
2357 0 : .as_ref()
2358 0 : .and_then(|r| r.sub_compaction_max_job_size_mb);
2359 :
2360 0 : let options = CompactOptions {
2361 0 : compact_key_range: compact_request
2362 0 : .as_ref()
2363 0 : .and_then(|r| r.compact_key_range.clone()),
2364 0 : compact_lsn_range: compact_request
2365 0 : .as_ref()
2366 0 : .and_then(|r| r.compact_lsn_range.clone()),
2367 0 : flags,
2368 0 : sub_compaction,
2369 0 : sub_compaction_max_job_size_mb,
2370 : gc_compaction_do_metadata_compaction: false,
2371 : };
2372 :
2373 0 : let scheduled = compact_request
2374 0 : .as_ref()
2375 0 : .map(|r| r.scheduled)
2376 0 : .unwrap_or(false);
2377 :
2378 0 : async {
2379 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2380 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2381 0 : if scheduled {
2382 0 : let tenant = state
2383 0 : .tenant_manager
2384 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2385 0 : let rx = tenant.schedule_compaction(timeline_id, options).await.map_err(ApiError::InternalServerError)?;
2386 0 : if wait_until_scheduled_compaction_done {
2387 : // It is possible that this will take a long time; dropping the HTTP request will not cancel the compaction.
2388 0 : rx.await.ok();
2389 0 : }
2390 : } else {
2391 0 : timeline
2392 0 : .compact_with_options(&cancel, options, &ctx)
2393 0 : .await
2394 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2395 0 : if wait_until_uploaded {
2396 0 : timeline.remote_client.wait_completion().await
2397 : // XXX map to correct ApiError for the cases where it's due to shutdown
2398 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2399 0 : }
2400 : }
2401 0 : json_response(StatusCode::OK, ())
2402 0 : }
2403 0 : .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2404 0 : .await
2405 0 : }
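// Illustrative sketch (not part of the original source): how the `EnumSet` of
// `CompactFlags` above accumulates. Each `?<flag>=true` query parameter OR-s one
// variant into an initially empty set. (This assumes `CompactFlags` derives
// `EnumSetType`, which is what `EnumSet` requires.)
#[cfg(test)]
mod compact_flags_example {
    use enumset::EnumSet;

    use super::CompactFlags;

    #[test]
    fn flags_accumulate() {
        let mut flags = EnumSet::empty();
        flags |= CompactFlags::ForceRepartition;
        flags |= CompactFlags::DryRun;
        assert!(flags.contains(CompactFlags::ForceRepartition));
        assert!(!flags.contains(CompactFlags::ForceL0Compaction));
        assert_eq!(flags.len(), 2);
    }
}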
2406 :
2407 0 : async fn timeline_mark_invisible_handler(
2408 0 : mut request: Request<Body>,
2409 0 : _cancel: CancellationToken,
2410 0 : ) -> Result<Response<Body>, ApiError> {
2411 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2412 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2413 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2414 :
2415 0 : let compact_request = json_request_maybe::<Option<MarkInvisibleRequest>>(&mut request).await?;
2416 :
2417 0 : let state = get_state(&request);
2418 :
2419 0 : let visibility = match compact_request {
2420 0 : Some(req) => match req.is_visible {
2421 0 : Some(true) => TimelineVisibilityState::Visible,
2422 0 : Some(false) | None => TimelineVisibilityState::Invisible,
2423 : },
2424 0 : None => TimelineVisibilityState::Invisible,
2425 : };
2426 :
2427 0 : async {
2428 0 : let tenant = state
2429 0 : .tenant_manager
2430 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2431 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2432 0 : timeline.remote_client.schedule_index_upload_for_timeline_invisible_state(visibility).map_err(ApiError::InternalServerError)?;
2433 0 : json_response(StatusCode::OK, ())
2434 0 : }
2435 0 : .instrument(info_span!("manual_timeline_mark_invisible", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2436 0 : .await
2437 0 : }
2438 :
2439 : // Run offload immediately on given timeline.
2440 0 : async fn timeline_offload_handler(
2441 0 : request: Request<Body>,
2442 0 : _cancel: CancellationToken,
2443 0 : ) -> Result<Response<Body>, ApiError> {
2444 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2445 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2446 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2447 :
2448 0 : let state = get_state(&request);
2449 :
2450 0 : async {
2451 0 : let tenant = state
2452 0 : .tenant_manager
2453 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2454 :
2455 0 : if tenant.get_offloaded_timeline(timeline_id).is_ok() {
2456 0 : return json_response(StatusCode::OK, ());
2457 0 : }
2458 0 : let timeline =
2459 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2460 0 : .await?;
2461 :
2462 0 : if !tenant.timeline_has_no_attached_children(timeline_id) {
2463 0 : return Err(ApiError::PreconditionFailed(
2464 0 : "timeline has attached children".into(),
2465 0 : ));
2466 0 : }
2467 0 : if let (false, reason) = timeline.can_offload() {
2468 0 : return Err(ApiError::PreconditionFailed(
2469 0 : format!("Timeline::can_offload() check failed: {reason}") .into(),
2470 0 : ));
2471 0 : }
2472 0 : offload_timeline(&tenant, &timeline)
2473 0 : .await
2474 0 : .map_err(|e| {
2475 0 : match e {
2476 0 : OffloadError::Cancelled => ApiError::ResourceUnavailable("Timeline shutting down".into()),
2477 0 : OffloadError::AlreadyInProgress => ApiError::Conflict("Timeline already being offloaded or deleted".into()),
2478 0 : _ => ApiError::InternalServerError(anyhow!(e))
2479 : }
2480 0 : })?;
2481 :
2482 0 : json_response(StatusCode::OK, ())
2483 0 : }
2484 0 : .instrument(info_span!("manual_timeline_offload", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2485 0 : .await
2486 0 : }
2487 :
2488 : // Run checkpoint immediately on given timeline.
2489 0 : async fn timeline_checkpoint_handler(
2490 0 : request: Request<Body>,
2491 0 : cancel: CancellationToken,
2492 0 : ) -> Result<Response<Body>, ApiError> {
2493 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2494 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2495 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2496 :
2497 0 : let state = get_state(&request);
2498 :
2499 0 : let mut flags = EnumSet::empty();
2500 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2501 0 : flags |= CompactFlags::ForceL0Compaction;
2502 0 : }
2503 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2504 0 : flags |= CompactFlags::ForceRepartition;
2505 0 : }
2506 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2507 0 : flags |= CompactFlags::ForceImageLayerCreation;
2508 0 : }
2509 :
2510 : // By default, checkpoints come with a compaction, but this may optionally be disabled by tests that just want to flush + upload.
2511 0 : let compact = parse_query_param::<_, bool>(&request, "compact")?.unwrap_or(true);
2512 :
2513 0 : let wait_until_flushed: bool =
2514 0 : parse_query_param(&request, "wait_until_flushed")?.unwrap_or(true);
2515 :
2516 0 : let wait_until_uploaded =
2517 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2518 :
2519 0 : async {
2520 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2521 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2522 0 : if wait_until_flushed {
2523 0 : timeline.freeze_and_flush().await
2524 : } else {
2525 0 : timeline.freeze().await.and(Ok(()))
2526 0 : }.map_err(|e| {
2527 0 : match e {
2528 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
2529 0 : other => ApiError::InternalServerError(other.into()),
2530 :
2531 : }
2532 0 : })?;
2533 0 : if compact {
2534 0 : timeline
2535 0 : .compact(&cancel, flags, &ctx)
2536 0 : .await
2537 0 : .map_err(|e|
2538 0 : if e.is_cancel() {
2539 0 : ApiError::ShuttingDown
2540 : } else {
2541 0 : ApiError::InternalServerError(e.into_anyhow())
2542 0 : }
2543 0 : )?;
2544 0 : }
2545 :
2546 0 : if wait_until_uploaded {
2547 0 : tracing::info!("Waiting for uploads to complete...");
2548 0 : timeline.remote_client.wait_completion().await
2549 : // XXX map to correct ApiError for the cases where it's due to shutdown
2550 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2551 0 : tracing::info!("Uploads completed up to {}", timeline.get_remote_consistent_lsn_projected().unwrap_or(Lsn(0)));
2552 0 : }
2553 :
2554 0 : json_response(StatusCode::OK, ())
2555 0 : }
2556 0 : .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2557 0 : .await
2558 0 : }
2559 :
2560 0 : async fn timeline_download_remote_layers_handler_post(
2561 0 : mut request: Request<Body>,
2562 0 : _cancel: CancellationToken,
2563 0 : ) -> Result<Response<Body>, ApiError> {
2564 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2565 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2566 0 : let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
2567 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2568 :
2569 0 : let state = get_state(&request);
2570 :
2571 0 : let timeline =
2572 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2573 0 : .await?;
2574 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
2575 0 : .with_scope_timeline(&timeline);
2576 0 : match timeline.spawn_download_all_remote_layers(body, &ctx).await {
2577 0 : Ok(st) => json_response(StatusCode::ACCEPTED, st),
2578 0 : Err(st) => json_response(StatusCode::CONFLICT, st),
2579 : }
2580 0 : }
2581 :
2582 0 : async fn timeline_download_remote_layers_handler_get(
2583 0 : request: Request<Body>,
2584 0 : _cancel: CancellationToken,
2585 0 : ) -> Result<Response<Body>, ApiError> {
2586 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2587 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2588 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2589 0 : let state = get_state(&request);
2590 :
2591 0 : let timeline =
2592 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2593 0 : .await?;
2594 0 : let info = timeline
2595 0 : .get_download_all_remote_layers_task_info()
2596 0 : .context("task never started since last pageserver process start")
2597 0 : .map_err(|e| ApiError::NotFound(e.into()))?;
2598 0 : json_response(StatusCode::OK, info)
2599 0 : }
2600 :
2601 0 : async fn timeline_detach_ancestor_handler(
2602 0 : request: Request<Body>,
2603 0 : _cancel: CancellationToken,
2604 0 : ) -> Result<Response<Body>, ApiError> {
2605 : use pageserver_api::models::detach_ancestor::AncestorDetached;
2606 :
2607 : use crate::tenant::timeline::detach_ancestor;
2608 :
2609 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2610 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2611 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2612 0 : let behavior: Option<DetachBehavior> = parse_query_param(&request, "detach_behavior")?;
2613 :
2614 0 : let behavior = behavior.unwrap_or_default();
2615 :
2616 0 : let span = tracing::info_span!("detach_ancestor", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
2617 :
2618 0 : async move {
2619 0 : let mut options = detach_ancestor::Options::default();
2620 :
2621 0 : let rewrite_concurrency =
2622 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "rewrite_concurrency")?;
2623 0 : let copy_concurrency =
2624 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "copy_concurrency")?;
2625 :
2626 0 : [
2627 0 : (&mut options.rewrite_concurrency, rewrite_concurrency),
2628 0 : (&mut options.copy_concurrency, copy_concurrency),
2629 0 : ]
2630 0 : .into_iter()
2631 0 : .filter_map(|(target, val)| val.map(|val| (target, val)))
2632 0 : .for_each(|(target, val)| *target = val);
2633 :
2634 0 : let state = get_state(&request);
2635 :
2636 0 : let tenant = state
2637 0 : .tenant_manager
2638 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2639 :
2640 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2641 :
2642 0 : let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download);
2643 0 : let ctx = &ctx;
2644 :
2645 : // Flush the upload queues of all timelines before detaching the ancestor. We do the same thing again
2646 : // during shutdown. Flushing early ensures the pageserver is not left with a large upload backlog,
2647 : // which would create downtime during timeline reloads.
2648 0 : for timeline in tenant.list_timelines() {
2649 0 : timeline
2650 0 : .remote_client
2651 0 : .wait_completion()
2652 0 : .await
2653 0 : .map_err(|e| {
2654 0 : ApiError::PreconditionFailed(format!("cannot drain upload queue: {e}").into())
2655 0 : })?;
2656 : }
2657 :
2658 0 : tracing::info!("all timeline upload queues are drained");
2659 :
2660 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2661 0 : let ctx = &ctx.with_scope_timeline(&timeline);
2662 :
2663 0 : let progress = timeline
2664 0 : .prepare_to_detach_from_ancestor(&tenant, options, behavior, ctx)
2665 0 : .await?;
2666 :
2667 : // uncomment to allow the Tenant to be dropped as early as possible
2668 : // drop(tenant);
2669 :
2670 0 : let resp = match progress {
2671 0 : detach_ancestor::Progress::Prepared(attempt, prepared) => {
2672 : // it would be great to tag the guard onto the tenant activation future
2673 0 : let reparented_timelines = state
2674 0 : .tenant_manager
2675 0 : .complete_detaching_timeline_ancestor(
2676 0 : tenant_shard_id,
2677 0 : timeline_id,
2678 0 : prepared,
2679 0 : behavior,
2680 0 : attempt,
2681 0 : ctx,
2682 0 : )
2683 0 : .await?;
2684 :
2685 0 : AncestorDetached {
2686 0 : reparented_timelines,
2687 0 : }
2688 : }
2689 0 : detach_ancestor::Progress::Done(resp) => resp,
2690 : };
2691 :
2692 0 : json_response(StatusCode::OK, resp)
2693 0 : }
2694 0 : .instrument(span)
2695 0 : .await
2696 0 : }
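// Illustrative sketch (not part of the original source) of the option-override
// pattern above: pairs of `(&mut target, Option<value>)` let several optional
// query parameters overwrite defaults without one `if let Some(..)` per field.
#[cfg(test)]
mod option_override_pattern_example {
    #[test]
    fn present_values_overwrite_defaults() {
        let (mut rewrite, mut copy) = (1usize, 2usize);
        [(&mut rewrite, Some(8usize)), (&mut copy, None)]
            .into_iter()
            .filter_map(|(target, val)| val.map(|val| (target, val)))
            .for_each(|(target, val)| *target = val);
        assert_eq!((rewrite, copy), (8, 2));
    }
}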
2697 :
2698 0 : async fn deletion_queue_flush(
2699 0 : r: Request<Body>,
2700 0 : cancel: CancellationToken,
2701 0 : ) -> Result<Response<Body>, ApiError> {
2702 0 : let state = get_state(&r);
2703 :
2704 0 : let execute = parse_query_param(&r, "execute")?.unwrap_or(false);
2705 :
2706 0 : let flush = async {
2707 0 : if execute {
2708 0 : state.deletion_queue_client.flush_execute().await
2709 : } else {
2710 0 : state.deletion_queue_client.flush().await
2711 : }
2712 0 : }
2713 : // DeletionQueueError's only case is shutting down.
2714 0 : .map_err(|_| ApiError::ShuttingDown);
2715 :
2716 0 : tokio::select! {
2717 0 : res = flush => {
2718 0 : res.map(|()| json_response(StatusCode::OK, ()))?
2719 : }
2720 0 : _ = cancel.cancelled() => {
2721 0 : Err(ApiError::ShuttingDown)
2722 : }
2723 : }
2724 0 : }
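// Illustrative sketch (not part of the original source) of the `select!` race
// above: whichever completes first, the flush or the cancellation, decides the
// response. An already-cancelled token wins over work that never finishes.
// (Assumes tokio's `macros` and `rt` features, which `select!` usage implies.)
#[cfg(test)]
mod select_cancellation_example {
    use tokio_util::sync::CancellationToken;

    #[tokio::test]
    async fn cancellation_wins_over_pending_work() {
        let cancel = CancellationToken::new();
        cancel.cancel();
        let outcome = tokio::select! {
            _ = std::future::pending::<()>() => "flushed",
            _ = cancel.cancelled() => "shutting down",
        };
        assert_eq!(outcome, "shutting down");
    }
}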
2725 :
2726 : /// Response payload for the debug `GetPage@Lsn` handlers below; useful for manual debugging.
2727 0 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
2728 : struct GetPageResponse {
2729 : pub page: Bytes,
2730 : pub layers_visited: u32,
2731 : pub delta_layers_visited: u32,
2732 : pub records: Vec<(Lsn, NeonWalRecord)>,
2733 : pub img: Option<(Lsn, Bytes)>,
2734 : }
2735 :
2736 0 : async fn getpage_at_lsn_handler(
2737 0 : request: Request<Body>,
2738 0 : cancel: CancellationToken,
2739 0 : ) -> Result<Response<Body>, ApiError> {
2740 0 : getpage_at_lsn_handler_inner(false, request, cancel).await
2741 0 : }
2742 :
2743 0 : async fn touchpage_at_lsn_handler(
2744 0 : request: Request<Body>,
2745 0 : cancel: CancellationToken,
2746 0 : ) -> Result<Response<Body>, ApiError> {
2747 0 : getpage_at_lsn_handler_inner(true, request, cancel).await
2748 0 : }
2749 :
2750 : /// Checks whether a `GetPage@Lsn` request succeeds; useful for manual debugging.
2751 0 : async fn getpage_at_lsn_handler_inner(
2752 0 : touch: bool,
2753 0 : request: Request<Body>,
2754 0 : _cancel: CancellationToken,
2755 0 : ) -> Result<Response<Body>, ApiError> {
2756 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2757 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2758 : // Require pageserver admin permission for this API instead of only a tenant-level token.
2759 0 : check_permission(&request, None)?;
2760 0 : let state = get_state(&request);
2761 :
2762 : struct Key(pageserver_api::key::Key);
2763 :
2764 : impl std::str::FromStr for Key {
2765 : type Err = anyhow::Error;
2766 :
2767 0 : fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
2768 0 : pageserver_api::key::Key::from_hex(s).map(Key)
2769 0 : }
2770 : }
2771 :
2772 0 : let key: Key = parse_query_param(&request, "key")?
2773 0 : .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'key' query parameter")))?;
2774 0 : let lsn: Option<Lsn> = parse_query_param(&request, "lsn")?;
2775 :
2776 0 : async {
2777 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2778 0 : let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
2779 0 : .download_behavior(DownloadBehavior::Download)
2780 0 : .scope(context::Scope::new_timeline(&timeline))
2781 0 : .read_path_debug(true)
2782 0 : .root();
2783 :
2784 : // Use last_record_lsn if no lsn is provided
2785 0 : let lsn = lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2786 :
2787 0 : if touch {
2788 0 : json_response(StatusCode::OK, ())
2789 : } else {
2790 0 : let mut reconstruct_state = ValuesReconstructState::new_with_debug(IoConcurrency::sequential());
2791 0 : let page = timeline.debug_get(key.0, lsn, &ctx, &mut reconstruct_state).await?;
2792 0 : let response = GetPageResponse {
2793 0 : page,
2794 0 : layers_visited: reconstruct_state.get_layers_visited(),
2795 0 : delta_layers_visited: reconstruct_state.get_delta_layers_visited(),
2796 0 : records: reconstruct_state.debug_state.records.clone(),
2797 0 : img: reconstruct_state.debug_state.img.clone(),
2798 0 : };
2799 :
2800 0 : json_response(StatusCode::OK, response)
2801 : }
2802 0 : }
2803 0 : .instrument(info_span!("timeline_debug_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2804 0 : .await
2805 0 : }
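// Illustrative sketch (not part of the original source) of the newtype trick
// above: `parse_query_param` needs `FromStr`, so a local wrapper adds the hex
// parsing we want to a type that doesn't implement it that way itself.
#[cfg(test)]
mod fromstr_newtype_example {
    struct HexByte(u8);

    impl std::str::FromStr for HexByte {
        type Err = std::num::ParseIntError;

        fn from_str(s: &str) -> Result<Self, Self::Err> {
            u8::from_str_radix(s, 16).map(HexByte)
        }
    }

    #[test]
    fn parses_hex() {
        let byte: HexByte = "ff".parse().unwrap();
        assert_eq!(byte.0, 0xff);
    }
}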
2806 :
2807 0 : async fn timeline_collect_keyspace(
2808 0 : request: Request<Body>,
2809 0 : _cancel: CancellationToken,
2810 0 : ) -> Result<Response<Body>, ApiError> {
2811 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2812 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2813 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2814 0 : let state = get_state(&request);
2815 :
2816 0 : let at_lsn: Option<Lsn> = parse_query_param(&request, "at_lsn")?;
2817 :
2818 0 : async {
2819 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2820 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2821 0 : let at_lsn = at_lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2822 0 : let (dense_ks, sparse_ks) = timeline
2823 0 : .collect_keyspace(at_lsn, &ctx)
2824 0 : .await
2825 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2826 :
2827 : // This API is currently used by pagebench. Pagebench will iterate all keys within the keyspace.
2828 : // Therefore, we split dense/sparse keys in this API.
2829 0 : let res = pageserver_api::models::partitioning::Partitioning { keys: dense_ks, sparse_keys: sparse_ks, at_lsn };
2830 :
2831 0 : json_response(StatusCode::OK, res)
2832 0 : }
2833 0 : .instrument(info_span!("timeline_collect_keyspace", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2834 0 : .await
2835 0 : }
2836 :
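// Illustrative usage sketch (host/port, IDs and LSN are placeholders):
// pagebench-style clients fetch the keyspace and then iterate the returned
// `keys` / `sparse_keys` ranges:
//
//   curl -s "http://localhost:9898/v1/tenant/$TENANT_SHARD/timeline/$TIMELINE/keyspace?at_lsn=0/169F070"
//
// Omitting `at_lsn` collects the keyspace at the timeline's last record LSN,
// as implemented above.
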
2837 0 : async fn active_timeline_of_active_tenant(
2838 0 : tenant_manager: &TenantManager,
2839 0 : tenant_shard_id: TenantShardId,
2840 0 : timeline_id: TimelineId,
2841 0 : ) -> Result<Arc<Timeline>, ApiError> {
2842 0 : let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
2843 :
2844 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2845 :
2846 0 : Ok(tenant.get_timeline(timeline_id, true)?)
2847 0 : }
2848 :
2849 0 : async fn always_panic_handler(
2850 0 : req: Request<Body>,
2851 0 : _cancel: CancellationToken,
2852 0 : ) -> Result<Response<Body>, ApiError> {
2853 : // Deliberately cause a panic to exercise the panic hook registered via std::panic::set_hook().
2854 : // For pageserver, the relevant panic hook is `tracing_panic_hook`, and the `sentry` crate's wrapper around it.
2855 : // Use catch_unwind to ensure that neither tokio nor hyper is disturbed by our panic.
2856 0 : let query = req.uri().query();
2857 0 : let _ = std::panic::catch_unwind(|| {
2858 0 : panic!("unconditional panic for testing panic hook integration; request query: {query:?}")
2859 : });
2860 0 : json_response(StatusCode::NO_CONTENT, ())
2861 0 : }
2862 :
2863 0 : async fn disk_usage_eviction_run(
2864 0 : mut r: Request<Body>,
2865 0 : cancel: CancellationToken,
2866 0 : ) -> Result<Response<Body>, ApiError> {
2867 0 : check_permission(&r, None)?;
2868 :
2869 0 : #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
2870 : struct Config {
2871 : /// How many bytes to evict before reporting that pressure is relieved.
2872 : evict_bytes: u64,
2873 :
2874 : #[serde(default)]
2875 : eviction_order: pageserver_api::config::EvictionOrder,
2876 : }
2877 :
2878 : #[derive(Debug, Clone, Copy, serde::Serialize)]
2879 : struct Usage {
2880 : // remains unchanged after instantiation of the struct
2881 : evict_bytes: u64,
2882 : // updated by `add_available_bytes`
2883 : freed_bytes: u64,
2884 : }
2885 :
2886 : impl crate::disk_usage_eviction_task::Usage for Usage {
2887 0 : fn has_pressure(&self) -> bool {
2888 0 : self.evict_bytes > self.freed_bytes
2889 0 : }
2890 :
2891 0 : fn add_available_bytes(&mut self, bytes: u64) {
2892 0 : self.freed_bytes += bytes;
2893 0 : }
2894 : }
2895 :
2896 0 : let config = json_request::<Config>(&mut r).await?;
2897 :
2898 0 : let usage = Usage {
2899 0 : evict_bytes: config.evict_bytes,
2900 0 : freed_bytes: 0,
2901 0 : };
2902 :
2903 0 : let state = get_state(&r);
2904 0 : let eviction_state = state.disk_usage_eviction_state.clone();
2905 :
2906 0 : let res = crate::disk_usage_eviction_task::disk_usage_eviction_task_iteration_impl(
2907 0 : &eviction_state,
2908 0 : &state.remote_storage,
2909 0 : usage,
2910 0 : &state.tenant_manager,
2911 0 : config.eviction_order.into(),
2912 0 : &cancel,
2913 0 : )
2914 0 : .await;
2915 :
2916 0 : info!(?res, "disk_usage_eviction_task_iteration_impl finished");
2917 :
2918 0 : let res = res.map_err(ApiError::InternalServerError)?;
2919 :
2920 0 : json_response(StatusCode::OK, res)
2921 0 : }
2922 :
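// Illustrative request sketch (assumption: management API listening on
// localhost:9898), matching the `Config` struct above. `eviction_order` may be
// omitted because of `#[serde(default)]`:
//
//   curl -X PUT http://localhost:9898/v1/disk_usage_eviction/run \
//        -d '{"evict_bytes": 1073741824}'
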
2923 0 : async fn secondary_upload_handler(
2924 0 : request: Request<Body>,
2925 0 : _cancel: CancellationToken,
2926 0 : ) -> Result<Response<Body>, ApiError> {
2927 0 : let state = get_state(&request);
2928 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2929 0 : state
2930 0 : .secondary_controller
2931 0 : .upload_tenant(tenant_shard_id)
2932 0 : .await?;
2933 :
2934 0 : json_response(StatusCode::OK, ())
2935 0 : }
2936 :
2937 0 : async fn tenant_scan_remote_handler(
2938 0 : request: Request<Body>,
2939 0 : cancel: CancellationToken,
2940 0 : ) -> Result<Response<Body>, ApiError> {
2941 0 : let state = get_state(&request);
2942 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
2943 :
2944 0 : let mut response = TenantScanRemoteStorageResponse::default();
2945 :
2946 0 : let (shards, _other_keys) =
2947 0 : list_remote_tenant_shards(&state.remote_storage, tenant_id, cancel.clone())
2948 0 : .await
2949 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2950 :
2951 0 : for tenant_shard_id in shards {
2952 0 : let (timeline_ids, _other_keys) =
2953 0 : list_remote_timelines(&state.remote_storage, tenant_shard_id, cancel.clone())
2954 0 : .await
2955 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2956 :
2957 0 : let mut generation = Generation::none();
2958 0 : for timeline_id in timeline_ids {
2959 0 : match download_index_part(
2960 0 : &state.remote_storage,
2961 0 : &tenant_shard_id,
2962 0 : &timeline_id,
2963 : Generation::MAX,
2964 0 : &cancel,
2965 : )
2966 0 : .instrument(info_span!("download_index_part",
2967 : tenant_id=%tenant_shard_id.tenant_id,
2968 0 : shard_id=%tenant_shard_id.shard_slug(),
2969 : %timeline_id))
2970 0 : .await
2971 : {
2972 0 : Ok((index_part, index_generation, _index_mtime)) => {
2973 0 : tracing::info!(
2974 0 : "Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, {} consistent LSN)",
2975 0 : index_part.layer_metadata.len(),
2976 0 : index_part.metadata.disk_consistent_lsn()
2977 : );
2978 0 : generation = std::cmp::max(generation, index_generation);
2979 : }
2980 : Err(DownloadError::NotFound) => {
2981 : // This is normal for tenants that were created with multiple shards: they have an unsharded path
2982 : // containing the timeline's initdb tarball but no index. In any other case it is unexpected.
2983 0 : tracing::info!(
2984 0 : "Timeline path {tenant_shard_id}/{timeline_id} exists in remote storage but has no index, skipping"
2985 : );
2986 0 : continue;
2987 : }
2988 0 : Err(e) => {
2989 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2990 : }
2991 : };
2992 : }
2993 :
2994 0 : let result =
2995 0 : download_tenant_manifest(&state.remote_storage, &tenant_shard_id, generation, &cancel)
2996 0 : .instrument(info_span!("download_tenant_manifest",
2997 : tenant_id=%tenant_shard_id.tenant_id,
2998 0 : shard_id=%tenant_shard_id.shard_slug()))
2999 0 : .await;
3000 0 : let stripe_size = match result {
3001 0 : Ok((manifest, _, _)) => manifest.stripe_size,
3002 0 : Err(DownloadError::NotFound) => None,
3003 0 : Err(err) => return Err(ApiError::InternalServerError(anyhow!(err))),
3004 : };
3005 :
3006 0 : response.shards.push(TenantScanRemoteStorageShard {
3007 0 : tenant_shard_id,
3008 0 : generation: generation.into(),
3009 0 : stripe_size,
3010 0 : });
3011 : }
3012 :
3013 0 : if response.shards.is_empty() {
3014 0 : return Err(ApiError::NotFound(
3015 0 : anyhow::anyhow!("No shards found for tenant ID {tenant_id}").into(),
3016 0 : ));
3017 0 : }
3018 :
3019 0 : json_response(StatusCode::OK, response)
3020 0 : }
3021 :
3022 0 : async fn secondary_download_handler(
3023 0 : request: Request<Body>,
3024 0 : _cancel: CancellationToken,
3025 0 : ) -> Result<Response<Body>, ApiError> {
3026 0 : let state = get_state(&request);
3027 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3028 0 : let wait = parse_query_param(&request, "wait_ms")?.map(Duration::from_millis);
3029 :
3030 : // We don't need this to issue the download request, but:
3031 : // - it enables us to cleanly return 404 if we get a request for an absent shard
3032 : // - we will use this to provide status feedback in the response
3033 0 : let Some(secondary_tenant) = state
3034 0 : .tenant_manager
3035 0 : .get_secondary_tenant_shard(tenant_shard_id)
3036 : else {
3037 0 : return Err(ApiError::NotFound(
3038 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
3039 0 : ));
3040 : };
3041 :
3042 0 : let timeout = wait.unwrap_or(Duration::MAX);
3043 :
3044 0 : let result = tokio::time::timeout(
3045 0 : timeout,
3046 0 : state.secondary_controller.download_tenant(tenant_shard_id),
3047 0 : )
3048 0 : .await;
3049 :
3050 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
3051 :
3052 0 : let status = match result {
3053 : Ok(Ok(())) => {
3054 0 : if progress.layers_downloaded >= progress.layers_total {
3055 : // Download job ran to completion
3056 0 : StatusCode::OK
3057 : } else {
3058 : // Download dropped out without errors because it ran out of time budget
3059 0 : StatusCode::ACCEPTED
3060 : }
3061 : }
3062 : // Edge case: downloads aren't usually fallible, since things like a missing
3063 : // heatmap are considered okay. We could still get an error here in the unlikely
3064 : // case that the tenant was detached between our check above and executing the download job.
3065 0 : Ok(Err(e)) => return Err(e.into()),
3066 : // A timeout is not an error: we have started the download, we're just not done
3067 : // yet. The caller will get a response body indicating status.
3068 0 : Err(_) => StatusCode::ACCEPTED,
3069 : };
3070 :
3071 0 : json_response(status, progress)
3072 0 : }
3073 :
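// Illustrative usage sketch (host/port and IDs are placeholders): trigger a
// secondary-mode download with a 5s budget. Per the handler above, a 200
// response means the download completed, while a 202 means it is still in
// progress and the body reports the current progress:
//
//   curl -X POST "http://localhost:9898/v1/tenant/$TENANT_SHARD/secondary/download?wait_ms=5000"
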
3074 0 : async fn wait_lsn_handler(
3075 0 : mut request: Request<Body>,
3076 0 : cancel: CancellationToken,
3077 0 : ) -> Result<Response<Body>, ApiError> {
3078 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3079 0 : let wait_lsn_request: TenantWaitLsnRequest = json_request(&mut request).await?;
3080 :
3081 0 : let state = get_state(&request);
3082 0 : let tenant = state
3083 0 : .tenant_manager
3084 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3085 :
3086 0 : let mut wait_futures = Vec::default();
3087 0 : for timeline in tenant.list_timelines() {
3088 0 : let Some(lsn) = wait_lsn_request.timelines.get(&timeline.timeline_id) else {
3089 0 : continue;
3090 : };
3091 :
3092 0 : let fut = {
3093 0 : let timeline = timeline.clone();
3094 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
3095 0 : async move {
3096 0 : timeline
3097 0 : .wait_lsn(
3098 0 : *lsn,
3099 0 : WaitLsnWaiter::HttpEndpoint,
3100 0 : WaitLsnTimeout::Custom(wait_lsn_request.timeout),
3101 0 : &ctx,
3102 0 : )
3103 0 : .await
3104 0 : }
3105 : };
3106 0 : wait_futures.push(fut);
3107 : }
3108 :
3109 0 : if wait_futures.is_empty() {
3110 0 : return json_response(StatusCode::NOT_FOUND, ());
3111 0 : }
3112 :
3113 0 : let all_done = tokio::select! {
3114 0 : results = join_all(wait_futures) => {
3115 0 : results.iter().all(|res| res.is_ok())
3116 : },
3117 0 : _ = cancel.cancelled() => {
3118 0 : return Err(ApiError::Cancelled);
3119 : }
3120 : };
3121 :
3122 0 : let status = if all_done {
3123 0 : StatusCode::OK
3124 : } else {
3125 0 : StatusCode::ACCEPTED
3126 : };
3127 :
3128 0 : json_response(status, ())
3129 0 : }
3130 :
3131 0 : async fn secondary_status_handler(
3132 0 : request: Request<Body>,
3133 0 : _cancel: CancellationToken,
3134 0 : ) -> Result<Response<Body>, ApiError> {
3135 0 : let state = get_state(&request);
3136 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3137 :
3138 0 : let Some(secondary_tenant) = state
3139 0 : .tenant_manager
3140 0 : .get_secondary_tenant_shard(tenant_shard_id)
3141 : else {
3142 0 : return Err(ApiError::NotFound(
3143 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
3144 0 : ));
3145 : };
3146 :
3147 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
3148 :
3149 0 : json_response(StatusCode::OK, progress)
3150 0 : }
3151 :
3152 0 : async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
3153 0 : json_response(
3154 : StatusCode::NOT_FOUND,
3155 0 : HttpErrorBody::from_msg("page not found".to_owned()),
3156 : )
3157 0 : }
3158 :
3159 0 : async fn post_tracing_event_handler(
3160 0 : mut r: Request<Body>,
3161 0 : _cancel: CancellationToken,
3162 0 : ) -> Result<Response<Body>, ApiError> {
3163 0 : #[derive(Debug, serde::Deserialize)]
3164 : #[serde(rename_all = "lowercase")]
3165 : enum Level {
3166 : Error,
3167 : Warn,
3168 : Info,
3169 : Debug,
3170 : Trace,
3171 : }
3172 0 : #[derive(Debug, serde::Deserialize)]
3173 : struct Request {
3174 : level: Level,
3175 : message: String,
3176 : }
3177 0 : let body: Request = json_request(&mut r)
3178 0 : .await
3179 0 : .map_err(|_| ApiError::BadRequest(anyhow::anyhow!("invalid JSON body")))?;
3180 :
3181 0 : match body.level {
3182 0 : Level::Error => tracing::error!(?body.message),
3183 0 : Level::Warn => tracing::warn!(?body.message),
3184 0 : Level::Info => tracing::info!(?body.message),
3185 0 : Level::Debug => tracing::debug!(?body.message),
3186 0 : Level::Trace => tracing::trace!(?body.message),
3187 : }
3188 :
3189 0 : json_response(StatusCode::OK, ())
3190 0 : }
3191 :
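// Illustrative request sketch, matching the `Request`/`Level` shapes above
// (lowercase level names come from `rename_all = "lowercase"`). The route is
// registered via `testing_api_handler`, so it requires a 'testing' build:
//
//   curl -X POST http://localhost:9898/v1/tracing/event \
//        -d '{"level": "info", "message": "hello from the test suite"}'
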
3192 0 : async fn put_io_engine_handler(
3193 0 : mut r: Request<Body>,
3194 0 : _cancel: CancellationToken,
3195 0 : ) -> Result<Response<Body>, ApiError> {
3196 0 : check_permission(&r, None)?;
3197 0 : let kind: crate::virtual_file::IoEngineKind = json_request(&mut r).await?;
3198 0 : crate::virtual_file::io_engine::set(kind);
3199 0 : json_response(StatusCode::OK, ())
3200 0 : }
3201 :
3202 0 : async fn put_io_mode_handler(
3203 0 : mut r: Request<Body>,
3204 0 : _cancel: CancellationToken,
3205 0 : ) -> Result<Response<Body>, ApiError> {
3206 0 : check_permission(&r, None)?;
3207 0 : let mode: IoMode = json_request(&mut r).await?;
3208 0 : crate::virtual_file::set_io_mode(mode);
3209 0 : json_response(StatusCode::OK, ())
3210 0 : }
3211 :
3212 : /// Polled by the control plane.
3213 : ///
3214 : /// See [`crate::utilization`].
3215 0 : async fn get_utilization(
3216 0 : r: Request<Body>,
3217 0 : _cancel: CancellationToken,
3218 0 : ) -> Result<Response<Body>, ApiError> {
3219 0 : fail::fail_point!("get-utilization-http-handler", |_| {
3220 0 : Err(ApiError::ResourceUnavailable("failpoint".into()))
3221 0 : });
3222 :
3223 : // this could probably be completely public, but let's make that change later.
3224 0 : check_permission(&r, None)?;
3225 :
3226 0 : let state = get_state(&r);
3227 0 : let mut g = state.latest_utilization.lock().await;
3228 :
3229 0 : let regenerate_every = Duration::from_secs(1);
3230 0 : let still_valid = g
3231 0 : .as_ref()
3232 0 : .is_some_and(|(captured_at, _)| captured_at.elapsed() < regenerate_every);
3233 :
3234 : // Avoid needless statvfs calls, even though those should be non-blocking and fast.
3235 : // Regenerate at most at 1 Hz, to allow polling at any rate.
3236 0 : if !still_valid {
3237 0 : let path = state.conf.tenants_path();
3238 0 : let doc =
3239 0 : crate::utilization::regenerate(state.conf, path.as_std_path(), &state.tenant_manager)
3240 0 : .map_err(ApiError::InternalServerError)?;
3241 :
3242 0 : let mut buf = Vec::new();
3243 0 : serde_json::to_writer(&mut buf, &doc)
3244 0 : .context("serialize")
3245 0 : .map_err(ApiError::InternalServerError)?;
3246 :
3247 0 : let body = bytes::Bytes::from(buf);
3248 :
3249 0 : *g = Some((std::time::Instant::now(), body));
3250 0 : }
3251 :
3252 : // hyper 0.14 doesn't yet have Response::clone, so this is a bit of extra legwork
3253 0 : let cached = g.as_ref().expect("just set").1.clone();
3254 :
3255 0 : Response::builder()
3256 0 : .header(hyper::http::header::CONTENT_TYPE, "application/json")
3257 : // We considered using an HTTP Date header, but its one-second precision
3258 : // does not give any debugging aid
3259 0 : .status(StatusCode::OK)
3260 0 : .body(hyper::Body::from(cached))
3261 0 : .context("build response")
3262 0 : .map_err(ApiError::InternalServerError)
3263 0 : }
3264 :
3265 : /// HADRON
3266 0 : async fn list_tenant_visible_size_handler(
3267 0 : request: Request<Body>,
3268 0 : _cancel: CancellationToken,
3269 0 : ) -> Result<Response<Body>, ApiError> {
3270 0 : check_permission(&request, None)?;
3271 0 : let state = get_state(&request);
3272 :
3273 0 : let mut map = BTreeMap::new();
3274 0 : for (tenant_shard_id, slot) in state.tenant_manager.list() {
3275 0 : match slot {
3276 0 : TenantSlot::Attached(tenant) => {
3277 0 : let visible_size = tenant.get_visible_size();
3278 0 : map.insert(tenant_shard_id, visible_size);
3279 0 : }
3280 : TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
3281 0 : continue;
3282 : }
3283 : }
3284 : }
3285 :
3286 0 : json_response(StatusCode::OK, map)
3287 0 : }
3288 :
3289 0 : async fn list_aux_files(
3290 0 : mut request: Request<Body>,
3291 0 : _cancel: CancellationToken,
3292 0 : ) -> Result<Response<Body>, ApiError> {
3293 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3294 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3295 0 : let body: ListAuxFilesRequest = json_request(&mut request).await?;
3296 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3297 :
3298 0 : let state = get_state(&request);
3299 :
3300 0 : let timeline =
3301 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3302 0 : .await?;
3303 :
3304 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3305 0 : state.conf.get_vectored_concurrent_io,
3306 0 : timeline.gate.enter().map_err(|_| ApiError::Cancelled)?,
3307 : );
3308 :
3309 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
3310 0 : .with_scope_timeline(&timeline);
3311 0 : let files = timeline
3312 0 : .list_aux_files(body.lsn, &ctx, io_concurrency)
3313 0 : .await?;
3314 0 : json_response(StatusCode::OK, files)
3315 0 : }
3316 :
3317 0 : async fn perf_info(
3318 0 : request: Request<Body>,
3319 0 : _cancel: CancellationToken,
3320 0 : ) -> Result<Response<Body>, ApiError> {
3321 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3322 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3323 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3324 :
3325 0 : let state = get_state(&request);
3326 :
3327 0 : let timeline =
3328 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3329 0 : .await?;
3330 :
3331 0 : let result = timeline.perf_info().await;
3332 :
3333 0 : json_response(StatusCode::OK, result)
3334 0 : }
3335 :
3336 0 : async fn ingest_aux_files(
3337 0 : mut request: Request<Body>,
3338 0 : _cancel: CancellationToken,
3339 0 : ) -> Result<Response<Body>, ApiError> {
3340 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3341 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3342 0 : let body: IngestAuxFilesRequest = json_request(&mut request).await?;
3343 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3344 :
3345 0 : let state = get_state(&request);
3346 :
3347 0 : let timeline =
3348 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3349 0 : .await?;
3350 :
3351 0 : let mut modification = timeline.begin_modification(
3352 0 : Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */
3353 0 : );
3354 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
3355 0 : for (fname, content) in body.aux_files {
3356 0 : modification
3357 0 : .put_file(&fname, content.as_bytes(), &ctx)
3358 0 : .await
3359 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
3360 : }
3361 0 : modification
3362 0 : .commit(&ctx)
3363 0 : .await
3364 0 : .map_err(ApiError::InternalServerError)?;
3365 :
3366 0 : json_response(StatusCode::OK, ())
3367 0 : }
3368 :
3369 : /// Report on the largest tenants on this pageserver, for the storage controller to identify
3370 : /// candidates for splitting
3371 0 : async fn post_top_tenants(
3372 0 : mut r: Request<Body>,
3373 0 : _cancel: CancellationToken,
3374 0 : ) -> Result<Response<Body>, ApiError> {
3375 0 : check_permission(&r, None)?;
3376 0 : let request: TopTenantShardsRequest = json_request(&mut r).await?;
3377 0 : let state = get_state(&r);
3378 :
3379 0 : fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 {
3380 0 : match order_by {
3381 0 : TenantSorting::ResidentSize => sizes.resident_size,
3382 0 : TenantSorting::MaxLogicalSize => sizes.max_logical_size,
3383 0 : TenantSorting::MaxLogicalSizePerShard => sizes.max_logical_size_per_shard,
3384 : }
3385 0 : }
3386 :
3387 : #[derive(Eq, PartialEq)]
3388 : struct HeapItem {
3389 : metric: u64,
3390 : sizes: TopTenantShardItem,
3391 : }
3392 :
3393 : impl PartialOrd for HeapItem {
3394 0 : fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
3395 0 : Some(self.cmp(other))
3396 0 : }
3397 : }
3398 :
3399 : /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which
3400 : /// supports popping the greatest item but not the smallest.
3401 : impl Ord for HeapItem {
3402 0 : fn cmp(&self, other: &Self) -> std::cmp::Ordering {
3403 0 : Reverse(self.metric).cmp(&Reverse(other.metric))
3404 0 : }
3405 : }
3406 :
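// Illustrative note (not in the original): with the reversed ordering above,
// the BinaryHeap behaves as a min-heap on `metric`, e.g.:
//
//     let mut h = std::collections::BinaryHeap::new();
//     for m in [3u64, 1, 2] { h.push(Reverse(m)); }
//     assert_eq!(h.pop(), Some(Reverse(1))); // smallest metric pops first
//
// so `peek()` below always exposes the weakest retained candidate.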
3407 0 : let mut top_n: BinaryHeap<HeapItem> = BinaryHeap::with_capacity(request.limit);
3408 :
3409 : // FIXME: this is a lot of clones to take this tenant list
3410 0 : for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() {
3411 0 : if let Some(shards_lt) = request.where_shards_lt {
3412 : // Ignore tenants which already have >= this many shards
3413 0 : if tenant_shard_id.shard_count >= shards_lt {
3414 0 : continue;
3415 0 : }
3416 0 : }
3417 :
3418 0 : let sizes = match tenant_slot {
3419 0 : TenantSlot::Attached(tenant) => tenant.get_sizes(),
3420 : TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
3421 0 : continue;
3422 : }
3423 : };
3424 0 : let metric = get_size_metric(&sizes, &request.order_by);
3425 :
3426 0 : if let Some(gt) = request.where_gt {
3427 : // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work
3428 0 : if metric <= gt {
3429 0 : continue;
3430 0 : }
3431 0 : };
3432 :
3433 0 : match top_n.peek() {
3434 0 : None => {
3435 0 : // Top N list is empty: candidate becomes first member
3436 0 : top_n.push(HeapItem { metric, sizes });
3437 0 : }
3438 0 : Some(i) if i.metric > metric && top_n.len() < request.limit => {
3439 0 : // Smallest item in the heap is greater than our candidate, but we aren't at the limit yet: accept it
3440 0 : top_n.push(HeapItem { metric, sizes });
3441 0 : }
3442 0 : Some(i) if i.metric > metric => {
3443 0 : // Heap is at the limit and its smallest value is greater than our candidate: skip the candidate.
3444 0 : }
3445 0 : Some(_) => top_n.push(HeapItem { metric, sizes }),
3446 : }
3447 :
3448 0 : while top_n.len() > request.limit {
3449 0 : top_n.pop();
3450 0 : }
3451 : }
3452 :
3453 0 : json_response(
3454 : StatusCode::OK,
3455 : TopTenantShardsResponse {
3456 0 : shards: top_n.into_iter().map(|i| i.sizes).collect(),
3457 : },
3458 : )
3459 0 : }
3460 :
3461 0 : async fn put_tenant_timeline_import_basebackup(
3462 0 : request: Request<Body>,
3463 0 : _cancel: CancellationToken,
3464 0 : ) -> Result<Response<Body>, ApiError> {
3465 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3466 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3467 0 : let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
3468 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3469 0 : let pg_version: PgMajorVersion = must_parse_query_param(&request, "pg_version")?;
3470 :
3471 0 : check_permission(&request, Some(tenant_id))?;
3472 :
3473 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3474 :
3475 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
3476 :
3477 0 : let span = info_span!("import_basebackup",
3478 0 : tenant_id=%tenant_id, timeline_id=%timeline_id, shard_id=%tenant_shard_id.shard_slug(),
3479 : base_lsn=%base_lsn, end_lsn=%end_lsn, pg_version=%pg_version);
3480 0 : async move {
3481 0 : let state = get_state(&request);
3482 0 : let tenant = state
3483 0 : .tenant_manager
3484 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3485 :
3486 0 : let broker_client = state.broker_client.clone();
3487 :
3488 0 : let mut body = StreamReader::new(
3489 0 : request
3490 0 : .into_body()
3491 0 : .map(|res| res.map_err(|error| std::io::Error::other(anyhow::anyhow!(error)))),
3492 : );
3493 :
3494 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
3495 :
3496 0 : let (timeline, timeline_ctx) = tenant
3497 0 : .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
3498 0 : .map_err(ApiError::InternalServerError)
3499 0 : .await?;
3500 :
3501 : // TODO mark timeline as not ready until it reaches end_lsn.
3502 : // We might have some WAL to import as well, and we should prevent compute
3503 : // from connecting before that happens and writing conflicting WAL.
3504 : //
3505 : // This is not relevant for pageserver->pageserver migrations, since there's
3506 : // no WAL to import. But it should be fixed if we want to import from Postgres.
3507 :
3508 : // TODO leave clean state on error. For now you can use detach to clean
3509 : // up broken state from a failed import.
3510 :
3511 : // Import basebackup provided via CopyData
3512 0 : info!("importing basebackup");
3513 :
3514 0 : timeline
3515 0 : .import_basebackup_from_tar(
3516 0 : tenant.clone(),
3517 0 : &mut body,
3518 0 : base_lsn,
3519 0 : broker_client,
3520 0 : &timeline_ctx,
3521 0 : )
3522 0 : .await
3523 0 : .map_err(ApiError::InternalServerError)?;
3524 :
3525 : // Read the end of the tar archive.
3526 0 : read_tar_eof(body)
3527 0 : .await
3528 0 : .map_err(ApiError::InternalServerError)?;
3529 :
3530 : // TODO check checksum
3531 : // Meanwhile, you can verify client-side by taking a fullbackup
3532 : // and checking that its size matches what was imported.
3533 : // This wouldn't work if the base came from vanilla Postgres, though,
3534 : // since we discard some log files.
3535 :
3536 0 : info!("done");
3537 0 : json_response(StatusCode::OK, ())
3538 0 : }
3539 0 : .instrument(span)
3540 0 : .await
3541 0 : }
3542 :
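// Illustrative usage sketch (placeholders throughout; the LSNs and pg_version
// must come from the backup being imported): stream a basebackup tarball into
// a fresh timeline via the query parameters parsed above:
//
//   curl -X PUT --data-binary @base.tar \
//     "http://localhost:9898/v1/tenant/$TENANT/timeline/$TIMELINE/import_basebackup?base_lsn=0/149F0D8&end_lsn=0/149F1A0&pg_version=16"
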
3543 0 : async fn put_tenant_timeline_import_wal(
3544 0 : request: Request<Body>,
3545 0 : _cancel: CancellationToken,
3546 0 : ) -> Result<Response<Body>, ApiError> {
3547 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3548 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3549 0 : let start_lsn: Lsn = must_parse_query_param(&request, "start_lsn")?;
3550 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3551 :
3552 0 : check_permission(&request, Some(tenant_id))?;
3553 :
3554 0 : let span = info_span!("import_wal", tenant_id=%tenant_id, timeline_id=%timeline_id, start_lsn=%start_lsn, end_lsn=%end_lsn);
3555 0 : async move {
3556 0 : let state = get_state(&request);
3557 :
3558 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, TenantShardId::unsharded(tenant_id), timeline_id).await?;
3559 0 : let ctx = RequestContextBuilder::new(TaskKind::MgmtRequest)
3560 0 : .download_behavior(DownloadBehavior::Warn)
3561 0 : .scope(context::Scope::new_timeline(&timeline))
3562 0 : .root();
3563 :
3564 0 : let mut body = StreamReader::new(request.into_body().map(|res| {
3565 0 : res.map_err(|error| {
3566 0 : std::io::Error::other(anyhow::anyhow!(error))
3567 0 : })
3568 0 : }));
3569 :
3570 0 : let last_record_lsn = timeline.get_last_record_lsn();
3571 0 : if last_record_lsn != start_lsn {
3572 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL starting at LSN {start_lsn}: the timeline's last record LSN is {last_record_lsn}")));
3573 0 : }
3574 :
3575 : // TODO leave clean state on error. For now you can use detach to clean
3576 : // up broken state from a failed import.
3577 :
3578 : // Import wal provided via CopyData
3579 0 : info!("importing wal");
3580 0 : crate::import_datadir::import_wal_from_tar(&timeline, &mut body, start_lsn, end_lsn, &ctx).await.map_err(ApiError::InternalServerError)?;
3581 0 : info!("wal import complete");
3582 :
3583 : // Read the end of the tar archive.
3584 0 : read_tar_eof(body).await.map_err(ApiError::InternalServerError)?;
3585 :
3586 : // TODO Does it make sense to overshoot?
3587 0 : if timeline.get_last_record_lsn() < end_lsn {
3588 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("WAL import did not reach end LSN {end_lsn}: the timeline's last record LSN is {}", timeline.get_last_record_lsn())));
3589 0 : }
3590 :
3591 : // Flush data to disk, then upload to s3. No need for a forced checkpoint.
3592 : // We only want to persist the data, and it doesn't matter if it's in the
3593 : // shape of deltas or images.
3594 0 : info!("flushing layers");
3595 0 : timeline.freeze_and_flush().await.map_err(|e| match e {
3596 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
3597 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
3598 0 : })?;
3599 :
3600 0 : info!("done");
3601 :
3602 0 : json_response(StatusCode::OK, ())
3603 0 : }.instrument(span).await
3604 0 : }
3605 :
3606 : /// Activate a timeline after its import has completed
3607 : ///
3608 : /// The endpoint is idempotent and callers are expected to retry all
3609 : /// errors until a successful response.
3610 0 : async fn activate_post_import_handler(
3611 0 : request: Request<Body>,
3612 0 : _cancel: CancellationToken,
3613 0 : ) -> Result<Response<Body>, ApiError> {
3614 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3615 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3616 :
3617 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3618 : const DEFAULT_ACTIVATE_TIMEOUT: Duration = Duration::from_secs(1);
3619 0 : let activate_timeout = parse_query_param(&request, "timeline_activate_timeout_ms")?
3620 0 : .map(Duration::from_millis)
3621 0 : .unwrap_or(DEFAULT_ACTIVATE_TIMEOUT);
3622 :
3623 0 : let span = info_span!(
3624 : "activate_post_import_handler",
3625 : tenant_id=%tenant_shard_id.tenant_id,
3626 : timeline_id=%timeline_id,
3627 0 : shard_id=%tenant_shard_id.shard_slug()
3628 : );
3629 :
3630 0 : async move {
3631 0 : let state = get_state(&request);
3632 0 : let tenant = state
3633 0 : .tenant_manager
3634 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3635 :
3636 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
3637 :
3638 0 : tenant.finalize_importing_timeline(timeline_id).await?;
3639 :
3640 0 : match tenant.get_timeline(timeline_id, false) {
3641 0 : Ok(_timeline) => {
3642 0 : // Timeline is already visible. Reset not required: fall through.
3643 0 : }
3644 : Err(GetTimelineError::NotFound { .. }) => {
3645 : // This is crude: we reset the whole tenant such that the new timeline is detected
3646 : // and activated. We can come up with something more granular in the future.
3647 : //
3648 : // Note that we only reset the tenant if required: when the timeline is
3649 : // not present in [`Tenant::timelines`].
3650 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3651 0 : state
3652 0 : .tenant_manager
3653 0 : .reset_tenant(tenant_shard_id, false, &ctx)
3654 0 : .await
3655 0 : .map_err(ApiError::InternalServerError)?;
3656 : }
3657 : Err(GetTimelineError::ShuttingDown) => {
3658 0 : return Err(ApiError::ShuttingDown);
3659 : }
3660 : Err(GetTimelineError::NotActive { .. }) => {
3661 0 : unreachable!("Called get_timeline with active_only=false");
3662 : }
3663 : }
3664 :
3665 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
3666 :
3667 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn)
3668 0 : .with_scope_timeline(&timeline);
3669 :
3670 0 : let result =
3671 0 : tokio::time::timeout(activate_timeout, timeline.wait_to_become_active(&ctx)).await;
3672 0 : match result {
3673 0 : Ok(Ok(())) => {
3674 0 : // fallthrough
3675 0 : }
3676 : // Timeline reached some other state that's not active
3677 : // TODO(vlad): if the tenant is broken, return a permanent error
3678 0 : Ok(Err(_timeline_state)) => {
3679 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3680 0 : "Timeline activation failed"
3681 0 : )));
3682 : }
3683 : // Activation timed out
3684 : Err(_) => {
3685 0 : return Err(ApiError::Timeout("Timeline activation timed out".into()));
3686 : }
3687 : }
3688 :
3689 0 : let timeline_info = build_timeline_info(
3690 0 : &timeline, false, // include_non_incremental_logical_size,
3691 0 : false, // force_await_initial_logical_size
3692 0 : false, // include_image_consistent_lsn
3693 0 : &ctx,
3694 0 : )
3695 0 : .await
3696 0 : .context("get local timeline info")
3697 0 : .map_err(ApiError::InternalServerError)?;
3698 :
3699 0 : json_response(StatusCode::OK, timeline_info)
3700 0 : }
3701 0 : .instrument(span)
3702 0 : .await
3703 0 : }
3704 :
3705 : // [Hadron] Reset gauge metrics that are used to raise alerts. We need this API as a stop-gap measure to reset alerts
3706 : // after we manually rectify situations such as local SSD data loss. We will eventually automate this.
3707 0 : async fn hadron_reset_alert_gauges(
3708 0 : request: Request<Body>,
3709 0 : _cancel: CancellationToken,
3710 0 : ) -> Result<Response<Body>, ApiError> {
3711 0 : check_permission(&request, None)?;
3712 0 : LOCAL_DATA_LOSS_SUSPECTED.set(0);
3713 0 : json_response(StatusCode::OK, ())
3714 0 : }
3715 :
3716 : /// Read the end of a tar archive.
3717 : ///
3718 : /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
3719 : /// `tokio_tar` already read the first such block. Read the second all-zeros block,
3720 : /// and check that there is no more data after the EOF marker.
3721 : ///
3722 : /// The 'tar' command can also write extra blocks of zeros, up to a record
3723 : /// size controlled by the --record-size argument. Ignore those too.
3724 0 : async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow::Result<()> {
3725 : use tokio::io::AsyncReadExt;
3726 0 : let mut buf = [0u8; 512];
3727 :
3728 : // Read the all-zeros block, and verify it
3729 0 : let mut total_bytes = 0;
3730 0 : while total_bytes < 512 {
3731 0 : let nbytes = reader.read(&mut buf[total_bytes..]).await?;
3732 0 : total_bytes += nbytes;
3733 0 : if nbytes == 0 {
3734 0 : break;
3735 0 : }
3736 : }
3737 0 : if total_bytes < 512 {
3738 0 : anyhow::bail!("incomplete or invalid tar EOF marker");
3739 0 : }
3740 0 : if !buf.iter().all(|&x| x == 0) {
3741 0 : anyhow::bail!("invalid tar EOF marker");
3742 0 : }
3743 :
3744 : // Drain any extra zero-blocks after the EOF marker
3745 0 : let mut trailing_bytes = 0;
3746 0 : let mut seen_nonzero_bytes = false;
3747 : loop {
3748 0 : let nbytes = reader.read(&mut buf).await?;
3749 0 : trailing_bytes += nbytes;
3750 0 : if !buf.iter().all(|&x| x == 0) {
3751 0 : seen_nonzero_bytes = true;
3752 0 : }
3753 0 : if nbytes == 0 {
3754 0 : break;
3755 0 : }
3756 : }
3757 0 : if seen_nonzero_bytes {
3758 0 : anyhow::bail!("unexpected non-zero bytes after the tar archive");
3759 0 : }
3760 0 : if trailing_bytes % 512 != 0 {
3761 0 : anyhow::bail!(
3762 0 : "unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive"
3763 : );
3764 0 : }
3765 0 : Ok(())
3766 0 : }
3767 :
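// A minimal test sketch (illustrative; not part of the original file) of the
// EOF framing `read_tar_eof` accepts: a second all-zeros 512-byte block,
// optionally followed by whole zero blocks of padding, and nothing else.
#[cfg(test)]
mod read_tar_eof_example {
    use super::read_tar_eof;

    #[tokio::test]
    async fn accepts_zero_block_plus_record_padding() {
        // tokio_tar has already consumed the first zero block; we provide the
        // second block plus two blocks' worth of zero padding.
        let buf = vec![0u8; 512 + 1024];
        read_tar_eof(&buf[..]).await.unwrap();
    }

    #[tokio::test]
    async fn rejects_trailing_garbage() {
        // Valid second zero block, then 512 non-zero bytes: must fail.
        let mut buf = vec![0u8; 512];
        buf.extend_from_slice(&[1u8; 512]);
        assert!(read_tar_eof(&buf[..]).await.is_err());
    }
}
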
3768 0 : async fn force_refresh_feature_flag(
3769 0 : request: Request<Body>,
3770 0 : _cancel: CancellationToken,
3771 0 : ) -> Result<Response<Body>, ApiError> {
3772 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3773 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3774 :
3775 0 : let state = get_state(&request);
3776 0 : let tenant = state
3777 0 : .tenant_manager
3778 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3779 0 : tenant
3780 0 : .feature_resolver
3781 0 : .refresh_properties_and_flags(&tenant);
3782 0 : json_response(StatusCode::OK, ())
3783 0 : }
3784 :
3785 0 : async fn tenant_evaluate_feature_flag(
3786 0 : request: Request<Body>,
3787 0 : _cancel: CancellationToken,
3788 0 : ) -> Result<Response<Body>, ApiError> {
3789 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3790 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3791 :
3792 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3793 0 : let as_type: Option<String> = parse_query_param(&request, "as")?;
3794 :
3795 0 : let state = get_state(&request);
3796 :
3797 0 : async {
3798 0 : let tenant = state
3799 0 : .tenant_manager
3800 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3801 : // TODO: the properties we get here might be stale right after they are collected. But such races are rare (updated every 10s)
3802 : // and we don't need to worry about them for now.
3803 0 : let properties = tenant.feature_resolver.collect_properties();
3804 0 : if as_type.as_deref() == Some("boolean") {
3805 0 : let result = tenant.feature_resolver.evaluate_boolean(&flag);
3806 0 : let result = result.map(|_| true).map_err(|e| e.to_string());
3807 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3808 0 : } else if as_type.as_deref() == Some("multivariate") {
3809 0 : let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
3810 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3811 : } else {
3812 : // Auto infer the type of the feature flag.
3813 0 : let is_boolean = tenant.feature_resolver.is_feature_flag_boolean(&flag).map_err(|e| ApiError::InternalServerError(anyhow::anyhow!("{e}")))?;
3814 0 : if is_boolean {
3815 0 : let result = tenant.feature_resolver.evaluate_boolean(&flag);
3816 0 : let result = result.map(|_| true).map_err(|e| e.to_string());
3817 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3818 : } else {
3819 0 : let result = tenant.feature_resolver.evaluate_multivariate(&flag).map_err(|e| e.to_string());
3820 0 : json_response(StatusCode::OK, json!({ "result": result, "properties": properties }))
3821 : }
3822 : }
3823 0 : }
3824 0 : .instrument(info_span!("tenant_evaluate_feature_flag", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug()))
3825 0 : .await
3826 0 : }
3827 :
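// Illustrative usage sketch (host/port and IDs are placeholders): evaluate a
// flag while forcing boolean semantics via the `as` query parameter; without
// it, the handler above infers the flag type from the resolver:
//
//   curl "http://localhost:9898/v1/tenant/$TENANT_SHARD/feature_flag/my-flag?as=boolean"
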
3828 0 : async fn force_override_feature_flag_for_testing_put(
3829 0 : request: Request<Body>,
3830 0 : _cancel: CancellationToken,
3831 0 : ) -> Result<Response<Body>, ApiError> {
3832 0 : check_permission(&request, None)?;
3833 :
3834 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3835 0 : let value: String = must_parse_query_param(&request, "value")?;
3836 0 : let state = get_state(&request);
3837 0 : state
3838 0 : .feature_resolver
3839 0 : .force_override_for_testing(&flag, Some(&value));
3840 0 : json_response(StatusCode::OK, ())
3841 0 : }
3842 :
3843 0 : async fn force_override_feature_flag_for_testing_delete(
3844 0 : request: Request<Body>,
3845 0 : _cancel: CancellationToken,
3846 0 : ) -> Result<Response<Body>, ApiError> {
3847 0 : check_permission(&request, None)?;
3848 :
3849 0 : let flag: String = parse_request_param(&request, "flag_key")?;
3850 0 : let state = get_state(&request);
3851 0 : state
3852 0 : .feature_resolver
3853 0 : .force_override_for_testing(&flag, None);
3854 0 : json_response(StatusCode::OK, ())
3855 0 : }
3856 :
3857 0 : async fn update_feature_flag_spec(
3858 0 : mut request: Request<Body>,
3859 0 : _cancel: CancellationToken,
3860 0 : ) -> Result<Response<Body>, ApiError> {
3861 0 : check_permission(&request, None)?;
3862 0 : let body = json_request(&mut request).await?;
3863 0 : let state = get_state(&request);
3864 0 : state
3865 0 : .feature_resolver
3866 0 : .update(body)
3867 0 : .map_err(ApiError::InternalServerError)?;
3868 0 : json_response(StatusCode::OK, ())
3869 0 : }
3870 :
3871 : /// Common functionality of all the HTTP API handlers.
3872 : ///
3873 : /// - Adds a tracing span to each request (by `request_span`)
3874 : /// - Logs the request depending on the request method (by `request_span`)
3875 : /// - Logs the response if it was not successful (by `request_span`)
3876 : /// - Shields the handler function from async cancellations. Hyper can drop the handler
3877 : /// Future if the connection to the client is lost, but most of the pageserver code is
3878 : /// not async cancellation safe. This converts the dropped future into a graceful cancellation
3879 : /// request with a CancellationToken.
3880 0 : async fn api_handler<R, H>(request: Request<Body>, handler: H) -> Result<Response<Body>, ApiError>
3881 0 : where
3882 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3883 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3884 0 : {
3885 0 : if request.uri() != &"/v1/failpoints".parse::<Uri>().unwrap() {
3886 0 : fail::fail_point!("api-503", |_| Err(ApiError::ResourceUnavailable(
3887 0 : "failpoint".into()
3888 0 : )));
3889 :
3890 0 : fail::fail_point!("api-500", |_| Err(ApiError::InternalServerError(
3891 0 : anyhow::anyhow!("failpoint")
3892 0 : )));
3893 0 : }
3894 :
3895 : // Spawn a new task to handle the request, to protect the handler from unexpected
3896 : // async cancellations. Most pageserver functions are not async cancellation safe.
3897 : // We arm a drop-guard, so that if Hyper drops the Future, we signal the task
3898 : // with the cancellation token.
3899 0 : let token = CancellationToken::new();
3900 0 : let cancel_guard = token.clone().drop_guard();
3901 0 : let result = request_span(request, move |r| async {
3902 0 : let handle = tokio::spawn(
3903 0 : async {
3904 0 : let token_cloned = token.clone();
3905 0 : let result = handler(r, token).await;
3906 0 : if token_cloned.is_cancelled() {
3907 : // The drop guard has executed: we will never turn this result into a response.
3908 : //
3909 : // At least temporarily, do {:?} logging; these failures are rare but
3910 : // could hide difficult errors.
3911 0 : match &result {
3912 0 : Ok(response) => {
3913 0 : let status = response.status();
3914 0 : info!(%status, "Cancelled request finished successfully")
3915 : }
3916 0 : Err(e) => match e {
3917 : ApiError::ShuttingDown | ApiError::ResourceUnavailable(_) => {
3918 : // Don't log this at error severity: they are normal during lifecycle of tenants/process
3919 0 : info!("Cancelled request aborted for shutdown")
3920 : }
3921 : _ => {
3922 : // Log these in a highly visible way, because we have no client to send the response to, but
3923 : // would like to know that something went wrong.
3924 0 : error!("Cancelled request finished with an error: {e:?}")
3925 : }
3926 : },
3927 : }
3928 0 : }
3929 : // The only logging for cancelled, panicked request handlers is the tracing_panic_hook,
3930 : // which should suffice.
3931 : //
3932 : // There is still a chance to lose the result, due to a race between
3933 : // returning from here and the connection actually closing before the
3934 : // outer task gets to execute. Leaving that for #5815.
3935 0 : result
3936 0 : }
3937 0 : .in_current_span(),
3938 : );
3939 :
3940 0 : match handle.await {
3941 : // TODO: never actually return Err from here, always Ok(...) so that we can log
3942 : // spanned errors. Call api_error_handler instead and return appropriate Body.
3943 0 : Ok(result) => result,
3944 0 : Err(e) => {
3945 : // The handler task panicked. We have a global panic handler that logs the
3946 : // panic with its backtrace, so no need to log that here. Only log a brief
3947 : // message to make it clear that we returned the error to the client.
3948 0 : error!("HTTP request handler task panicked: {e:#}");
3949 :
3950 : // Don't return an Error here, because then the fallback error handler that was
3951 : // installed in make_router() will print the error. Instead, construct the
3952 : // HTTP error response and return that.
3953 0 : Ok(
3954 0 : ApiError::InternalServerError(anyhow!("HTTP request handler task panicked"))
3955 0 : .into_response(),
3956 0 : )
3957 : }
3958 : }
3959 0 : })
3960 0 : .await;
3961 :
3962 0 : cancel_guard.disarm();
3963 :
3964 0 : result
3965 0 : }
3966 :
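// A standalone sketch (illustrative only; not used by the router) of the
// shielding pattern implemented by `api_handler` above: the work runs in a
// spawned task, and a drop guard turns a dropped caller future into a
// cancellation signal instead of an abrupt abort.
#[allow(dead_code)]
async fn shielded_example<F, T>(fut: F) -> T
where
    F: std::future::Future<Output = T> + Send + 'static,
    T: Send + 'static,
{
    let token = CancellationToken::new();
    let cancel_guard = token.clone().drop_guard();
    let handle = tokio::spawn(async move {
        // A real handler would check `token.is_cancelled()` (or await
        // `token.cancelled()`) at its cancellation points.
        let _ = &token;
        fut.await
    });
    let result = handle.await.expect("handler task panicked");
    // A result was produced, so dropping the guard must not cancel the token.
    cancel_guard.disarm();
    result
}
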
3967 : /// Like api_handler, but returns an error response if the server is built without
3968 : /// the 'testing' feature.
3969 0 : async fn testing_api_handler<R, H>(
3970 0 : desc: &str,
3971 0 : request: Request<Body>,
3972 0 : handler: H,
3973 0 : ) -> Result<Response<Body>, ApiError>
3974 0 : where
3975 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3976 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3977 0 : {
3978 0 : if cfg!(feature = "testing") {
3979 0 : api_handler(request, handler).await
3980 : } else {
3981 0 : std::future::ready(Err(ApiError::BadRequest(anyhow!(
3982 0 : "Cannot {desc} because pageserver was compiled without testing APIs",
3983 0 : ))))
3984 0 : .await
3985 : }
3986 0 : }
3987 :
3988 0 : pub fn make_router(
3989 0 : state: Arc<State>,
3990 0 : launch_ts: &'static LaunchTimestamp,
3991 0 : auth: Option<Arc<SwappableJwtAuth>>,
3992 0 : ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
3993 0 : let spec = include_bytes!("openapi_spec.yml");
3994 0 : let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
3995 0 : if auth.is_some() {
3996 0 : router = router.middleware(auth_middleware(|request| {
3997 0 : let state = get_state(request);
3998 0 : if state.allowlist_routes.contains(&request.uri().path()) {
3999 0 : None
4000 : } else {
4001 0 : state.auth.as_deref()
4002 : }
4003 0 : }))
4004 0 : }
4005 :
4006 0 : router = router.middleware(
4007 0 : endpoint::add_response_header_middleware(
4008 0 : "PAGESERVER_LAUNCH_TIMESTAMP",
4009 0 : &launch_ts.to_string(),
4010 : )
4011 0 : .expect("construct launch timestamp header middleware"),
4012 : );
4013 :
4014 0 : let force_metric_collection_on_scrape = state.conf.force_metric_collection_on_scrape;
4015 :
4016 0 : let prometheus_metrics_handler_wrapper =
4017 0 : move |req| prometheus_metrics_handler(req, force_metric_collection_on_scrape);
4018 :
4019 0 : Ok(router
4020 0 : .data(state)
4021 0 : .get("/metrics", move |r| request_span(r, prometheus_metrics_handler_wrapper))
4022 0 : .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
4023 0 : .get("/profile/heap", |r| request_span(r, profile_heap_handler))
4024 0 : .get("/v1/status", |r| api_handler(r, status_handler))
4025 0 : .put("/v1/failpoints", |r| {
4026 0 : testing_api_handler("manage failpoints", r, failpoints_handler)
4027 0 : })
4028 0 : .post("/v1/reload_auth_validation_keys", |r| {
4029 0 : api_handler(r, reload_auth_validation_keys_handler)
4030 0 : })
4031 0 : .get("/v1/tenant", |r| api_handler(r, tenant_list_handler))
4032 0 : .get("/v1/tenant/:tenant_shard_id", |r| {
4033 0 : api_handler(r, tenant_status)
4034 0 : })
4035 0 : .delete("/v1/tenant/:tenant_shard_id", |r| {
4036 0 : api_handler(r, tenant_delete_handler)
4037 0 : })
4038 0 : .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
4039 0 : api_handler(r, tenant_size_handler)
4040 0 : })
4041 0 : .patch("/v1/tenant/config", |r| {
4042 0 : api_handler(r, patch_tenant_config_handler)
4043 0 : })
4044 0 : .put("/v1/tenant/config", |r| {
4045 0 : api_handler(r, update_tenant_config_handler)
4046 0 : })
4047 0 : .put("/v1/tenant/:tenant_shard_id/shard_split", |r| {
4048 0 : api_handler(r, tenant_shard_split_handler)
4049 0 : })
4050 0 : .get("/v1/tenant/:tenant_shard_id/config", |r| {
4051 0 : api_handler(r, get_tenant_config_handler)
4052 0 : })
4053 0 : .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
4054 0 : api_handler(r, put_tenant_location_config_handler)
4055 0 : })
4056 0 : .get("/v1/location_config", |r| {
4057 0 : api_handler(r, list_location_config_handler)
4058 0 : })
4059 0 : .get("/v1/location_config/:tenant_shard_id", |r| {
4060 0 : api_handler(r, get_location_config_handler)
4061 0 : })
4062 0 : .put(
4063 : "/v1/tenant/:tenant_shard_id/time_travel_remote_storage",
4064 0 : |r| api_handler(r, tenant_time_travel_remote_storage_handler),
4065 : )
4066 0 : .get("/v1/tenant/:tenant_shard_id/timeline", |r| {
4067 0 : api_handler(r, timeline_list_handler)
4068 0 : })
4069 0 : .get("/v1/tenant/:tenant_shard_id/timeline_and_offloaded", |r| {
4070 0 : api_handler(r, timeline_and_offloaded_list_handler)
4071 0 : })
4072 0 : .post("/v1/tenant/:tenant_shard_id/timeline", |r| {
4073 0 : api_handler(r, timeline_create_handler)
4074 0 : })
4075 0 : .post("/v1/tenant/:tenant_shard_id/reset", |r| {
4076 0 : api_handler(r, tenant_reset_handler)
4077 0 : })
4078 0 : .post(
4079 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive",
4080 0 : |r| api_handler(r, timeline_preserve_initdb_handler),
4081 : )
4082 0 : .put(
4083 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config",
4084 0 : |r| api_handler(r, timeline_archival_config_handler),
4085 : )
4086 0 : .get("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
4087 0 : api_handler(r, timeline_detail_handler)
4088 0 : })
4089 0 : .get(
4090 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_lsn_by_timestamp",
4091 0 : |r| api_handler(r, get_lsn_by_timestamp_handler),
4092 : )
4093 0 : .get(
4094 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn",
4095 0 : |r| api_handler(r, get_timestamp_of_lsn_handler),
4096 : )
4097 0 : .post(
4098 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/patch_index_part",
4099 0 : |r| api_handler(r, timeline_patch_index_part_handler),
4100 : )
4101 0 : .post(
4102 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease",
4103 0 : |r| api_handler(r, lsn_lease_handler),
4104 : )
4105 0 : .put(
4106 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc",
4107 0 : |r| api_handler(r, timeline_gc_handler),
4108 : )
4109 0 : .get(
4110 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4111 0 : |r| api_handler(r, timeline_compact_info_handler),
4112 : )
4113 0 : .put(
4114 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4115 0 : |r| api_handler(r, timeline_compact_handler),
4116 : )
4117 0 : .delete(
4118 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
4119 0 : |r| api_handler(r, timeline_cancel_compact_handler),
4120 : )
4121 0 : .put(
4122 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/offload",
4123 0 : |r| testing_api_handler("attempt timeline offload", r, timeline_offload_handler),
4124 : )
4125 0 : .put(
4126 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/mark_invisible",
4127 0 : |r| api_handler(r, timeline_mark_invisible_handler),
4128 : )
4129 0 : .put(
4130 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/checkpoint",
4131 0 : |r| testing_api_handler("run timeline checkpoint", r, timeline_checkpoint_handler),
4132 : )
4133 0 : .post(
4134 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
4135 0 : |r| api_handler(r, timeline_download_remote_layers_handler_post),
4136 : )
4137 0 : .get(
4138 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
4139 0 : |r| api_handler(r, timeline_download_remote_layers_handler_get),
4140 : )
4141 0 : .put(
4142 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/detach_ancestor",
4143 0 : |r| api_handler(r, timeline_detach_ancestor_handler),
4144 : )
4145 0 : .delete("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
4146 0 : api_handler(r, timeline_delete_handler)
4147 0 : })
4148 0 : .get(
4149 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer",
4150 0 : |r| api_handler(r, layer_map_info_handler),
4151 : )
4152 0 : .post(
4153 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
4154 0 : |r| api_handler(r, timeline_download_heatmap_layers_handler),
4155 : )
4156 0 : .delete(
4157 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
4158 0 : |r| api_handler(r, timeline_shutdown_download_heatmap_layers_handler),
4159 : )
4160 0 : .get(
4161 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
4162 0 : |r| api_handler(r, layer_download_handler),
4163 : )
4164 0 : .delete(
4165 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
4166 0 : |r| api_handler(r, evict_timeline_layer_handler),
4167 : )
4168 0 : .post(
4169 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_name/scan_disposable_keys",
4170 0 : |r| testing_api_handler("timeline_layer_scan_disposable_keys", r, timeline_layer_scan_disposable_keys),
4171 : )
4172 0 : .post(
4173 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/block_gc",
4174 0 : |r| api_handler(r, timeline_gc_blocking_handler),
4175 : )
4176 0 : .post(
4177 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/unblock_gc",
4178 0 : |r| api_handler(r, timeline_gc_unblocking_handler),
4179 : )
4180 0 : .get(
4181 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/page_trace",
4182 0 : |r| api_handler(r, timeline_page_trace_handler),
4183 : )
4184 0 : .post("/v1/tenant/:tenant_shard_id/heatmap_upload", |r| {
4185 0 : api_handler(r, secondary_upload_handler)
4186 0 : })
4187 0 : .get("/v1/tenant/:tenant_id/scan_remote_storage", |r| {
4188 0 : api_handler(r, tenant_scan_remote_handler)
4189 0 : })
4190 0 : .put("/v1/disk_usage_eviction/run", |r| {
4191 0 : api_handler(r, disk_usage_eviction_run)
4192 0 : })
4193 0 : .put("/v1/deletion_queue/flush", |r| {
4194 0 : api_handler(r, deletion_queue_flush)
4195 0 : })
4196 0 : .get("/v1/tenant/:tenant_shard_id/secondary/status", |r| {
4197 0 : api_handler(r, secondary_status_handler)
4198 0 : })
4199 0 : .post("/v1/tenant/:tenant_shard_id/secondary/download", |r| {
4200 0 : api_handler(r, secondary_download_handler)
4201 0 : })
4202 0 : .post("/v1/tenant/:tenant_shard_id/wait_lsn", |r| {
4203 0 : api_handler(r, wait_lsn_handler)
4204 0 : })
4205 0 : .put("/v1/tenant/:tenant_shard_id/break", |r| {
4206 0 : testing_api_handler("set tenant state to broken", r, handle_tenant_break)
4207 0 : })
4208 0 : .get("/v1/panic", |r| api_handler(r, always_panic_handler))
4209 0 : .post("/v1/tracing/event", |r| {
4210 0 : testing_api_handler("emit a tracing event", r, post_tracing_event_handler)
4211 0 : })
4212 0 : .get(
4213 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage",
4214 0 : |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler),
4215 : )
4216 0 : .get(
4217 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/touchpage",
4218 0 : |r| api_handler(r, touchpage_at_lsn_handler),
4219 : )
4220 0 : .get(
4221 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/keyspace",
4222 0 : |r| api_handler(r, timeline_collect_keyspace),
4223 : )
4224 0 : .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler))
4225 0 : .put("/v1/io_mode", |r| api_handler(r, put_io_mode_handler))
4226 0 : .get("/v1/utilization", |r| api_handler(r, get_utilization))
4227 0 : .get("/v1/list_tenant_visible_size", |r| api_handler(r, list_tenant_visible_size_handler))
4228 0 : .post(
4229 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files",
4230 0 : |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files),
4231 : )
4232 0 : .post(
4233 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files",
4234 0 : |r| testing_api_handler("list_aux_files", r, list_aux_files),
4235 : )
4236 0 : .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
4237 0 : .post(
4238 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
4239 0 : |r| testing_api_handler("perf_info", r, perf_info),
4240 : )
4241 0 : .put(
4242 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_basebackup",
4243 0 : |r| api_handler(r, put_tenant_timeline_import_basebackup),
4244 : )
4245 0 : .put(
4246 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_wal",
4247 0 : |r| api_handler(r, put_tenant_timeline_import_wal),
4248 : )
4249 0 : .put(
4250 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/activate_post_import",
4251 0 : |r| api_handler(r, activate_post_import_handler),
4252 : )
4253 0 : .get("/v1/tenant/:tenant_shard_id/feature_flag/:flag_key", |r| {
4254 0 : api_handler(r, tenant_evaluate_feature_flag)
4255 0 : })
4256 0 : .post("/v1/tenant/:tenant_shard_id/force_refresh_feature_flag", |r| {
4257 0 : api_handler(r, force_refresh_feature_flag)
4258 0 : })
4259 0 : .put("/v1/feature_flag/:flag_key", |r| {
4260 0 : testing_api_handler("force override feature flag - put", r, force_override_feature_flag_for_testing_put)
4261 0 : })
4262 0 : .delete("/v1/feature_flag/:flag_key", |r| {
4263 0 : testing_api_handler("force override feature flag - delete", r, force_override_feature_flag_for_testing_delete)
4264 0 : })
4265 0 : .post("/v1/feature_flag_spec", |r| {
4266 0 : api_handler(r, update_feature_flag_spec)
4267 0 : })
4268 0 : .post("/hadron-internal/reset_alert_gauges", |r| {
4269 0 : api_handler(r, hadron_reset_alert_gauges)
4270 0 : })
4271 0 : .any(handler_404))
4272 0 : }