Line data Source code
1 : //!
2 : //! Management HTTP API
3 : //!
4 : use std::cmp::Reverse;
5 : use std::collections::{BinaryHeap, HashMap};
6 : use std::str::FromStr;
7 : use std::sync::Arc;
8 : use std::time::Duration;
9 :
10 : use anyhow::{Context, Result, anyhow};
11 : use enumset::EnumSet;
12 : use futures::future::join_all;
13 : use futures::{StreamExt, TryFutureExt};
14 : use http_utils::endpoint::{
15 : self, attach_openapi_ui, auth_middleware, check_permission_with, profile_cpu_handler,
16 : profile_heap_handler, prometheus_metrics_handler, request_span,
17 : };
18 : use http_utils::error::{ApiError, HttpErrorBody};
19 : use http_utils::failpoints::failpoints_handler;
20 : use http_utils::json::{json_request, json_request_maybe, json_response};
21 : use http_utils::request::{
22 : get_request_param, must_get_query_param, must_parse_query_param, parse_query_param,
23 : parse_request_param,
24 : };
25 : use http_utils::{RequestExt, RouterBuilder};
26 : use humantime::format_rfc3339;
27 : use hyper::{Body, Request, Response, StatusCode, Uri, header};
28 : use metrics::launch_timestamp::LaunchTimestamp;
29 : use pageserver_api::models::virtual_file::IoMode;
30 : use pageserver_api::models::{
31 : DetachBehavior, DownloadRemoteLayersTaskSpawnRequest, IngestAuxFilesRequest,
32 : ListAuxFilesRequest, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
33 : LsnLeaseRequest, OffloadedTimelineInfo, PageTraceEvent, ShardParameters, StatusResponse,
34 : TenantConfigPatchRequest, TenantConfigRequest, TenantDetails, TenantInfo,
35 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantScanRemoteStorageResponse,
36 : TenantScanRemoteStorageShard, TenantShardLocation, TenantShardSplitRequest,
37 : TenantShardSplitResponse, TenantSorting, TenantState, TenantWaitLsnRequest,
38 : TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateRequestMode,
39 : TimelineCreateRequestModeImportPgdata, TimelineGcRequest, TimelineInfo,
40 : TimelinePatchIndexPartRequest, TimelineVisibilityState, TimelinesInfoAndOffloaded,
41 : TopTenantShardItem, TopTenantShardsRequest, TopTenantShardsResponse,
42 : };
43 : use pageserver_api::shard::{ShardCount, TenantShardId};
44 : use remote_storage::{DownloadError, GenericRemoteStorage, TimeTravelError};
45 : use scopeguard::defer;
46 : use tenant_size_model::svg::SvgBranchKind;
47 : use tenant_size_model::{SizeResult, StorageModel};
48 : use tokio::time::Instant;
49 : use tokio_util::io::StreamReader;
50 : use tokio_util::sync::CancellationToken;
51 : use tracing::*;
52 : use utils::auth::SwappableJwtAuth;
53 : use utils::generation::Generation;
54 : use utils::id::{TenantId, TimelineId};
55 : use utils::lsn::Lsn;
56 :
57 : use crate::config::PageServerConf;
58 : use crate::context;
59 : use crate::context::{DownloadBehavior, RequestContext, RequestContextBuilder};
60 : use crate::deletion_queue::DeletionQueueClient;
61 : use crate::pgdatadir_mapping::LsnForTimestamp;
62 : use crate::task_mgr::TaskKind;
63 : use crate::tenant::config::LocationConf;
64 : use crate::tenant::mgr::{
65 : GetActiveTenantError, GetTenantError, TenantManager, TenantMapError, TenantMapInsertError,
66 : TenantSlot, TenantSlotError, TenantSlotUpsertError, TenantStateError, UpsertLocationError,
67 : };
68 : use crate::tenant::remote_timeline_client::index::GcCompactionState;
69 : use crate::tenant::remote_timeline_client::{
70 : download_index_part, list_remote_tenant_shards, list_remote_timelines,
71 : };
72 : use crate::tenant::secondary::SecondaryController;
73 : use crate::tenant::size::ModelInputs;
74 : use crate::tenant::storage_layer::{IoConcurrency, LayerAccessStatsReset, LayerName};
75 : use crate::tenant::timeline::offload::{OffloadError, offload_timeline};
76 : use crate::tenant::timeline::{
77 : CompactFlags, CompactOptions, CompactRequest, CompactionError, Timeline, WaitLsnTimeout,
78 : WaitLsnWaiter, import_pgdata,
79 : };
80 : use crate::tenant::{
81 : GetTimelineError, LogicalSizeCalculationCause, OffloadedTimeline, PageReconstructError,
82 : remote_timeline_client,
83 : };
84 : use crate::{DEFAULT_PG_VERSION, disk_usage_eviction_task, tenant};
85 :
86 : // For APIs that require an Active tenant, how long should we block waiting for that state?
87 : // This is not functionally necessary (clients will retry), but avoids generating a lot of
88 : // failed API calls while tenants are activating.
89 : #[cfg(not(feature = "testing"))]
90 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(5000);
91 :
92 : // Tests run on slow/oversubscribed nodes, and may need to wait much longer for tenants to
93 : // finish attaching, if calls to remote storage are slow.
94 : #[cfg(feature = "testing")]
95 : pub(crate) const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
96 :
97 : pub struct State {
98 : conf: &'static PageServerConf,
99 : tenant_manager: Arc<TenantManager>,
100 : auth: Option<Arc<SwappableJwtAuth>>,
101 : allowlist_routes: &'static [&'static str],
102 : remote_storage: GenericRemoteStorage,
103 : broker_client: storage_broker::BrokerClientChannel,
104 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
105 : deletion_queue_client: DeletionQueueClient,
106 : secondary_controller: SecondaryController,
107 : latest_utilization: tokio::sync::Mutex<Option<(std::time::Instant, bytes::Bytes)>>,
108 : }
109 :
110 : impl State {
111 : #[allow(clippy::too_many_arguments)]
112 0 : pub fn new(
113 0 : conf: &'static PageServerConf,
114 0 : tenant_manager: Arc<TenantManager>,
115 0 : auth: Option<Arc<SwappableJwtAuth>>,
116 0 : remote_storage: GenericRemoteStorage,
117 0 : broker_client: storage_broker::BrokerClientChannel,
118 0 : disk_usage_eviction_state: Arc<disk_usage_eviction_task::State>,
119 0 : deletion_queue_client: DeletionQueueClient,
120 0 : secondary_controller: SecondaryController,
121 0 : ) -> anyhow::Result<Self> {
122 0 : let allowlist_routes = &[
123 0 : "/v1/status",
124 0 : "/v1/doc",
125 0 : "/swagger.yml",
126 0 : "/metrics",
127 0 : "/profile/cpu",
128 0 : "/profile/heap",
129 0 : ];
130 0 : Ok(Self {
131 0 : conf,
132 0 : tenant_manager,
133 0 : auth,
134 0 : allowlist_routes,
135 0 : remote_storage,
136 0 : broker_client,
137 0 : disk_usage_eviction_state,
138 0 : deletion_queue_client,
139 0 : secondary_controller,
140 0 : latest_utilization: Default::default(),
141 0 : })
142 0 : }
143 : }
144 :
145 : #[inline(always)]
146 0 : fn get_state(request: &Request<Body>) -> &State {
147 0 : request
148 0 : .data::<Arc<State>>()
149 0 : .expect("unknown state type")
150 0 : .as_ref()
151 0 : }
152 :
153 : #[inline(always)]
154 0 : fn get_config(request: &Request<Body>) -> &'static PageServerConf {
155 0 : get_state(request).conf
156 0 : }
157 :
158 : /// Check that the requester is authorized to operate on given tenant
159 0 : fn check_permission(request: &Request<Body>, tenant_id: Option<TenantId>) -> Result<(), ApiError> {
160 0 : check_permission_with(request, |claims| {
161 0 : crate::auth::check_permission(claims, tenant_id)
162 0 : })
163 0 : }
164 :
165 : impl From<PageReconstructError> for ApiError {
166 0 : fn from(pre: PageReconstructError) -> ApiError {
167 0 : match pre {
168 0 : PageReconstructError::Other(other) => ApiError::InternalServerError(other),
169 0 : PageReconstructError::MissingKey(e) => ApiError::InternalServerError(e.into()),
170 0 : PageReconstructError::Cancelled => ApiError::Cancelled,
171 0 : PageReconstructError::AncestorLsnTimeout(e) => ApiError::Timeout(format!("{e}").into()),
172 0 : PageReconstructError::WalRedo(pre) => ApiError::InternalServerError(pre),
173 : }
174 0 : }
175 : }
176 :
177 : impl From<TenantMapInsertError> for ApiError {
178 0 : fn from(tmie: TenantMapInsertError) -> ApiError {
179 0 : match tmie {
180 0 : TenantMapInsertError::SlotError(e) => e.into(),
181 0 : TenantMapInsertError::SlotUpsertError(e) => e.into(),
182 0 : TenantMapInsertError::Other(e) => ApiError::InternalServerError(e),
183 : }
184 0 : }
185 : }
186 :
187 : impl From<TenantSlotError> for ApiError {
188 0 : fn from(e: TenantSlotError) -> ApiError {
189 : use TenantSlotError::*;
190 0 : match e {
191 0 : NotFound(tenant_id) => {
192 0 : ApiError::NotFound(anyhow::anyhow!("NotFound: tenant {tenant_id}").into())
193 : }
194 : InProgress => {
195 0 : ApiError::ResourceUnavailable("Tenant is being modified concurrently".into())
196 : }
197 0 : MapState(e) => e.into(),
198 : }
199 0 : }
200 : }
201 :
202 : impl From<TenantSlotUpsertError> for ApiError {
203 0 : fn from(e: TenantSlotUpsertError) -> ApiError {
204 : use TenantSlotUpsertError::*;
205 0 : match e {
206 0 : InternalError(e) => ApiError::InternalServerError(anyhow::anyhow!("{e}")),
207 0 : MapState(e) => e.into(),
208 0 : ShuttingDown(_) => ApiError::ShuttingDown,
209 : }
210 0 : }
211 : }
212 :
213 : impl From<UpsertLocationError> for ApiError {
214 0 : fn from(e: UpsertLocationError) -> ApiError {
215 : use UpsertLocationError::*;
216 0 : match e {
217 0 : BadRequest(e) => ApiError::BadRequest(e),
218 0 : Unavailable(_) => ApiError::ShuttingDown,
219 0 : e @ InProgress => ApiError::Conflict(format!("{e}")),
220 0 : Flush(e) | InternalError(e) => ApiError::InternalServerError(e),
221 : }
222 0 : }
223 : }
224 :
225 : impl From<TenantMapError> for ApiError {
226 0 : fn from(e: TenantMapError) -> ApiError {
227 : use TenantMapError::*;
228 0 : match e {
229 : StillInitializing | ShuttingDown => {
230 0 : ApiError::ResourceUnavailable(format!("{e}").into())
231 0 : }
232 0 : }
233 0 : }
234 : }
235 :
236 : impl From<TenantStateError> for ApiError {
237 0 : fn from(tse: TenantStateError) -> ApiError {
238 0 : match tse {
239 : TenantStateError::IsStopping(_) => {
240 0 : ApiError::ResourceUnavailable("Tenant is stopping".into())
241 : }
242 0 : TenantStateError::SlotError(e) => e.into(),
243 0 : TenantStateError::SlotUpsertError(e) => e.into(),
244 0 : TenantStateError::Other(e) => ApiError::InternalServerError(anyhow!(e)),
245 : }
246 0 : }
247 : }
248 :
249 : impl From<GetTenantError> for ApiError {
250 0 : fn from(tse: GetTenantError) -> ApiError {
251 0 : match tse {
252 0 : GetTenantError::NotFound(tid) => ApiError::NotFound(anyhow!("tenant {tid}").into()),
253 0 : GetTenantError::ShardNotFound(tid) => {
254 0 : ApiError::NotFound(anyhow!("tenant {tid}").into())
255 : }
256 : GetTenantError::NotActive(_) => {
257 : // Why is this not `ApiError::NotFound`?
258 : // Because we must be careful to never return 404 for a tenant if it does
259 : // in fact exist locally. If we did, the caller could draw the conclusion
260 : // that it can attach the tenant to another PS and we'd be in split-brain.
261 0 : ApiError::ResourceUnavailable("Tenant not yet active".into())
262 : }
263 0 : GetTenantError::MapState(e) => ApiError::ResourceUnavailable(format!("{e}").into()),
264 : }
265 0 : }
266 : }
267 :
268 : impl From<GetTimelineError> for ApiError {
269 0 : fn from(gte: GetTimelineError) -> Self {
270 0 : // Rationale: tenant is activated only after eligible timelines activate
271 0 : ApiError::NotFound(gte.into())
272 0 : }
273 : }
274 :
275 : impl From<GetActiveTenantError> for ApiError {
276 0 : fn from(e: GetActiveTenantError) -> ApiError {
277 0 : match e {
278 0 : GetActiveTenantError::Broken(reason) => {
279 0 : ApiError::InternalServerError(anyhow!("tenant is broken: {}", reason))
280 : }
281 : GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
282 0 : ApiError::ShuttingDown
283 : }
284 0 : GetActiveTenantError::WillNotBecomeActive(_) => ApiError::Conflict(format!("{}", e)),
285 0 : GetActiveTenantError::Cancelled => ApiError::ShuttingDown,
286 0 : GetActiveTenantError::NotFound(gte) => gte.into(),
287 : GetActiveTenantError::WaitForActiveTimeout { .. } => {
288 0 : ApiError::ResourceUnavailable(format!("{}", e).into())
289 : }
290 : GetActiveTenantError::SwitchedTenant => {
291 : // in our HTTP handlers, this error doesn't happen
292 : // TODO: separate error types
293 0 : ApiError::ResourceUnavailable("switched tenant".into())
294 : }
295 : }
296 0 : }
297 : }
298 :
299 : impl From<crate::tenant::DeleteTimelineError> for ApiError {
300 0 : fn from(value: crate::tenant::DeleteTimelineError) -> Self {
301 : use crate::tenant::DeleteTimelineError::*;
302 0 : match value {
303 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
304 0 : HasChildren(children) => ApiError::PreconditionFailed(
305 0 : format!("Cannot delete timeline which has child timelines: {children:?}")
306 0 : .into_boxed_str(),
307 0 : ),
308 0 : a @ AlreadyInProgress(_) => ApiError::Conflict(a.to_string()),
309 0 : Cancelled => ApiError::ResourceUnavailable("shutting down".into()),
310 0 : Other(e) => ApiError::InternalServerError(e),
311 : }
312 0 : }
313 : }
314 :
315 : impl From<crate::tenant::TimelineArchivalError> for ApiError {
316 0 : fn from(value: crate::tenant::TimelineArchivalError) -> Self {
317 : use crate::tenant::TimelineArchivalError::*;
318 0 : match value {
319 0 : NotFound => ApiError::NotFound(anyhow::anyhow!("timeline not found").into()),
320 0 : Timeout => ApiError::Timeout("hit pageserver internal timeout".into()),
321 0 : Cancelled => ApiError::ShuttingDown,
322 0 : e @ HasArchivedParent(_) => {
323 0 : ApiError::PreconditionFailed(e.to_string().into_boxed_str())
324 : }
325 0 : HasUnarchivedChildren(children) => ApiError::PreconditionFailed(
326 0 : format!(
327 0 : "Cannot archive timeline which has non-archived child timelines: {children:?}"
328 0 : )
329 0 : .into_boxed_str(),
330 0 : ),
331 0 : a @ AlreadyInProgress => ApiError::Conflict(a.to_string()),
332 0 : Other(e) => ApiError::InternalServerError(e),
333 : }
334 0 : }
335 : }
336 :
337 : impl From<crate::tenant::mgr::DeleteTimelineError> for ApiError {
338 0 : fn from(value: crate::tenant::mgr::DeleteTimelineError) -> Self {
339 : use crate::tenant::mgr::DeleteTimelineError::*;
340 0 : match value {
341 : // Report Precondition Failed so the client can distinguish the
342 : // "tenant is missing" case from the "timeline is missing" case
343 0 : Tenant(GetTenantError::NotFound(..)) => ApiError::PreconditionFailed(
344 0 : "Requested tenant is missing".to_owned().into_boxed_str(),
345 0 : ),
346 0 : Tenant(t) => ApiError::from(t),
347 0 : Timeline(t) => ApiError::from(t),
348 : }
349 0 : }
350 : }
351 :
352 : impl From<crate::tenant::mgr::DeleteTenantError> for ApiError {
353 0 : fn from(value: crate::tenant::mgr::DeleteTenantError) -> Self {
354 : use crate::tenant::mgr::DeleteTenantError::*;
355 0 : match value {
356 0 : SlotError(e) => e.into(),
357 0 : Other(o) => ApiError::InternalServerError(o),
358 0 : Cancelled => ApiError::ShuttingDown,
359 : }
360 0 : }
361 : }
362 :
363 : impl From<crate::tenant::secondary::SecondaryTenantError> for ApiError {
364 0 : fn from(ste: crate::tenant::secondary::SecondaryTenantError) -> ApiError {
365 : use crate::tenant::secondary::SecondaryTenantError;
366 0 : match ste {
367 0 : SecondaryTenantError::GetTenant(gte) => gte.into(),
368 0 : SecondaryTenantError::ShuttingDown => ApiError::ShuttingDown,
369 : }
370 0 : }
371 : }
372 :
373 : // Helper function to construct a TimelineInfo struct for a timeline
374 0 : async fn build_timeline_info(
375 0 : timeline: &Arc<Timeline>,
376 0 : include_non_incremental_logical_size: bool,
377 0 : force_await_initial_logical_size: bool,
378 0 : ctx: &RequestContext,
379 0 : ) -> anyhow::Result<TimelineInfo> {
380 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
381 0 :
382 0 : if force_await_initial_logical_size {
383 0 : timeline.clone().await_initial_logical_size().await
384 0 : }
385 :
386 0 : let mut info = build_timeline_info_common(
387 0 : timeline,
388 0 : ctx,
389 0 : tenant::timeline::GetLogicalSizePriority::Background,
390 0 : )
391 0 : .await?;
392 0 : if include_non_incremental_logical_size {
393 : // XXX we should be using spawn_ondemand_logical_size_calculation here.
394 : // Otherwise, if someone deletes the timeline / detaches the tenant while
395 : // we're executing this function, we will outlive the timeline on-disk state.
396 : info.current_logical_size_non_incremental = Some(
397 0 : timeline
398 0 : .get_current_logical_size_non_incremental(info.last_record_lsn, ctx)
399 0 : .await?,
400 : );
401 0 : }
402 0 : Ok(info)
403 0 : }
404 :
405 0 : async fn build_timeline_info_common(
406 0 : timeline: &Arc<Timeline>,
407 0 : ctx: &RequestContext,
408 0 : logical_size_task_priority: tenant::timeline::GetLogicalSizePriority,
409 0 : ) -> anyhow::Result<TimelineInfo> {
410 0 : crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id();
411 0 : let initdb_lsn = timeline.initdb_lsn;
412 0 : let last_record_lsn = timeline.get_last_record_lsn();
413 0 : let (wal_source_connstr, last_received_msg_lsn, last_received_msg_ts) = {
414 0 : let guard = timeline.last_received_wal.lock().unwrap();
415 0 : if let Some(info) = guard.as_ref() {
416 0 : (
417 0 : Some(format!("{}", info.wal_source_connconf)), // Password is hidden, but it's for statistics only.
418 0 : Some(info.last_received_msg_lsn),
419 0 : Some(info.last_received_msg_ts),
420 0 : )
421 : } else {
422 0 : (None, None, None)
423 : }
424 : };
425 :
426 0 : let ancestor_timeline_id = timeline.get_ancestor_timeline_id();
427 0 : let ancestor_lsn = match timeline.get_ancestor_lsn() {
428 0 : Lsn(0) => None,
429 0 : lsn @ Lsn(_) => Some(lsn),
430 : };
431 0 : let current_logical_size = timeline.get_current_logical_size(logical_size_task_priority, ctx);
432 0 : let current_physical_size = Some(timeline.layer_size_sum().await);
433 0 : let state = timeline.current_state();
434 0 : // Report is_archived = false if the timeline is still loading
435 0 : let is_archived = timeline.is_archived().unwrap_or(false);
436 0 : let remote_consistent_lsn_projected = timeline
437 0 : .get_remote_consistent_lsn_projected()
438 0 : .unwrap_or(Lsn(0));
439 0 : let remote_consistent_lsn_visible = timeline
440 0 : .get_remote_consistent_lsn_visible()
441 0 : .unwrap_or(Lsn(0));
442 0 : let is_invisible = timeline.remote_client.is_invisible().unwrap_or(false);
443 0 :
444 0 : let walreceiver_status = timeline.walreceiver_status();
445 0 :
446 0 : let (pitr_history_size, within_ancestor_pitr) = timeline.get_pitr_history_stats();
447 0 :
448 0 : let min_readable_lsn = std::cmp::max(
449 0 : timeline.get_gc_cutoff_lsn(),
450 0 : *timeline.get_applied_gc_cutoff_lsn(),
451 0 : );
452 :
453 0 : let info = TimelineInfo {
454 0 : tenant_id: timeline.tenant_shard_id,
455 0 : timeline_id: timeline.timeline_id,
456 0 : ancestor_timeline_id,
457 0 : ancestor_lsn,
458 0 : disk_consistent_lsn: timeline.get_disk_consistent_lsn(),
459 0 : remote_consistent_lsn: remote_consistent_lsn_projected,
460 0 : remote_consistent_lsn_visible,
461 0 : initdb_lsn,
462 0 : last_record_lsn,
463 0 : prev_record_lsn: Some(timeline.get_prev_record_lsn()),
464 0 : _unused: Default::default(), // Unused, for legacy decode only
465 0 : min_readable_lsn,
466 0 : applied_gc_cutoff_lsn: *timeline.get_applied_gc_cutoff_lsn(),
467 0 : current_logical_size: current_logical_size.size_dont_care_about_accuracy(),
468 0 : current_logical_size_is_accurate: match current_logical_size.accuracy() {
469 0 : tenant::timeline::logical_size::Accuracy::Approximate => false,
470 0 : tenant::timeline::logical_size::Accuracy::Exact => true,
471 : },
472 0 : directory_entries_counts: timeline.get_directory_metrics().to_vec(),
473 0 : current_physical_size,
474 0 : current_logical_size_non_incremental: None,
475 0 : pitr_history_size,
476 0 : within_ancestor_pitr,
477 0 : timeline_dir_layer_file_size_sum: None,
478 0 : wal_source_connstr,
479 0 : last_received_msg_lsn,
480 0 : last_received_msg_ts,
481 0 : pg_version: timeline.pg_version,
482 0 :
483 0 : state,
484 0 : is_archived: Some(is_archived),
485 0 : rel_size_migration: Some(timeline.get_rel_size_v2_status()),
486 0 : is_invisible: Some(is_invisible),
487 0 :
488 0 : walreceiver_status,
489 0 : };
490 0 : Ok(info)
491 0 : }
492 :
493 0 : fn build_timeline_offloaded_info(offloaded: &Arc<OffloadedTimeline>) -> OffloadedTimelineInfo {
494 0 : let &OffloadedTimeline {
495 0 : tenant_shard_id,
496 0 : timeline_id,
497 0 : ancestor_retain_lsn,
498 0 : ancestor_timeline_id,
499 0 : archived_at,
500 0 : ..
501 0 : } = offloaded.as_ref();
502 0 : OffloadedTimelineInfo {
503 0 : tenant_id: tenant_shard_id,
504 0 : timeline_id,
505 0 : ancestor_retain_lsn,
506 0 : ancestor_timeline_id,
507 0 : archived_at: archived_at.and_utc(),
508 0 : }
509 0 : }
510 :
511 : // healthcheck handler
512 0 : async fn status_handler(
513 0 : request: Request<Body>,
514 0 : _cancel: CancellationToken,
515 0 : ) -> Result<Response<Body>, ApiError> {
516 0 : check_permission(&request, None)?;
517 0 : let config = get_config(&request);
518 0 : json_response(StatusCode::OK, StatusResponse { id: config.id })
519 0 : }
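// A hypothetical example of the serialized response, following the construction of
// `StatusResponse { id: config.id }` above (the id value is illustrative):
// {"id":1}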
520 :
521 0 : async fn reload_auth_validation_keys_handler(
522 0 : request: Request<Body>,
523 0 : _cancel: CancellationToken,
524 0 : ) -> Result<Response<Body>, ApiError> {
525 0 : check_permission(&request, None)?;
526 0 : let config = get_config(&request);
527 0 : let state = get_state(&request);
528 0 : let Some(shared_auth) = &state.auth else {
529 0 : return json_response(StatusCode::BAD_REQUEST, ());
530 : };
531 : // unwrap is ok because check is performed when creating config, so path is set and exists
532 0 : let key_path = config.auth_validation_public_key_path.as_ref().unwrap();
533 0 : info!("Reloading public key(s) for verifying JWT tokens from {key_path:?}");
534 :
535 0 : match utils::auth::JwtAuth::from_key_path(key_path) {
536 0 : Ok(new_auth) => {
537 0 : shared_auth.swap(new_auth);
538 0 : json_response(StatusCode::OK, ())
539 : }
540 0 : Err(e) => {
541 0 : let err_msg = "Error reloading public keys";
542 0 : warn!("Error reloading public keys from {key_path:?}: {e:}");
543 0 : json_response(
544 0 : StatusCode::INTERNAL_SERVER_ERROR,
545 0 : HttpErrorBody::from_msg(err_msg.to_string()),
546 0 : )
547 : }
548 : }
549 0 : }
550 :
551 0 : async fn timeline_create_handler(
552 0 : mut request: Request<Body>,
553 0 : _cancel: CancellationToken,
554 0 : ) -> Result<Response<Body>, ApiError> {
555 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
556 0 : let request_data: TimelineCreateRequest = json_request(&mut request).await?;
557 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
558 :
559 0 : let new_timeline_id = request_data.new_timeline_id;
560 : // fill in the default pg_version if not provided & convert request into domain model
561 0 : let params: tenant::CreateTimelineParams = match request_data.mode {
562 : TimelineCreateRequestMode::Bootstrap {
563 0 : existing_initdb_timeline_id,
564 0 : pg_version,
565 0 : } => tenant::CreateTimelineParams::Bootstrap(tenant::CreateTimelineParamsBootstrap {
566 0 : new_timeline_id,
567 0 : existing_initdb_timeline_id,
568 0 : pg_version: pg_version.unwrap_or(DEFAULT_PG_VERSION),
569 0 : }),
570 : TimelineCreateRequestMode::Branch {
571 0 : ancestor_timeline_id,
572 0 : ancestor_start_lsn,
573 0 : pg_version: _,
574 0 : } => tenant::CreateTimelineParams::Branch(tenant::CreateTimelineParamsBranch {
575 0 : new_timeline_id,
576 0 : ancestor_timeline_id,
577 0 : ancestor_start_lsn,
578 0 : }),
579 : TimelineCreateRequestMode::ImportPgdata {
580 : import_pgdata:
581 : TimelineCreateRequestModeImportPgdata {
582 0 : location,
583 0 : idempotency_key,
584 0 : },
585 0 : } => tenant::CreateTimelineParams::ImportPgdata(tenant::CreateTimelineParamsImportPgdata {
586 0 : idempotency_key: import_pgdata::index_part_format::IdempotencyKey::new(
587 0 : idempotency_key.0,
588 0 : ),
589 0 : new_timeline_id,
590 : location: {
591 0 : use import_pgdata::index_part_format::Location;
592 0 : use pageserver_api::models::ImportPgdataLocation;
593 0 : match location {
594 : #[cfg(feature = "testing")]
595 0 : ImportPgdataLocation::LocalFs { path } => Location::LocalFs { path },
596 : ImportPgdataLocation::AwsS3 {
597 0 : region,
598 0 : bucket,
599 0 : key,
600 0 : } => Location::AwsS3 {
601 0 : region,
602 0 : bucket,
603 0 : key,
604 0 : },
605 : }
606 : },
607 : }),
608 : };
609 :
610 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
611 0 :
612 0 : let state = get_state(&request);
613 :
614 0 : async {
615 0 : let tenant = state
616 0 : .tenant_manager
617 0 : .get_attached_tenant_shard(tenant_shard_id)?;
618 :
619 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
620 :
621 : // earlier versions of the code had pg_version and ancestor_lsn in the span
622 : // => continue to provide that information, but through a log message that doesn't require us to destructure
623 0 : tracing::info!(?params, "creating timeline");
624 :
625 0 : match tenant
626 0 : .create_timeline(params, state.broker_client.clone(), &ctx)
627 0 : .await
628 : {
629 0 : Ok(new_timeline) => {
630 : // Created. Construct a TimelineInfo for it.
631 0 : let timeline_info = build_timeline_info_common(
632 0 : &new_timeline,
633 0 : &ctx,
634 0 : tenant::timeline::GetLogicalSizePriority::User,
635 0 : )
636 0 : .await
637 0 : .map_err(ApiError::InternalServerError)?;
638 0 : json_response(StatusCode::CREATED, timeline_info)
639 : }
640 0 : Err(_) if tenant.cancel.is_cancelled() => {
641 0 : // In case we get some ugly error type during shutdown, cast it into a clean 503.
642 0 : json_response(
643 0 : StatusCode::SERVICE_UNAVAILABLE,
644 0 : HttpErrorBody::from_msg("Tenant shutting down".to_string()),
645 0 : )
646 : }
647 0 : Err(e @ tenant::CreateTimelineError::Conflict) => {
648 0 : json_response(StatusCode::CONFLICT, HttpErrorBody::from_msg(e.to_string()))
649 : }
650 0 : Err(e @ tenant::CreateTimelineError::AlreadyCreating) => json_response(
651 0 : StatusCode::TOO_MANY_REQUESTS,
652 0 : HttpErrorBody::from_msg(e.to_string()),
653 0 : ),
654 0 : Err(tenant::CreateTimelineError::AncestorLsn(err)) => json_response(
655 0 : StatusCode::NOT_ACCEPTABLE,
656 0 : HttpErrorBody::from_msg(format!("{err:#}")),
657 0 : ),
658 0 : Err(e @ tenant::CreateTimelineError::AncestorNotActive) => json_response(
659 0 : StatusCode::SERVICE_UNAVAILABLE,
660 0 : HttpErrorBody::from_msg(e.to_string()),
661 0 : ),
662 0 : Err(e @ tenant::CreateTimelineError::AncestorArchived) => json_response(
663 0 : StatusCode::NOT_ACCEPTABLE,
664 0 : HttpErrorBody::from_msg(e.to_string()),
665 0 : ),
666 0 : Err(tenant::CreateTimelineError::ShuttingDown) => json_response(
667 0 : StatusCode::SERVICE_UNAVAILABLE,
668 0 : HttpErrorBody::from_msg("tenant shutting down".to_string()),
669 0 : ),
670 0 : Err(tenant::CreateTimelineError::Other(err)) => Err(ApiError::InternalServerError(err)),
671 : }
672 0 : }
673 0 : .instrument(info_span!("timeline_create",
674 : tenant_id = %tenant_shard_id.tenant_id,
675 0 : shard_id = %tenant_shard_id.shard_slug(),
676 : timeline_id = %new_timeline_id,
677 : ))
678 0 : .await
679 0 : }
680 :
681 0 : async fn timeline_list_handler(
682 0 : request: Request<Body>,
683 0 : _cancel: CancellationToken,
684 0 : ) -> Result<Response<Body>, ApiError> {
685 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
686 0 : let include_non_incremental_logical_size: Option<bool> =
687 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
688 0 : let force_await_initial_logical_size: Option<bool> =
689 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
690 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
691 :
692 0 : let state = get_state(&request);
693 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
694 :
695 0 : let response_data = async {
696 0 : let tenant = state
697 0 : .tenant_manager
698 0 : .get_attached_tenant_shard(tenant_shard_id)?;
699 :
700 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
701 :
702 0 : let timelines = tenant.list_timelines();
703 0 :
704 0 : let mut response_data = Vec::with_capacity(timelines.len());
705 0 : for timeline in timelines {
706 0 : let timeline_info = build_timeline_info(
707 0 : &timeline,
708 0 : include_non_incremental_logical_size.unwrap_or(false),
709 0 : force_await_initial_logical_size.unwrap_or(false),
710 0 : &ctx,
711 0 : )
712 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
713 0 : .await
714 0 : .context("Failed to build timeline info")
715 0 : .map_err(ApiError::InternalServerError)?;
716 :
717 0 : response_data.push(timeline_info);
718 : }
719 0 : Ok::<Vec<TimelineInfo>, ApiError>(response_data)
720 0 : }
721 0 : .instrument(info_span!("timeline_list",
722 : tenant_id = %tenant_shard_id.tenant_id,
723 0 : shard_id = %tenant_shard_id.shard_slug()))
724 0 : .await?;
725 :
726 0 : json_response(StatusCode::OK, response_data)
727 0 : }
728 :
729 0 : async fn timeline_and_offloaded_list_handler(
730 0 : request: Request<Body>,
731 0 : _cancel: CancellationToken,
732 0 : ) -> Result<Response<Body>, ApiError> {
733 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
734 0 : let include_non_incremental_logical_size: Option<bool> =
735 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
736 0 : let force_await_initial_logical_size: Option<bool> =
737 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
738 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
739 :
740 0 : let state = get_state(&request);
741 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
742 :
743 0 : let response_data = async {
744 0 : let tenant = state
745 0 : .tenant_manager
746 0 : .get_attached_tenant_shard(tenant_shard_id)?;
747 :
748 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
749 :
750 0 : let (timelines, offloadeds) = tenant.list_timelines_and_offloaded();
751 0 :
752 0 : let mut timeline_infos = Vec::with_capacity(timelines.len());
753 0 : for timeline in timelines {
754 0 : let timeline_info = build_timeline_info(
755 0 : &timeline,
756 0 : include_non_incremental_logical_size.unwrap_or(false),
757 0 : force_await_initial_logical_size.unwrap_or(false),
758 0 : &ctx,
759 0 : )
760 0 : .instrument(info_span!("build_timeline_info", timeline_id = %timeline.timeline_id))
761 0 : .await
762 0 : .context("Failed to build timeline info")
763 0 : .map_err(ApiError::InternalServerError)?;
764 :
765 0 : timeline_infos.push(timeline_info);
766 : }
767 0 : let offloaded_infos = offloadeds
768 0 : .into_iter()
769 0 : .map(|offloaded| build_timeline_offloaded_info(&offloaded))
770 0 : .collect::<Vec<_>>();
771 0 : let res = TimelinesInfoAndOffloaded {
772 0 : timelines: timeline_infos,
773 0 : offloaded: offloaded_infos,
774 0 : };
775 0 : Ok::<TimelinesInfoAndOffloaded, ApiError>(res)
776 0 : }
777 0 : .instrument(info_span!("timeline_and_offloaded_list",
778 : tenant_id = %tenant_shard_id.tenant_id,
779 0 : shard_id = %tenant_shard_id.shard_slug()))
780 0 : .await?;
781 :
782 0 : json_response(StatusCode::OK, response_data)
783 0 : }
784 :
785 0 : async fn timeline_preserve_initdb_handler(
786 0 : request: Request<Body>,
787 0 : _cancel: CancellationToken,
788 0 : ) -> Result<Response<Body>, ApiError> {
789 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
790 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
791 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
792 0 : let state = get_state(&request);
793 :
794 : // Part of the process for disaster recovery from safekeeper-stored WAL:
795 : // If we don't recover into a new timeline but want to keep the timeline ID,
796 : // then the initdb archive is deleted. This endpoint copies it to a different
797 : // location where timeline recreation can find it.
798 :
799 0 : async {
800 0 : let tenant = state
801 0 : .tenant_manager
802 0 : .get_attached_tenant_shard(tenant_shard_id)?;
803 :
804 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
805 :
806 0 : timeline
807 0 : .preserve_initdb_archive()
808 0 : .await
809 0 : .context("preserving initdb archive")
810 0 : .map_err(ApiError::InternalServerError)?;
811 :
812 0 : Ok::<_, ApiError>(())
813 0 : }
814 0 : .instrument(info_span!("timeline_preserve_initdb_archive",
815 : tenant_id = %tenant_shard_id.tenant_id,
816 0 : shard_id = %tenant_shard_id.shard_slug(),
817 : %timeline_id))
818 0 : .await?;
819 :
820 0 : json_response(StatusCode::OK, ())
821 0 : }
822 :
823 0 : async fn timeline_archival_config_handler(
824 0 : mut request: Request<Body>,
825 0 : _cancel: CancellationToken,
826 0 : ) -> Result<Response<Body>, ApiError> {
827 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
828 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
829 :
830 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
831 :
832 0 : let request_data: TimelineArchivalConfigRequest = json_request(&mut request).await?;
833 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
834 0 : let state = get_state(&request);
835 :
836 0 : async {
837 0 : let tenant = state
838 0 : .tenant_manager
839 0 : .get_attached_tenant_shard(tenant_shard_id)?;
840 :
841 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
842 :
843 0 : tenant
844 0 : .apply_timeline_archival_config(
845 0 : timeline_id,
846 0 : request_data.state,
847 0 : state.broker_client.clone(),
848 0 : ctx,
849 0 : )
850 0 : .await?;
851 0 : Ok::<_, ApiError>(())
852 0 : }
853 0 : .instrument(info_span!("timeline_archival_config",
854 : tenant_id = %tenant_shard_id.tenant_id,
855 0 : shard_id = %tenant_shard_id.shard_slug(),
856 : state = ?request_data.state,
857 : %timeline_id))
858 0 : .await?;
859 :
860 0 : json_response(StatusCode::OK, ())
861 0 : }
862 :
863 : /// This API is used to patch the index part of a timeline. You must ensure such patches are safe to apply. Use this API as an emergency
864 : /// measure only.
865 : ///
866 : /// Some examples of safe patches:
867 : /// - Increase the gc_cutoff and gc_compaction_cutoff to a larger value in case a bug failed to bump the cutoff and caused read errors.
868 : /// - Force set the index part to use reldir v2 (migrating/migrated).
869 : ///
870 : /// Some examples of unsafe patches:
871 : /// - Force set the index part from v2 to v1 (legacy). This will cause the code path to ignore anything written to the new keyspace and cause
872 : /// errors.
873 : /// - Decrease the gc_cutoff without validating that the data really exists. It will cause read errors in the background.
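///
/// A hypothetical request body, sketched from the `TimelinePatchIndexPartRequest` fields this
/// handler reads below; the values (including the enum spelling for `rel_size_migration`) are
/// illustrative assumptions, not a spec:
/// ```ignore
/// {
///     "rel_size_migration": "migrating",
///     "gc_compaction_last_completed_lsn": "0/169AD58",
///     "applied_gc_cutoff_lsn": "0/169AD58",
///     "force_index_update": true
/// }
/// ```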
874 0 : async fn timeline_patch_index_part_handler(
875 0 : mut request: Request<Body>,
876 0 : _cancel: CancellationToken,
877 0 : ) -> Result<Response<Body>, ApiError> {
878 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
879 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
880 :
881 0 : let request_data: TimelinePatchIndexPartRequest = json_request(&mut request).await?;
882 0 : check_permission(&request, None)?; // require global permission for this request
883 0 : let state = get_state(&request);
884 :
885 0 : async {
886 0 : let timeline =
887 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
888 0 : .await?;
889 :
890 0 : if let Some(rel_size_migration) = request_data.rel_size_migration {
891 0 : timeline
892 0 : .update_rel_size_v2_status(rel_size_migration)
893 0 : .map_err(ApiError::InternalServerError)?;
894 0 : }
895 :
896 0 : if let Some(gc_compaction_last_completed_lsn) =
897 0 : request_data.gc_compaction_last_completed_lsn
898 : {
899 0 : timeline
900 0 : .update_gc_compaction_state(GcCompactionState {
901 0 : last_completed_lsn: gc_compaction_last_completed_lsn,
902 0 : })
903 0 : .map_err(ApiError::InternalServerError)?;
904 0 : }
905 :
906 0 : if let Some(applied_gc_cutoff_lsn) = request_data.applied_gc_cutoff_lsn {
907 0 : {
908 0 : let guard = timeline.applied_gc_cutoff_lsn.lock_for_write();
909 0 : guard.store_and_unlock(applied_gc_cutoff_lsn);
910 0 : }
911 0 : }
912 :
913 0 : if request_data.force_index_update {
914 0 : timeline
915 0 : .remote_client
916 0 : .force_schedule_index_upload()
917 0 : .context("force schedule index upload")
918 0 : .map_err(ApiError::InternalServerError)?;
919 0 : }
920 :
921 0 : Ok::<_, ApiError>(())
922 0 : }
923 0 : .instrument(info_span!("timeline_patch_index_part",
924 : tenant_id = %tenant_shard_id.tenant_id,
925 0 : shard_id = %tenant_shard_id.shard_slug(),
926 : %timeline_id))
927 0 : .await?;
928 :
929 0 : json_response(StatusCode::OK, ())
930 0 : }
931 :
932 0 : async fn timeline_detail_handler(
933 0 : request: Request<Body>,
934 0 : _cancel: CancellationToken,
935 0 : ) -> Result<Response<Body>, ApiError> {
936 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
937 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
938 0 : let include_non_incremental_logical_size: Option<bool> =
939 0 : parse_query_param(&request, "include-non-incremental-logical-size")?;
940 0 : let force_await_initial_logical_size: Option<bool> =
941 0 : parse_query_param(&request, "force-await-initial-logical-size")?;
942 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
943 :
944 : // Logical size calculation needs downloading.
945 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
946 0 : let state = get_state(&request);
947 :
948 0 : let timeline_info = async {
949 0 : let tenant = state
950 0 : .tenant_manager
951 0 : .get_attached_tenant_shard(tenant_shard_id)?;
952 :
953 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
954 :
955 0 : let timeline = tenant.get_timeline(timeline_id, false)?;
956 0 : let ctx = &ctx.with_scope_timeline(&timeline);
957 :
958 0 : let timeline_info = build_timeline_info(
959 0 : &timeline,
960 0 : include_non_incremental_logical_size.unwrap_or(false),
961 0 : force_await_initial_logical_size.unwrap_or(false),
962 0 : ctx,
963 0 : )
964 0 : .await
965 0 : .context("get local timeline info")
966 0 : .map_err(ApiError::InternalServerError)?;
967 :
968 0 : Ok::<_, ApiError>(timeline_info)
969 0 : }
970 0 : .instrument(info_span!("timeline_detail",
971 : tenant_id = %tenant_shard_id.tenant_id,
972 0 : shard_id = %tenant_shard_id.shard_slug(),
973 : %timeline_id))
974 0 : .await?;
975 :
976 0 : json_response(StatusCode::OK, timeline_info)
977 0 : }
978 :
979 0 : async fn get_lsn_by_timestamp_handler(
980 0 : request: Request<Body>,
981 0 : cancel: CancellationToken,
982 0 : ) -> Result<Response<Body>, ApiError> {
983 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
984 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
985 0 : let state = get_state(&request);
986 0 :
987 0 : if !tenant_shard_id.is_shard_zero() {
988 : // Requires SLRU contents, which are only stored on shard zero
989 0 : return Err(ApiError::BadRequest(anyhow!(
990 0 : "Size calculations are only available on shard zero"
991 0 : )));
992 0 : }
993 :
994 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
995 0 : let timestamp_raw = must_get_query_param(&request, "timestamp")?;
996 0 : let timestamp = humantime::parse_rfc3339(&timestamp_raw)
997 0 : .with_context(|| format!("Invalid time: {:?}", timestamp_raw))
998 0 : .map_err(ApiError::BadRequest)?;
999 0 : let timestamp_pg = postgres_ffi::to_pg_timestamp(timestamp);
1000 :
1001 0 : let with_lease = parse_query_param(&request, "with_lease")?.unwrap_or(false);
1002 :
1003 0 : let timeline =
1004 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1005 0 : .await?;
1006 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1007 0 : .with_scope_timeline(&timeline);
1008 0 : let result = timeline
1009 0 : .find_lsn_for_timestamp(timestamp_pg, &cancel, &ctx)
1010 0 : .await?;
1011 :
1012 : #[derive(serde::Serialize, Debug)]
1013 : struct Result {
1014 : lsn: Lsn,
1015 : kind: &'static str,
1016 : #[serde(default)]
1017 : #[serde(skip_serializing_if = "Option::is_none")]
1018 : #[serde(flatten)]
1019 : lease: Option<LsnLease>,
1020 : }
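// A hypothetical example of the serialized response (values are illustrative). Because of the
// `#[serde(flatten)]` / `skip_serializing_if` attributes above, the lease fields are inlined
// into the top-level object when a lease was granted and omitted entirely otherwise:
// {"lsn":"0/169AD58","kind":"present"}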
1021 0 : let (lsn, kind) = match result {
1022 0 : LsnForTimestamp::Present(lsn) => (lsn, "present"),
1023 0 : LsnForTimestamp::Future(lsn) => (lsn, "future"),
1024 0 : LsnForTimestamp::Past(lsn) => (lsn, "past"),
1025 0 : LsnForTimestamp::NoData(lsn) => (lsn, "nodata"),
1026 : };
1027 :
1028 0 : let lease = if with_lease {
1029 0 : timeline
1030 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length_for_ts(), &ctx)
1031 0 : .inspect_err(|_| {
1032 0 : warn!("fail to grant a lease to {}", lsn);
1033 0 : })
1034 0 : .ok()
1035 : } else {
1036 0 : None
1037 : };
1038 :
1039 0 : let result = Result { lsn, kind, lease };
1040 0 : let valid_until = result
1041 0 : .lease
1042 0 : .as_ref()
1043 0 : .map(|l| humantime::format_rfc3339_millis(l.valid_until).to_string());
1044 0 : tracing::info!(
1045 : lsn=?result.lsn,
1046 : kind=%result.kind,
1047 : timestamp=%timestamp_raw,
1048 : valid_until=?valid_until,
1049 0 : "lsn_by_timestamp finished"
1050 : );
1051 0 : json_response(StatusCode::OK, result)
1052 0 : }
1053 :
1054 0 : async fn get_timestamp_of_lsn_handler(
1055 0 : request: Request<Body>,
1056 0 : _cancel: CancellationToken,
1057 0 : ) -> Result<Response<Body>, ApiError> {
1058 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1059 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1060 0 : let state = get_state(&request);
1061 0 :
1062 0 : if !tenant_shard_id.is_shard_zero() {
1063 : // Requires SLRU contents, which are only stored on shard zero
1064 0 : return Err(ApiError::BadRequest(anyhow!(
1065 0 : "Size calculations are only available on shard zero"
1066 0 : )));
1067 0 : }
1068 :
1069 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1070 :
1071 0 : let lsn_str = must_get_query_param(&request, "lsn")?;
1072 0 : let lsn = Lsn::from_str(&lsn_str)
1073 0 : .with_context(|| format!("Invalid LSN: {lsn_str:?}"))
1074 0 : .map_err(ApiError::BadRequest)?;
1075 :
1076 0 : let timeline =
1077 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1078 0 : .await?;
1079 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1080 0 : .with_scope_timeline(&timeline);
1081 0 : let result = timeline.get_timestamp_for_lsn(lsn, &ctx).await?;
1082 :
1083 0 : match result {
1084 0 : Some(time) => {
1085 0 : let time = format_rfc3339(
1086 0 : postgres_ffi::try_from_pg_timestamp(time).map_err(ApiError::InternalServerError)?,
1087 : )
1088 0 : .to_string();
1089 0 : json_response(StatusCode::OK, time)
1090 : }
1091 0 : None => Err(ApiError::NotFound(
1092 0 : anyhow::anyhow!("Timestamp for lsn {} not found", lsn).into(),
1093 0 : )),
1094 : }
1095 0 : }
1096 :
1097 0 : async fn timeline_delete_handler(
1098 0 : request: Request<Body>,
1099 0 : _cancel: CancellationToken,
1100 0 : ) -> Result<Response<Body>, ApiError> {
1101 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1102 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1103 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1104 :
1105 0 : let state = get_state(&request);
1106 :
1107 0 : let tenant = state
1108 0 : .tenant_manager
1109 0 : .get_attached_tenant_shard(tenant_shard_id)
1110 0 : .map_err(|e| {
1111 0 : match e {
1112 : // GetTenantError has a built-in conversion to ApiError, but in this context we don't
1113 : // want to treat missing tenants as 404, to avoid ambiguity with successful deletions.
1114 : GetTenantError::NotFound(_) | GetTenantError::ShardNotFound(_) => {
1115 0 : ApiError::PreconditionFailed(
1116 0 : "Requested tenant is missing".to_string().into_boxed_str(),
1117 0 : )
1118 : }
1119 0 : e => e.into(),
1120 : }
1121 0 : })?;
1122 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1123 0 : tenant.delete_timeline(timeline_id).instrument(info_span!("timeline_delete", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))
1124 0 : .await?;
1125 :
1126 0 : json_response(StatusCode::ACCEPTED, ())
1127 0 : }
1128 :
1129 0 : async fn tenant_reset_handler(
1130 0 : request: Request<Body>,
1131 0 : _cancel: CancellationToken,
1132 0 : ) -> Result<Response<Body>, ApiError> {
1133 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1134 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1135 :
1136 0 : let drop_cache: Option<bool> = parse_query_param(&request, "drop_cache")?;
1137 :
1138 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1139 0 : let state = get_state(&request);
1140 0 : state
1141 0 : .tenant_manager
1142 0 : .reset_tenant(tenant_shard_id, drop_cache.unwrap_or(false), &ctx)
1143 0 : .await
1144 0 : .map_err(ApiError::InternalServerError)?;
1145 :
1146 0 : json_response(StatusCode::OK, ())
1147 0 : }
1148 :
1149 0 : async fn tenant_list_handler(
1150 0 : request: Request<Body>,
1151 0 : _cancel: CancellationToken,
1152 0 : ) -> Result<Response<Body>, ApiError> {
1153 0 : check_permission(&request, None)?;
1154 0 : let state = get_state(&request);
1155 :
1156 0 : let response_data = state
1157 0 : .tenant_manager
1158 0 : .list_tenants()
1159 0 : .map_err(|_| {
1160 0 : ApiError::ResourceUnavailable("Tenant map is initializing or shutting down".into())
1161 0 : })?
1162 0 : .iter()
1163 0 : .map(|(id, state, gen_)| TenantInfo {
1164 0 : id: *id,
1165 0 : state: state.clone(),
1166 0 : current_physical_size: None,
1167 0 : attachment_status: state.attachment_status(),
1168 0 : generation: (*gen_)
1169 0 : .into()
1170 0 : .expect("Tenants are always attached with a generation"),
1171 0 : gc_blocking: None,
1172 0 : })
1173 0 : .collect::<Vec<TenantInfo>>();
1174 0 :
1175 0 : json_response(StatusCode::OK, response_data)
1176 0 : }
1177 :
1178 0 : async fn tenant_status(
1179 0 : request: Request<Body>,
1180 0 : _cancel: CancellationToken,
1181 0 : ) -> Result<Response<Body>, ApiError> {
1182 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1183 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1184 0 : let state = get_state(&request);
1185 0 :
1186 0 : // In tests, sometimes we want to query the state of a tenant without auto-activating it if it's currently waiting.
1187 0 : let activate = true;
1188 : #[cfg(feature = "testing")]
1189 0 : let activate = parse_query_param(&request, "activate")?.unwrap_or(activate);
1190 :
1191 0 : let tenant_info = async {
1192 0 : let tenant = state
1193 0 : .tenant_manager
1194 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1195 :
1196 0 : if activate {
1197 : // This is advisory: we prefer to let the tenant activate on-demand when this function is
1198 : // called, but it is still valid to return 200 and describe the current state of the tenant
1199 : // if it doesn't make it into an active state.
1200 0 : tenant
1201 0 : .wait_to_become_active(ACTIVE_TENANT_TIMEOUT)
1202 0 : .await
1203 0 : .ok();
1204 0 : }
1205 :
1206 : // Calculate total physical size of all timelines
1207 0 : let mut current_physical_size = 0;
1208 0 : for timeline in tenant.list_timelines().iter() {
1209 0 : current_physical_size += timeline.layer_size_sum().await;
1210 : }
1211 :
1212 0 : let state = tenant.current_state();
1213 0 : Result::<_, ApiError>::Ok(TenantDetails {
1214 0 : tenant_info: TenantInfo {
1215 0 : id: tenant_shard_id,
1216 0 : state: state.clone(),
1217 0 : current_physical_size: Some(current_physical_size),
1218 0 : attachment_status: state.attachment_status(),
1219 0 : generation: tenant
1220 0 : .generation()
1221 0 : .into()
1222 0 : .expect("Tenants are always attached with a generation"),
1223 0 : gc_blocking: tenant.gc_block.summary().map(|x| format!("{x:?}")),
1224 0 : },
1225 0 : walredo: tenant.wal_redo_manager_status(),
1226 0 : timelines: tenant.list_timeline_ids(),
1227 0 : })
1228 0 : }
1229 0 : .instrument(info_span!("tenant_status_handler",
1230 : tenant_id = %tenant_shard_id.tenant_id,
1231 0 : shard_id = %tenant_shard_id.shard_slug()))
1232 0 : .await?;
1233 :
1234 0 : json_response(StatusCode::OK, tenant_info)
1235 0 : }
1236 :
1237 0 : async fn tenant_delete_handler(
1238 0 : request: Request<Body>,
1239 0 : _cancel: CancellationToken,
1240 0 : ) -> Result<Response<Body>, ApiError> {
1241 : // TODO openapi spec
1242 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1243 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1244 :
1245 0 : let state = get_state(&request);
1246 0 :
1247 0 : state
1248 0 : .tenant_manager
1249 0 : .delete_tenant(tenant_shard_id)
1250 0 : .instrument(info_span!("tenant_delete_handler",
1251 : tenant_id = %tenant_shard_id.tenant_id,
1252 0 : shard_id = %tenant_shard_id.shard_slug()
1253 : ))
1254 0 : .await?;
1255 :
1256 0 : json_response(StatusCode::OK, ())
1257 0 : }
1258 :
1259 : /// HTTP endpoint to query the current tenant_size of a tenant.
1260 : ///
1261 : /// This is not used by consumption metrics under [`crate::consumption_metrics`], but can be used
1262 : /// to debug any of the calculations. Requires `tenant_id` request parameter, supports
1263 : /// `inputs_only=true|false` (default false) which supports debugging failure to calculate model
1264 : /// values.
1265 : ///
1266 : /// 'retention_period' query parameter overrides the cutoff that is used to calculate the size
1267 : /// (only if it is shorter than the real cutoff).
1268 : ///
1269 : /// Note: we don't update the cached size and prometheus metric here.
1270 : /// The retention period might be different, and it's nice to have a method to just calculate it
1271 : /// without modifying anything anyway.
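///
/// A hypothetical invocation (the exact route is registered in the router outside this
/// excerpt; the path and values are illustrative assumptions):
/// ```ignore
/// GET /v1/tenant/<tenant_shard_id>/synthetic_size?inputs_only=true&retention_period=3600
/// ```
/// Sending `Accept: text/html` instead returns the HTML report produced by
/// `synthetic_size_html_response` below, which is incompatible with `inputs_only=true`.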
1272 0 : async fn tenant_size_handler(
1273 0 : request: Request<Body>,
1274 0 : cancel: CancellationToken,
1275 0 : ) -> Result<Response<Body>, ApiError> {
1276 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1277 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1278 0 : let inputs_only: Option<bool> = parse_query_param(&request, "inputs_only")?;
1279 0 : let retention_period: Option<u64> = parse_query_param(&request, "retention_period")?;
1280 0 : let headers = request.headers();
1281 0 : let state = get_state(&request);
1282 0 :
1283 0 : if !tenant_shard_id.is_shard_zero() {
1284 0 : return Err(ApiError::BadRequest(anyhow!(
1285 0 : "Size calculations are only available on shard zero"
1286 0 : )));
1287 0 : }
1288 0 :
1289 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
1290 0 : let tenant = state
1291 0 : .tenant_manager
1292 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1293 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1294 :
1295 : // this can be a long operation
1296 0 : let inputs = tenant
1297 0 : .gather_size_inputs(
1298 0 : retention_period,
1299 0 : LogicalSizeCalculationCause::TenantSizeHandler,
1300 0 : &cancel,
1301 0 : &ctx,
1302 0 : )
1303 0 : .await
1304 0 : .map_err(|e| match e {
1305 0 : crate::tenant::size::CalculateSyntheticSizeError::Cancelled => ApiError::ShuttingDown,
1306 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
1307 0 : })?;
1308 :
1309 0 : let mut sizes = None;
1310 0 : let accepts_html = headers
1311 0 : .get(header::ACCEPT)
1312 0 : .map(|v| v == "text/html")
1313 0 : .unwrap_or_default();
1314 0 : if !inputs_only.unwrap_or(false) {
1315 0 : let storage_model = inputs.calculate_model();
1316 0 : let size = storage_model.calculate();
1317 0 :
1318 0 : // If request header expects html, return html
1319 0 : if accepts_html {
1320 0 : return synthetic_size_html_response(inputs, storage_model, size);
1321 0 : }
1322 0 : sizes = Some(size);
1323 0 : } else if accepts_html {
1324 0 : return Err(ApiError::BadRequest(anyhow!(
1325 0 : "inputs_only parameter is incompatible with html output request"
1326 0 : )));
1327 0 : }
1328 :
1329 : /// The type resides in the pageserver so as not to expose `ModelInputs`.
1330 : #[derive(serde::Serialize)]
1331 : struct TenantHistorySize {
1332 : id: TenantId,
1333 : /// Size is a mixture of WAL and logical size, so the unit is bytes.
1334 : ///
1335 : /// Will be null if `?inputs_only=true` was given.
1336 : size: Option<u64>,
1337 : /// Size of each segment used in the model.
1338 : /// Will be null if `?inputs_only=true` was given.
1339 : segment_sizes: Option<Vec<tenant_size_model::SegmentSizeResult>>,
1340 : inputs: crate::tenant::size::ModelInputs,
1341 : }
1342 :
1343 0 : json_response(
1344 0 : StatusCode::OK,
1345 0 : TenantHistorySize {
1346 0 : id: tenant_shard_id.tenant_id,
1347 0 : size: sizes.as_ref().map(|x| x.total_size),
1348 0 : segment_sizes: sizes.map(|x| x.segments),
1349 0 : inputs,
1350 0 : },
1351 0 : )
1352 0 : }
1353 :
1354 0 : async fn tenant_shard_split_handler(
1355 0 : mut request: Request<Body>,
1356 0 : _cancel: CancellationToken,
1357 0 : ) -> Result<Response<Body>, ApiError> {
1358 0 : let req: TenantShardSplitRequest = json_request(&mut request).await?;
1359 :
1360 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1361 0 : let state = get_state(&request);
1362 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1363 :
1364 0 : let tenant = state
1365 0 : .tenant_manager
1366 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1367 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1368 :
1369 0 : let new_shards = state
1370 0 : .tenant_manager
1371 0 : .shard_split(
1372 0 : tenant,
1373 0 : ShardCount::new(req.new_shard_count),
1374 0 : req.new_stripe_size,
1375 0 : &ctx,
1376 0 : )
1377 0 : .await
1378 0 : .map_err(ApiError::InternalServerError)?;
1379 :
1380 0 : json_response(StatusCode::OK, TenantShardSplitResponse { new_shards })
1381 0 : }
1382 :
1383 0 : async fn layer_map_info_handler(
1384 0 : request: Request<Body>,
1385 0 : _cancel: CancellationToken,
1386 0 : ) -> Result<Response<Body>, ApiError> {
1387 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1388 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1389 0 : let reset: LayerAccessStatsReset =
1390 0 : parse_query_param(&request, "reset")?.unwrap_or(LayerAccessStatsReset::NoReset);
1391 0 : let state = get_state(&request);
1392 0 :
1393 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1394 :
1395 0 : let timeline =
1396 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1397 0 : .await?;
1398 0 : let layer_map_info = timeline
1399 0 : .layer_map_info(reset)
1400 0 : .await
1401 0 : .map_err(|_shutdown| ApiError::ShuttingDown)?;
1402 :
1403 0 : json_response(StatusCode::OK, layer_map_info)
1404 0 : }
1405 :
1406 : #[instrument(skip_all, fields(tenant_id, shard_id, timeline_id, layer_name))]
1407 : async fn timeline_layer_scan_disposable_keys(
1408 : request: Request<Body>,
1409 : cancel: CancellationToken,
1410 : ) -> Result<Response<Body>, ApiError> {
1411 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1412 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1413 : let layer_name: LayerName = parse_request_param(&request, "layer_name")?;
1414 :
1415 : tracing::Span::current().record(
1416 : "tenant_id",
1417 : tracing::field::display(&tenant_shard_id.tenant_id),
1418 : );
1419 : tracing::Span::current().record(
1420 : "shard_id",
1421 : tracing::field::display(tenant_shard_id.shard_slug()),
1422 : );
1423 : tracing::Span::current().record("timeline_id", tracing::field::display(&timeline_id));
1424 : tracing::Span::current().record("layer_name", tracing::field::display(&layer_name));
1425 :
1426 : let state = get_state(&request);
1427 :
1428 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1429 :
1430 : // technically the timeline need not be active for this scan to complete
1431 : let timeline =
1432 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1433 : .await?;
1434 :
1435 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1436 : .with_scope_timeline(&timeline);
1437 :
1438 : let guard = timeline.layers.read().await;
1439 : let Some(layer) = guard.try_get_from_key(&layer_name.clone().into()) else {
1440 : return Err(ApiError::NotFound(
1441 : anyhow::anyhow!("Layer {tenant_shard_id}/{timeline_id}/{layer_name} not found").into(),
1442 : ));
1443 : };
1444 :
1445 : let resident_layer = layer
1446 : .download_and_keep_resident(&ctx)
1447 : .await
1448 0 : .map_err(|err| match err {
1449 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1450 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1451 0 : ApiError::ShuttingDown
1452 : }
1453 : tenant::storage_layer::layer::DownloadError::ContextAndConfigReallyDeniesDownloads
1454 : | tenant::storage_layer::layer::DownloadError::DownloadRequired
1455 : | tenant::storage_layer::layer::DownloadError::NotFile(_)
1456 : | tenant::storage_layer::layer::DownloadError::DownloadFailed
1457 : | tenant::storage_layer::layer::DownloadError::PreStatFailed(_) => {
1458 0 : ApiError::InternalServerError(err.into())
1459 : }
1460 : #[cfg(test)]
1461 : tenant::storage_layer::layer::DownloadError::Failpoint(_) => {
1462 0 : ApiError::InternalServerError(err.into())
1463 : }
1464 0 : })?;
1465 :
1466 : let keys = resident_layer
1467 : .load_keys(&ctx)
1468 : .await
1469 : .map_err(ApiError::InternalServerError)?;
1470 :
1471 : let shard_identity = timeline.get_shard_identity();
1472 :
1473 : let mut disposable_count = 0;
1474 : let mut not_disposable_count = 0;
1475 : let cancel = cancel.clone();
1476 : for (i, key) in keys.into_iter().enumerate() {
1477 : if shard_identity.is_key_disposable(&key) {
1478 : disposable_count += 1;
1479 : tracing::debug!(key = %key, key.dbg=?key, "disposable key");
1480 : } else {
1481 : not_disposable_count += 1;
1482 : }
1483 : #[allow(clippy::collapsible_if)]
1484 : if i % 10000 == 0 {
1485 : if cancel.is_cancelled() || timeline.cancel.is_cancelled() || timeline.is_stopping() {
1486 : return Err(ApiError::ShuttingDown);
1487 : }
1488 : }
1489 : }
1490 :
1491 : json_response(
1492 : StatusCode::OK,
1493 : pageserver_api::models::ScanDisposableKeysResponse {
1494 : disposable_count,
1495 : not_disposable_count,
1496 : },
1497 : )
1498 : }
1499 :
1500 0 : async fn timeline_download_heatmap_layers_handler(
1501 0 : request: Request<Body>,
1502 0 : _cancel: CancellationToken,
1503 0 : ) -> Result<Response<Body>, ApiError> {
1504 : // Only used in the case where remote storage is not configured.
1505 : const DEFAULT_MAX_CONCURRENCY: usize = 100;
1506 : // A conservative default.
1507 : const DEFAULT_CONCURRENCY: usize = 16;
1508 :
1509 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1510 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1511 :
1512 0 : let desired_concurrency =
1513 0 : parse_query_param(&request, "concurrency")?.unwrap_or(DEFAULT_CONCURRENCY);
1514 0 : let recurse = parse_query_param(&request, "recurse")?.unwrap_or(false);
1515 0 :
1516 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1517 :
1518 0 : let state = get_state(&request);
1519 0 : let timeline =
1520 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1521 0 : .await?;
1522 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1523 0 : .with_scope_timeline(&timeline);
1524 0 :
1525 0 : let max_concurrency = get_config(&request)
1526 0 : .remote_storage_config
1527 0 : .as_ref()
1528 0 : .map(|c| c.concurrency_limit())
1529 0 : .unwrap_or(DEFAULT_MAX_CONCURRENCY);
1530 0 : let concurrency = std::cmp::min(max_concurrency, desired_concurrency);
1531 0 :
1532 0 : timeline.start_heatmap_layers_download(concurrency, recurse, &ctx)?;
1533 :
1534 0 : json_response(StatusCode::ACCEPTED, ())
1535 0 : }
1536 :
1537 0 : async fn timeline_shutdown_download_heatmap_layers_handler(
1538 0 : request: Request<Body>,
1539 0 : _cancel: CancellationToken,
1540 0 : ) -> Result<Response<Body>, ApiError> {
1541 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1542 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1543 :
1544 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1545 :
1546 0 : let state = get_state(&request);
1547 0 : let timeline =
1548 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1549 0 : .await?;
1550 :
1551 0 : timeline.stop_and_drain_heatmap_layers_download().await;
1552 :
1553 0 : json_response(StatusCode::OK, ())
1554 0 : }
1555 :
1556 0 : async fn layer_download_handler(
1557 0 : request: Request<Body>,
1558 0 : _cancel: CancellationToken,
1559 0 : ) -> Result<Response<Body>, ApiError> {
1560 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1561 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1562 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1563 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1564 0 : let layer_name = LayerName::from_str(layer_file_name)
1565 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1566 0 : let state = get_state(&request);
1567 :
1568 0 : let timeline =
1569 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1570 0 : .await?;
1571 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
1572 0 : .with_scope_timeline(&timeline);
1573 0 : let downloaded = timeline
1574 0 : .download_layer(&layer_name, &ctx)
1575 0 : .await
1576 0 : .map_err(|e| match e {
1577 : tenant::storage_layer::layer::DownloadError::TimelineShutdown
1578 : | tenant::storage_layer::layer::DownloadError::DownloadCancelled => {
1579 0 : ApiError::ShuttingDown
1580 : }
1581 0 : other => ApiError::InternalServerError(other.into()),
1582 0 : })?;
1583 :
1584 0 : match downloaded {
1585 0 : Some(true) => json_response(StatusCode::OK, ()),
1586 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1587 0 : None => json_response(
1588 0 : StatusCode::BAD_REQUEST,
1589 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1590 0 : ),
1591 : }
1592 0 : }
1593 :
1594 0 : async fn evict_timeline_layer_handler(
1595 0 : request: Request<Body>,
1596 0 : _cancel: CancellationToken,
1597 0 : ) -> Result<Response<Body>, ApiError> {
1598 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1599 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1600 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1601 0 : let layer_file_name = get_request_param(&request, "layer_file_name")?;
1602 0 : let state = get_state(&request);
1603 :
1604 0 : let layer_name = LayerName::from_str(layer_file_name)
1605 0 : .map_err(|s| ApiError::BadRequest(anyhow::anyhow!(s)))?;
1606 :
1607 0 : let timeline =
1608 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1609 0 : .await?;
1610 0 : let evicted = timeline
1611 0 : .evict_layer(&layer_name)
1612 0 : .await
1613 0 : .map_err(ApiError::InternalServerError)?;
1614 :
1615 0 : match evicted {
1616 0 : Some(true) => json_response(StatusCode::OK, ()),
1617 0 : Some(false) => json_response(StatusCode::NOT_MODIFIED, ()),
1618 0 : None => json_response(
1619 0 : StatusCode::BAD_REQUEST,
1620 0 : format!("Layer {tenant_shard_id}/{timeline_id}/{layer_file_name} not found"),
1621 0 : ),
1622 : }
1623 0 : }
1624 :
1625 0 : async fn timeline_gc_blocking_handler(
1626 0 : request: Request<Body>,
1627 0 : _cancel: CancellationToken,
1628 0 : ) -> Result<Response<Body>, ApiError> {
1629 0 : block_or_unblock_gc(request, true).await
1630 0 : }
1631 :
1632 0 : async fn timeline_gc_unblocking_handler(
1633 0 : request: Request<Body>,
1634 0 : _cancel: CancellationToken,
1635 0 : ) -> Result<Response<Body>, ApiError> {
1636 0 : block_or_unblock_gc(request, false).await
1637 0 : }
1638 :
1639 : /// Traces GetPage@LSN requests for a timeline, and emits metadata in an efficient binary encoding.
1640 : /// Use the `pagectl page-trace` command to decode and analyze the output.
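     : ///
     : /// Hypothetical usage sketch (the route, port, and exact `pagectl` invocation are
     : /// assumptions, not verified against the router; the query parameters match those
     : /// parsed below):
     : ///
     : ///   curl -sS -X POST 'http://127.0.0.1:9898/v1/tenant/$TENANT_SHARD/timeline/$TIMELINE/page_trace?size_limit_bytes=1048576&time_limit_secs=5' -o trace.bin
     : ///   pagectl page-trace trace.bin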
1641 0 : async fn timeline_page_trace_handler(
1642 0 : request: Request<Body>,
1643 0 : cancel: CancellationToken,
1644 0 : ) -> Result<Response<Body>, ApiError> {
1645 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1646 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1647 0 : let state = get_state(&request);
1648 0 : check_permission(&request, None)?;
1649 :
1650 0 : let size_limit: usize = parse_query_param(&request, "size_limit_bytes")?.unwrap_or(1024 * 1024);
1651 0 : let time_limit_secs: u64 = parse_query_param(&request, "time_limit_secs")?.unwrap_or(5);
1652 :
1653 : // Convert size limit to event limit based on the serialized size of an event. The event size is
1654 : // fixed, as the default bincode serializer uses fixed-width integer encoding.
1655 0 : let event_size = bincode::serialize(&PageTraceEvent::default())
1656 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?
1657 0 : .len();
1658 0 : let event_limit = size_limit / event_size;
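     : // Worked example (the event size is illustrative, not measured): with the default
     : // size_limit of 1 MiB and a 32-byte serialized event, event_limit = 1048576 / 32 = 32768.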
1659 :
1660 0 : let timeline =
1661 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
1662 0 : .await?;
1663 :
1664 : // Install a page trace, unless one is already in progress. We just use a buffered channel,
1665 : // which may double the memory usage in the worst case (the channel buffer and the collected
1665 : // response body can each hold up to the event limit), but it's still bounded.
1666 0 : let (trace_tx, mut trace_rx) = tokio::sync::mpsc::channel(event_limit);
1667 0 : let cur = timeline.page_trace.load();
1668 0 : let installed = cur.is_none()
1669 0 : && timeline
1670 0 : .page_trace
1671 0 : .compare_and_swap(cur, Some(Arc::new(trace_tx)))
1672 0 : .is_none();
1673 0 : if !installed {
1674 0 : return Err(ApiError::Conflict("page trace already active".to_string()));
1675 0 : }
1676 0 : defer!(timeline.page_trace.store(None)); // uninstall on return
1677 0 :
1678 0 : // Collect the trace and return it to the client. We could stream the response, but this is
1679 0 : // simple and fine.
1680 0 : let mut body = Vec::with_capacity(size_limit);
1681 0 : let deadline = Instant::now() + Duration::from_secs(time_limit_secs);
1682 :
1683 0 : while body.len() < size_limit {
1684 0 : tokio::select! {
1685 0 : event = trace_rx.recv() => {
1686 0 : let Some(event) = event else {
1687 0 : break; // shouldn't happen (sender doesn't close, unless timeline dropped)
1688 : };
1689 0 : bincode::serialize_into(&mut body, &event)
1690 0 : .map_err(|err| ApiError::InternalServerError(err.into()))?;
1691 : }
1692 0 : _ = tokio::time::sleep_until(deadline) => break, // time limit reached
1693 0 : _ = cancel.cancelled() => return Err(ApiError::Cancelled),
1694 : }
1695 : }
1696 :
1697 0 : Ok(Response::builder()
1698 0 : .status(StatusCode::OK)
1699 0 : .header(header::CONTENT_TYPE, "application/octet-stream")
1700 0 : .body(hyper::Body::from(body))
1701 0 : .unwrap())
1702 0 : }
1703 :
1704 : /// Adding a block is `POST ../block_gc`, removing a block is `POST ../unblock_gc`.
1705 : ///
1706 : /// Both are technically unsafe because they might fire off index uploads, thus they are POST.
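     : ///
     : /// Hypothetical usage sketch (route prefix assumed; the `block_gc`/`unblock_gc` suffixes
     : /// are taken from the doc comment above):
     : ///
     : ///   curl -X POST .../v1/tenant/$TENANT_SHARD/timeline/$TIMELINE/block_gc
     : ///   curl -X POST .../v1/tenant/$TENANT_SHARD/timeline/$TIMELINE/unblock_gc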
1707 0 : async fn block_or_unblock_gc(
1708 0 : request: Request<Body>,
1709 0 : block: bool,
1710 0 : ) -> Result<Response<Body>, ApiError> {
1711 : use crate::tenant::remote_timeline_client::WaitCompletionError;
1712 : use crate::tenant::upload_queue::NotInitialized;
1713 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1714 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1715 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
1716 0 : let state = get_state(&request);
1717 :
1718 0 : let tenant = state
1719 0 : .tenant_manager
1720 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1721 :
1722 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1723 :
1724 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
1725 :
1726 0 : let fut = async {
1727 0 : if block {
1728 0 : timeline.block_gc(&tenant).await.map(|_| ())
1729 : } else {
1730 0 : timeline.unblock_gc(&tenant).await
1731 : }
1732 0 : };
1733 :
1734 0 : let span = tracing::info_span!(
1735 : "block_or_unblock_gc",
1736 : tenant_id = %tenant_shard_id.tenant_id,
1737 0 : shard_id = %tenant_shard_id.shard_slug(),
1738 : timeline_id = %timeline_id,
1739 : block = block,
1740 : );
1741 :
1742 0 : let res = fut.instrument(span).await;
1743 :
1744 0 : res.map_err(|e| {
1745 0 : if e.is::<NotInitialized>() || e.is::<WaitCompletionError>() {
1746 0 : ApiError::ShuttingDown
1747 : } else {
1748 0 : ApiError::InternalServerError(e)
1749 : }
1750 0 : })?;
1751 :
1752 0 : json_response(StatusCode::OK, ())
1753 0 : }
1754 :
1755 : /// Render the tenant_size SVG graph along with the JSON data.
1756 0 : fn synthetic_size_html_response(
1757 0 : inputs: ModelInputs,
1758 0 : storage_model: StorageModel,
1759 0 : sizes: SizeResult,
1760 0 : ) -> Result<Response<Body>, ApiError> {
1761 0 : let mut timeline_ids: Vec<String> = Vec::new();
1762 0 : let mut timeline_map: HashMap<TimelineId, usize> = HashMap::new();
1763 0 : for (index, ti) in inputs.timeline_inputs.iter().enumerate() {
1764 0 : timeline_map.insert(ti.timeline_id, index);
1765 0 : timeline_ids.push(ti.timeline_id.to_string());
1766 0 : }
1767 0 : let seg_to_branch: Vec<(usize, SvgBranchKind)> = inputs
1768 0 : .segments
1769 0 : .iter()
1770 0 : .map(|seg| {
1771 0 : (
1772 0 : *timeline_map.get(&seg.timeline_id).unwrap(),
1773 0 : seg.kind.into(),
1774 0 : )
1775 0 : })
1776 0 : .collect();
1777 :
1778 0 : let svg =
1779 0 : tenant_size_model::svg::draw_svg(&storage_model, &timeline_ids, &seg_to_branch, &sizes)
1780 0 : .map_err(ApiError::InternalServerError)?;
1781 :
1782 0 : let mut response = String::new();
1783 :
1784 : use std::fmt::Write;
1785 0 : write!(response, "<html>\n<body>\n").unwrap();
1786 0 : write!(response, "<div>\n{svg}\n</div>").unwrap();
1787 0 : writeln!(response, "Project size: {}", sizes.total_size).unwrap();
1788 0 : writeln!(response, "<pre>").unwrap();
1789 0 : writeln!(
1790 0 : response,
1791 0 : "{}",
1792 0 : serde_json::to_string_pretty(&inputs).unwrap()
1793 0 : )
1794 0 : .unwrap();
1795 0 : writeln!(
1796 0 : response,
1797 0 : "{}",
1798 0 : serde_json::to_string_pretty(&sizes.segments).unwrap()
1799 0 : )
1800 0 : .unwrap();
1801 0 : writeln!(response, "</pre>").unwrap();
1802 0 : write!(response, "</body>\n</html>\n").unwrap();
1803 0 :
1804 0 : html_response(StatusCode::OK, response)
1805 0 : }
1806 :
1807 0 : pub fn html_response(status: StatusCode, data: String) -> Result<Response<Body>, ApiError> {
1808 0 : let response = Response::builder()
1809 0 : .status(status)
1810 0 : .header(header::CONTENT_TYPE, "text/html")
1811 0 : .body(Body::from(data.as_bytes().to_vec()))
1812 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
1813 0 : Ok(response)
1814 0 : }
1815 :
1816 0 : async fn get_tenant_config_handler(
1817 0 : request: Request<Body>,
1818 0 : _cancel: CancellationToken,
1819 0 : ) -> Result<Response<Body>, ApiError> {
1820 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1821 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1822 0 : let state = get_state(&request);
1823 :
1824 0 : let tenant = state
1825 0 : .tenant_manager
1826 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1827 :
1828 0 : let response = HashMap::from([
1829 : (
1830 : "tenant_specific_overrides",
1831 0 : serde_json::to_value(tenant.tenant_specific_overrides())
1832 0 : .context("serializing tenant specific overrides")
1833 0 : .map_err(ApiError::InternalServerError)?,
1834 : ),
1835 : (
1836 0 : "effective_config",
1837 0 : serde_json::to_value(tenant.effective_config())
1838 0 : .context("serializing effective config")
1839 0 : .map_err(ApiError::InternalServerError)?,
1840 : ),
1841 : ]);
1842 :
1843 0 : json_response(StatusCode::OK, response)
1844 0 : }
1845 :
1846 0 : async fn update_tenant_config_handler(
1847 0 : mut request: Request<Body>,
1848 0 : _cancel: CancellationToken,
1849 0 : ) -> Result<Response<Body>, ApiError> {
1850 0 : let request_data: TenantConfigRequest = json_request(&mut request).await?;
1851 0 : let tenant_id = request_data.tenant_id;
1852 0 : check_permission(&request, Some(tenant_id))?;
1853 :
1854 0 : let new_tenant_conf = request_data.config;
1855 0 :
1856 0 : let state = get_state(&request);
1857 0 :
1858 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1859 :
1860 0 : let tenant = state
1861 0 : .tenant_manager
1862 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1863 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1864 :
1865 : // This is a legacy API that only operates on attached tenants: the preferred
1866 : // API to use is the location_config/ endpoint, which lets the caller provide
1867 : // the full LocationConf.
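     : // A sketch of the preferred call, for comparison (the JSON shape is illustrative and
     : // abbreviated, not the authoritative schema):
     : //   PUT /v1/tenant/:tenant_shard_id/location_config
     : //   { "mode": "AttachedSingle", "generation": 1, "tenant_conf": { ... } }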
1868 0 : let location_conf = LocationConf::attached_single(
1869 0 : new_tenant_conf.clone(),
1870 0 : tenant.get_generation(),
1871 0 : &ShardParameters::default(),
1872 0 : );
1873 0 :
1874 0 : crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1875 0 : .await
1876 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1877 :
1878 0 : let _ = tenant
1879 0 : .update_tenant_config(|_crnt| Ok(new_tenant_conf.clone()))
1880 0 : .expect("Closure returns Ok()");
1881 0 :
1882 0 : json_response(StatusCode::OK, ())
1883 0 : }
1884 :
1885 0 : async fn patch_tenant_config_handler(
1886 0 : mut request: Request<Body>,
1887 0 : _cancel: CancellationToken,
1888 0 : ) -> Result<Response<Body>, ApiError> {
1889 0 : let request_data: TenantConfigPatchRequest = json_request(&mut request).await?;
1890 0 : let tenant_id = request_data.tenant_id;
1891 0 : check_permission(&request, Some(tenant_id))?;
1892 :
1893 0 : let state = get_state(&request);
1894 0 :
1895 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1896 :
1897 0 : let tenant = state
1898 0 : .tenant_manager
1899 0 : .get_attached_tenant_shard(tenant_shard_id)?;
1900 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
1901 :
1902 0 : let updated = tenant
1903 0 : .update_tenant_config(|crnt| {
1904 0 : crnt.apply_patch(request_data.config.clone())
1905 0 : .map_err(anyhow::Error::new)
1906 0 : })
1907 0 : .map_err(ApiError::BadRequest)?;
1908 :
1909 : // This is a legacy API that only operates on attached tenants: the preferred
1910 : // API to use is the location_config/ endpoint, which lets the caller provide
1911 : // the full LocationConf.
1912 0 : let location_conf = LocationConf::attached_single(
1913 0 : updated,
1914 0 : tenant.get_generation(),
1915 0 : &ShardParameters::default(),
1916 0 : );
1917 0 :
1918 0 : crate::tenant::Tenant::persist_tenant_config(state.conf, &tenant_shard_id, &location_conf)
1919 0 : .await
1920 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
1921 :
1922 0 : json_response(StatusCode::OK, ())
1923 0 : }
1924 :
1925 0 : async fn put_tenant_location_config_handler(
1926 0 : mut request: Request<Body>,
1927 0 : _cancel: CancellationToken,
1928 0 : ) -> Result<Response<Body>, ApiError> {
1929 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
1930 :
1931 0 : let request_data: TenantLocationConfigRequest = json_request(&mut request).await?;
1932 0 : let flush = parse_query_param(&request, "flush_ms")?.map(Duration::from_millis);
1933 0 : let lazy = parse_query_param(&request, "lazy")?.unwrap_or(false);
1934 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
1935 :
1936 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
1937 0 : let state = get_state(&request);
1938 0 : let conf = state.conf;
1939 0 :
1940 0 : // The `Detached` state is special, it doesn't upsert a tenant, it removes
1941 0 : // its local disk content and drops it from memory.
1942 0 : if let LocationConfigMode::Detached = request_data.config.mode {
1943 0 : if let Err(e) = state
1944 0 : .tenant_manager
1945 0 : .detach_tenant(conf, tenant_shard_id, &state.deletion_queue_client)
1946 0 : .instrument(info_span!("tenant_detach",
1947 : tenant_id = %tenant_shard_id.tenant_id,
1948 0 : shard_id = %tenant_shard_id.shard_slug()
1949 : ))
1950 0 : .await
1951 : {
1952 0 : match e {
1953 0 : TenantStateError::SlotError(TenantSlotError::NotFound(_)) => {
1954 0 : // This API is idempotent: a NotFound on a detach is fine.
1955 0 : }
1956 0 : _ => return Err(e.into()),
1957 : }
1958 0 : }
1959 0 : return json_response(StatusCode::OK, ());
1960 0 : }
1961 :
1962 0 : let location_conf =
1963 0 : LocationConf::try_from(&request_data.config).map_err(ApiError::BadRequest)?;
1964 :
1965 : // lazy==true queues up for activation or jumps the queue like normal when a compute connects,
1966 : // similar to the ordering at startup.
1967 0 : let spawn_mode = if lazy {
1968 0 : tenant::SpawnMode::Lazy
1969 : } else {
1970 0 : tenant::SpawnMode::Eager
1971 : };
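     : // e.g. (hypothetical invocation): PUT .../location_config?lazy=true&flush_ms=5000
     : // upserts the location lazily and also requests a flush (semantics per `upsert_location`).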
1972 :
1973 0 : let tenant = state
1974 0 : .tenant_manager
1975 0 : .upsert_location(tenant_shard_id, location_conf, flush, spawn_mode, &ctx)
1976 0 : .await?;
1977 0 : let stripe_size = tenant.as_ref().map(|t| t.get_shard_stripe_size());
1978 0 : let attached = tenant.is_some();
1979 :
1980 0 : if let Some(_flush_ms) = flush {
1981 0 : match state
1982 0 : .secondary_controller
1983 0 : .upload_tenant(tenant_shard_id)
1984 0 : .await
1985 : {
1986 : Ok(()) => {
1987 0 : tracing::info!("Uploaded heatmap during flush");
1988 : }
1989 0 : Err(e) => {
1990 0 : tracing::warn!("Failed to flush heatmap: {e}");
1991 : }
1992 : }
1993 : } else {
1994 0 : tracing::info!("No flush requested when configuring");
1995 : }
1996 :
1997 : // This API returns a vector of pageservers where the tenant is attached: this is
1998 : // primarily for use in the sharding service. For compatibility, we also return this
1999 : // when called directly on a pageserver, but the payload is always zero or one shards.
2000 0 : let mut response = TenantLocationConfigResponse {
2001 0 : shards: Vec::new(),
2002 0 : stripe_size: None,
2003 0 : };
2004 0 : if attached {
2005 0 : response.shards.push(TenantShardLocation {
2006 0 : shard_id: tenant_shard_id,
2007 0 : node_id: state.conf.id,
2008 0 : });
2009 0 : if tenant_shard_id.shard_count.count() > 1 {
2010 : // Stripe size should be set if we are attached
2011 0 : debug_assert!(stripe_size.is_some());
2012 0 : response.stripe_size = stripe_size;
2013 0 : }
2014 0 : }
2015 :
2016 0 : json_response(StatusCode::OK, response)
2017 0 : }
2018 :
2019 0 : async fn list_location_config_handler(
2020 0 : request: Request<Body>,
2021 0 : _cancel: CancellationToken,
2022 0 : ) -> Result<Response<Body>, ApiError> {
2023 0 : let state = get_state(&request);
2024 0 : let slots = state.tenant_manager.list();
2025 0 : let result = LocationConfigListResponse {
2026 0 : tenant_shards: slots
2027 0 : .into_iter()
2028 0 : .map(|(tenant_shard_id, slot)| {
2029 0 : let v = match slot {
2030 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2031 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2032 0 : TenantSlot::InProgress(_) => None,
2033 : };
2034 0 : (tenant_shard_id, v)
2035 0 : })
2036 0 : .collect(),
2037 0 : };
2038 0 : json_response(StatusCode::OK, result)
2039 0 : }
2040 :
2041 0 : async fn get_location_config_handler(
2042 0 : request: Request<Body>,
2043 0 : _cancel: CancellationToken,
2044 0 : ) -> Result<Response<Body>, ApiError> {
2045 0 : let state = get_state(&request);
2046 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2047 0 : let slot = state.tenant_manager.get(tenant_shard_id);
2048 :
2049 0 : let Some(slot) = slot else {
2050 0 : return Err(ApiError::NotFound(
2051 0 : anyhow::anyhow!("Tenant shard not found").into(),
2052 0 : ));
2053 : };
2054 :
2055 0 : let result: Option<LocationConfig> = match slot {
2056 0 : TenantSlot::Attached(t) => Some(t.get_location_conf()),
2057 0 : TenantSlot::Secondary(s) => Some(s.get_location_conf()),
2058 0 : TenantSlot::InProgress(_) => None,
2059 : };
2060 :
2061 0 : json_response(StatusCode::OK, result)
2062 0 : }
2063 :
2064 : // Do a time travel recovery on the given tenant/tenant shard. The tenant needs to be detached
2065 : // (from all pageservers), as the recovery invalidates consistency assumptions.
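     : //
     : // Hypothetical invocation (route assumed; both timestamps are RFC 3339, and the query
     : // parameter names match those parsed below):
     : //   PUT .../v1/tenant/$TENANT_SHARD/time_travel_remote_storage?travel_to=2024-01-01T00:00:00Z&done_if_after=2024-01-02T00:00:00Z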
2066 0 : async fn tenant_time_travel_remote_storage_handler(
2067 0 : request: Request<Body>,
2068 0 : cancel: CancellationToken,
2069 0 : ) -> Result<Response<Body>, ApiError> {
2070 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2071 :
2072 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2073 :
2074 0 : let timestamp_raw = must_get_query_param(&request, "travel_to")?;
2075 0 : let timestamp = humantime::parse_rfc3339(×tamp_raw)
2076 0 : .with_context(|| format!("Invalid time for travel_to: {timestamp_raw:?}"))
2077 0 : .map_err(ApiError::BadRequest)?;
2078 :
2079 0 : let done_if_after_raw = must_get_query_param(&request, "done_if_after")?;
2080 0 : let done_if_after = humantime::parse_rfc3339(&done_if_after_raw)
2081 0 : .with_context(|| format!("Invalid time for done_if_after: {done_if_after_raw:?}"))
2082 0 : .map_err(ApiError::BadRequest)?;
2083 :
2084 : // This is just a sanity check to fend off naive misuse of the API:
2085 : // the tenant needs to be detached *everywhere*
2086 0 : let state = get_state(&request);
2087 0 : let we_manage_tenant = state.tenant_manager.manages_tenant_shard(tenant_shard_id);
2088 0 : if we_manage_tenant {
2089 0 : return Err(ApiError::BadRequest(anyhow!(
2090 0 : "Tenant {tenant_shard_id} is already attached at this pageserver"
2091 0 : )));
2092 0 : }
2093 0 :
2094 0 : if timestamp > done_if_after {
2095 0 : return Err(ApiError::BadRequest(anyhow!(
2096 0 : "The done_if_after timestamp comes before the timestamp to recover to"
2097 0 : )));
2098 0 : }
2099 0 :
2100 0 : tracing::info!(
2101 0 : "Issuing time travel request internally. timestamp={timestamp_raw}, done_if_after={done_if_after_raw}"
2102 : );
2103 :
2104 0 : remote_timeline_client::upload::time_travel_recover_tenant(
2105 0 : &state.remote_storage,
2106 0 : &tenant_shard_id,
2107 0 : timestamp,
2108 0 : done_if_after,
2109 0 : &cancel,
2110 0 : )
2111 0 : .await
2112 0 : .map_err(|e| match e {
2113 0 : TimeTravelError::BadInput(e) => {
2114 0 : warn!("bad input error: {e}");
2115 0 : ApiError::BadRequest(anyhow!("bad input error"))
2116 : }
2117 : TimeTravelError::Unimplemented => {
2118 0 : ApiError::BadRequest(anyhow!("unimplemented for the configured remote storage"))
2119 : }
2120 0 : TimeTravelError::Cancelled => ApiError::InternalServerError(anyhow!("cancelled")),
2121 : TimeTravelError::TooManyVersions => {
2122 0 : ApiError::InternalServerError(anyhow!("too many versions in remote storage"))
2123 : }
2124 0 : TimeTravelError::Other(e) => {
2125 0 : warn!("internal error: {e}");
2126 0 : ApiError::InternalServerError(anyhow!("internal error"))
2127 : }
2128 0 : })?;
2129 :
2130 0 : json_response(StatusCode::OK, ())
2131 0 : }
2132 :
2133 : /// Testing helper to transition a tenant to [`crate::tenant::TenantState::Broken`].
2134 0 : async fn handle_tenant_break(
2135 0 : r: Request<Body>,
2136 0 : _cancel: CancellationToken,
2137 0 : ) -> Result<Response<Body>, ApiError> {
2138 0 : let tenant_shard_id: TenantShardId = parse_request_param(&r, "tenant_shard_id")?;
2139 :
2140 0 : let state = get_state(&r);
2141 0 : state
2142 0 : .tenant_manager
2143 0 : .get_attached_tenant_shard(tenant_shard_id)?
2144 0 : .set_broken("broken from test".to_owned())
2145 0 : .await;
2146 :
2147 0 : json_response(StatusCode::OK, ())
2148 0 : }
2149 :
2150 : // Obtains an LSN lease on the given timeline.
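     : // Illustrative request body (the exact Lsn wire encoding follows its serde impl):
     : //   {"lsn": "0/169AD58"}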
2151 0 : async fn lsn_lease_handler(
2152 0 : mut request: Request<Body>,
2153 0 : _cancel: CancellationToken,
2154 0 : ) -> Result<Response<Body>, ApiError> {
2155 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2156 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2157 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2158 0 : let lsn = json_request::<LsnLeaseRequest>(&mut request).await?.lsn;
2159 :
2160 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2161 0 :
2162 0 : let state = get_state(&request);
2163 :
2164 0 : let timeline =
2165 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2166 0 : .await?;
2167 :
2168 0 : let result = async {
2169 0 : timeline
2170 0 : .init_lsn_lease(lsn, timeline.get_lsn_lease_length(), &ctx)
2171 0 : .map_err(|e| {
2172 0 : ApiError::InternalServerError(
2173 0 : e.context(format!("invalid lsn lease request at {lsn}")),
2174 0 : )
2175 0 : })
2176 0 : }
2177 0 : .instrument(info_span!("init_lsn_lease", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2178 0 : .await?;
2179 :
2180 0 : json_response(StatusCode::OK, result)
2181 0 : }
2182 :
2183 : // Run GC immediately on the given timeline.
2184 0 : async fn timeline_gc_handler(
2185 0 : mut request: Request<Body>,
2186 0 : cancel: CancellationToken,
2187 0 : ) -> Result<Response<Body>, ApiError> {
2188 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2189 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2190 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2191 :
2192 0 : let gc_req: TimelineGcRequest = json_request(&mut request).await?;
2193 :
2194 0 : let state = get_state(&request);
2195 0 :
2196 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2197 0 : let gc_result = state
2198 0 : .tenant_manager
2199 0 : .immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, &ctx)
2200 0 : .await?;
2201 :
2202 0 : json_response(StatusCode::OK, gc_result)
2203 0 : }
2204 :
2205 : // Cancel scheduled compaction tasks
2206 0 : async fn timeline_cancel_compact_handler(
2207 0 : request: Request<Body>,
2208 0 : _cancel: CancellationToken,
2209 0 : ) -> Result<Response<Body>, ApiError> {
2210 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2211 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2212 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2213 0 : let state = get_state(&request);
2214 0 : async {
2215 0 : let tenant = state
2216 0 : .tenant_manager
2217 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2218 0 : tenant.cancel_scheduled_compaction(timeline_id);
2219 0 : json_response(StatusCode::OK, ())
2220 0 : }
2221 0 : .instrument(info_span!("timeline_cancel_compact", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2222 0 : .await
2223 0 : }
2224 :
2225 : // Get compaction info for a timeline.
2226 0 : async fn timeline_compact_info_handler(
2227 0 : request: Request<Body>,
2228 0 : _cancel: CancellationToken,
2229 0 : ) -> Result<Response<Body>, ApiError> {
2230 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2231 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2232 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2233 0 : let state = get_state(&request);
2234 0 : async {
2235 0 : let tenant = state
2236 0 : .tenant_manager
2237 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2238 0 : let resp = tenant.get_scheduled_compaction_tasks(timeline_id);
2239 0 : json_response(StatusCode::OK, resp)
2240 0 : }
2241 0 : .instrument(info_span!("timeline_compact_info", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2242 0 : .await
2243 0 : }
2244 :
2245 : // Run compaction immediately on the given timeline.
2246 0 : async fn timeline_compact_handler(
2247 0 : mut request: Request<Body>,
2248 0 : cancel: CancellationToken,
2249 0 : ) -> Result<Response<Body>, ApiError> {
2250 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2251 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2252 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2253 :
2254 0 : let compact_request = json_request_maybe::<Option<CompactRequest>>(&mut request).await?;
2255 :
2256 0 : let state = get_state(&request);
2257 0 :
2258 0 : let mut flags = EnumSet::empty();
2259 0 : flags |= CompactFlags::NoYield; // run compaction to completion
2260 0 :
2261 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2262 0 : flags |= CompactFlags::ForceL0Compaction;
2263 0 : }
2264 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2265 0 : flags |= CompactFlags::ForceRepartition;
2266 0 : }
2267 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2268 0 : flags |= CompactFlags::ForceImageLayerCreation;
2269 0 : }
2270 0 : if Some(true) == parse_query_param::<_, bool>(&request, "enhanced_gc_bottom_most_compaction")? {
2271 0 : flags |= CompactFlags::EnhancedGcBottomMostCompaction;
2272 0 : }
2273 0 : if Some(true) == parse_query_param::<_, bool>(&request, "dry_run")? {
2274 0 : flags |= CompactFlags::DryRun;
2275 0 : }
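     : // e.g. (hypothetical invocation): PUT .../compact?force_repartition=true&dry_run=true
     : // sets both flags; the query parameters above compose the same way in any combination.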
2276 :
2277 0 : let wait_until_uploaded =
2278 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2279 :
2280 0 : let wait_until_scheduled_compaction_done =
2281 0 : parse_query_param::<_, bool>(&request, "wait_until_scheduled_compaction_done")?
2282 0 : .unwrap_or(false);
2283 0 :
2284 0 : let sub_compaction = compact_request
2285 0 : .as_ref()
2286 0 : .map(|r| r.sub_compaction)
2287 0 : .unwrap_or(false);
2288 0 : let sub_compaction_max_job_size_mb = compact_request
2289 0 : .as_ref()
2290 0 : .and_then(|r| r.sub_compaction_max_job_size_mb);
2291 0 :
2292 0 : let options = CompactOptions {
2293 0 : compact_key_range: compact_request
2294 0 : .as_ref()
2295 0 : .and_then(|r| r.compact_key_range.clone()),
2296 0 : compact_lsn_range: compact_request
2297 0 : .as_ref()
2298 0 : .and_then(|r| r.compact_lsn_range.clone()),
2299 0 : flags,
2300 0 : sub_compaction,
2301 0 : sub_compaction_max_job_size_mb,
2302 0 : };
2303 0 :
2304 0 : let scheduled = compact_request
2305 0 : .as_ref()
2306 0 : .map(|r| r.scheduled)
2307 0 : .unwrap_or(false);
2308 :
2309 0 : async {
2310 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2311 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2312 0 : if scheduled {
2313 0 : let tenant = state
2314 0 : .tenant_manager
2315 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2316 0 : let rx = tenant.schedule_compaction(timeline_id, options).await.map_err(ApiError::InternalServerError)?;
2317 0 : if wait_until_scheduled_compaction_done {
2318 : // It is possible that this will take a long time; dropping the HTTP request will not cancel the compaction.
2319 0 : rx.await.ok();
2320 0 : }
2321 : } else {
2322 0 : timeline
2323 0 : .compact_with_options(&cancel, options, &ctx)
2324 0 : .await
2325 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2326 0 : if wait_until_uploaded {
2327 0 : timeline.remote_client.wait_completion().await
2328 : // XXX map to correct ApiError for the cases where it's due to shutdown
2329 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2330 0 : }
2331 : }
2332 0 : json_response(StatusCode::OK, ())
2333 0 : }
2334 0 : .instrument(info_span!("manual_compaction", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2335 0 : .await
2336 0 : }
2337 :
2338 0 : async fn timeline_mark_invisible_handler(
2339 0 : request: Request<Body>,
2340 0 : _cancel: CancellationToken,
2341 0 : ) -> Result<Response<Body>, ApiError> {
2342 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2343 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2344 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2345 :
2346 0 : let state = get_state(&request);
2347 :
2348 0 : async {
2349 0 : let tenant = state
2350 0 : .tenant_manager
2351 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2352 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2353 0 : timeline.remote_client.schedule_index_upload_for_timeline_invisible_state(TimelineVisibilityState::Invisible).map_err(ApiError::InternalServerError)?;
2354 0 : json_response(StatusCode::OK, ())
2355 0 : }
2356 0 : .instrument(info_span!("manual_timeline_mark_invisible", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2357 0 : .await
2358 0 : }
2359 :
2360 : // Run offload immediately on the given timeline.
2361 0 : async fn timeline_offload_handler(
2362 0 : request: Request<Body>,
2363 0 : _cancel: CancellationToken,
2364 0 : ) -> Result<Response<Body>, ApiError> {
2365 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2366 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2367 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2368 :
2369 0 : let state = get_state(&request);
2370 :
2371 0 : async {
2372 0 : let tenant = state
2373 0 : .tenant_manager
2374 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2375 :
2376 0 : if tenant.get_offloaded_timeline(timeline_id).is_ok() {
2377 0 : return json_response(StatusCode::OK, ());
2378 0 : }
2379 0 : let timeline =
2380 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2381 0 : .await?;
2382 :
2383 0 : if !tenant.timeline_has_no_attached_children(timeline_id) {
2384 0 : return Err(ApiError::PreconditionFailed(
2385 0 : "timeline has attached children".into(),
2386 0 : ));
2387 0 : }
2388 0 : if let (false, reason) = timeline.can_offload() {
2389 0 : return Err(ApiError::PreconditionFailed(
2390 0 : format!("Timeline::can_offload() check failed: {}", reason) .into(),
2391 0 : ));
2392 0 : }
2393 0 : offload_timeline(&tenant, &timeline)
2394 0 : .await
2395 0 : .map_err(|e| {
2396 0 : match e {
2397 0 : OffloadError::Cancelled => ApiError::ResourceUnavailable("Timeline shutting down".into()),
2398 0 : _ => ApiError::InternalServerError(anyhow!(e))
2399 : }
2400 0 : })?;
2401 :
2402 0 : json_response(StatusCode::OK, ())
2403 0 : }
2404 0 : .instrument(info_span!("manual_timeline_offload", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2405 0 : .await
2406 0 : }
2407 :
2408 : // Run checkpoint immediately on the given timeline.
2409 0 : async fn timeline_checkpoint_handler(
2410 0 : request: Request<Body>,
2411 0 : cancel: CancellationToken,
2412 0 : ) -> Result<Response<Body>, ApiError> {
2413 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2414 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2415 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2416 :
2417 0 : let state = get_state(&request);
2418 0 :
2419 0 : let mut flags = EnumSet::empty();
2420 0 : flags |= CompactFlags::NoYield; // run compaction to completion
2421 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_l0_compaction")? {
2422 0 : flags |= CompactFlags::ForceL0Compaction;
2423 0 : }
2424 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_repartition")? {
2425 0 : flags |= CompactFlags::ForceRepartition;
2426 0 : }
2427 0 : if Some(true) == parse_query_param::<_, bool>(&request, "force_image_layer_creation")? {
2428 0 : flags |= CompactFlags::ForceImageLayerCreation;
2429 0 : }
2430 :
2431 : // By default, checkpoints come with a compaction, but this may be disabled by tests that just want to flush + upload.
2432 0 : let compact = parse_query_param::<_, bool>(&request, "compact")?.unwrap_or(true);
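     : // e.g. (hypothetical invocation): PUT .../checkpoint?compact=false&wait_until_uploaded=true
     : // flushes and waits for uploads without running a compaction afterwards.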
2433 :
2434 0 : let wait_until_flushed: bool =
2435 0 : parse_query_param(&request, "wait_until_flushed")?.unwrap_or(true);
2436 :
2437 0 : let wait_until_uploaded =
2438 0 : parse_query_param::<_, bool>(&request, "wait_until_uploaded")?.unwrap_or(false);
2439 :
2440 0 : async {
2441 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2442 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2443 0 : if wait_until_flushed {
2444 0 : timeline.freeze_and_flush().await
2445 : } else {
2446 0 : timeline.freeze().await.and(Ok(()))
2447 0 : }.map_err(|e| {
2448 0 : match e {
2449 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
2450 0 : other => ApiError::InternalServerError(other.into()),
2451 :
2452 : }
2453 0 : })?;
2454 0 : if compact {
2455 0 : timeline
2456 0 : .compact(&cancel, flags, &ctx)
2457 0 : .await
2458 0 : .map_err(|e|
2459 0 : match e {
2460 0 : CompactionError::ShuttingDown => ApiError::ShuttingDown,
2461 0 : CompactionError::Offload(e) => ApiError::InternalServerError(anyhow::anyhow!(e)),
2462 0 : CompactionError::CollectKeySpaceError(e) => ApiError::InternalServerError(anyhow::anyhow!(e)),
2463 0 : CompactionError::Other(e) => ApiError::InternalServerError(e),
2464 0 : CompactionError::AlreadyRunning(_) => ApiError::InternalServerError(anyhow::anyhow!(e)),
2465 0 : }
2466 0 : )?;
2467 0 : }
2468 :
2469 0 : if wait_until_uploaded {
2470 0 : tracing::info!("Waiting for uploads to complete...");
2471 0 : timeline.remote_client.wait_completion().await
2472 : // XXX map to correct ApiError for the cases where it's due to shutdown
2473 0 : .context("wait completion").map_err(ApiError::InternalServerError)?;
2474 0 : tracing::info!("Uploads completed up to {}", timeline.get_remote_consistent_lsn_projected().unwrap_or(Lsn(0)));
2475 0 : }
2476 :
2477 0 : json_response(StatusCode::OK, ())
2478 0 : }
2479 0 : .instrument(info_span!("manual_checkpoint", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2480 0 : .await
2481 0 : }
2482 :
2483 0 : async fn timeline_download_remote_layers_handler_post(
2484 0 : mut request: Request<Body>,
2485 0 : _cancel: CancellationToken,
2486 0 : ) -> Result<Response<Body>, ApiError> {
2487 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2488 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2489 0 : let body: DownloadRemoteLayersTaskSpawnRequest = json_request(&mut request).await?;
2490 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2491 :
2492 0 : let state = get_state(&request);
2493 :
2494 0 : let timeline =
2495 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2496 0 : .await?;
2497 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download)
2498 0 : .with_scope_timeline(&timeline);
2499 0 : match timeline.spawn_download_all_remote_layers(body, &ctx).await {
2500 0 : Ok(st) => json_response(StatusCode::ACCEPTED, st),
2501 0 : Err(st) => json_response(StatusCode::CONFLICT, st),
2502 : }
2503 0 : }
2504 :
2505 0 : async fn timeline_download_remote_layers_handler_get(
2506 0 : request: Request<Body>,
2507 0 : _cancel: CancellationToken,
2508 0 : ) -> Result<Response<Body>, ApiError> {
2509 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2510 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2511 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2512 0 : let state = get_state(&request);
2513 :
2514 0 : let timeline =
2515 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
2516 0 : .await?;
2517 0 : let info = timeline
2518 0 : .get_download_all_remote_layers_task_info()
2519 0 : .context("task never started since last pageserver process start")
2520 0 : .map_err(|e| ApiError::NotFound(e.into()))?;
2521 0 : json_response(StatusCode::OK, info)
2522 0 : }
2523 :
2524 0 : async fn timeline_detach_ancestor_handler(
2525 0 : request: Request<Body>,
2526 0 : _cancel: CancellationToken,
2527 0 : ) -> Result<Response<Body>, ApiError> {
2528 : use pageserver_api::models::detach_ancestor::AncestorDetached;
2529 :
2530 : use crate::tenant::timeline::detach_ancestor;
2531 :
2532 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2533 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2534 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2535 0 : let behavior: Option<DetachBehavior> = parse_query_param(&request, "detach_behavior")?;
2536 :
2537 0 : let behavior = behavior.unwrap_or_default();
2538 :
2539 0 : let span = tracing::info_span!("detach_ancestor", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
2540 :
2541 0 : async move {
2542 0 : let mut options = detach_ancestor::Options::default();
2543 :
2544 0 : let rewrite_concurrency =
2545 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "rewrite_concurrency")?;
2546 0 : let copy_concurrency =
2547 0 : parse_query_param::<_, std::num::NonZeroUsize>(&request, "copy_concurrency")?;
2548 :
2549 0 : [
2550 0 : (&mut options.rewrite_concurrency, rewrite_concurrency),
2551 0 : (&mut options.copy_concurrency, copy_concurrency),
2552 0 : ]
2553 0 : .into_iter()
2554 0 : .filter_map(|(target, val)| val.map(|val| (target, val)))
2555 0 : .for_each(|(target, val)| *target = val);
2556 0 :
2557 0 : let state = get_state(&request);
2558 :
2559 0 : let tenant = state
2560 0 : .tenant_manager
2561 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2562 :
2563 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2564 :
2565 0 : let ctx = RequestContext::new(TaskKind::DetachAncestor, DownloadBehavior::Download);
2566 0 : let ctx = &ctx;
2567 :
2568 : // Flush the upload queues of all timelines before detaching the ancestor. We do the same
2569 : // thing again during shutdown. This early upload ensures the pageserver does not need to
2570 : // upload too many things later, which would create downtime during timeline reloads.
2571 0 : for timeline in tenant.list_timelines() {
2572 0 : timeline
2573 0 : .remote_client
2574 0 : .wait_completion()
2575 0 : .await
2576 0 : .map_err(|e| {
2577 0 : ApiError::PreconditionFailed(format!("cannot drain upload queue: {e}").into())
2578 0 : })?;
2579 : }
2580 :
2581 0 : tracing::info!("all timeline upload queues are drained");
2582 :
2583 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2584 0 : let ctx = &ctx.with_scope_timeline(&timeline);
2585 :
2586 0 : let progress = timeline
2587 0 : .prepare_to_detach_from_ancestor(&tenant, options, behavior, ctx)
2588 0 : .await?;
2589 :
2590 : // uncomment to allow early as possible Tenant::drop
2591 : // drop(tenant);
2592 :
2593 0 : let resp = match progress {
2594 0 : detach_ancestor::Progress::Prepared(attempt, prepared) => {
2595 : // it would be great to tag the guard onto the tenant activation future
2596 0 : let reparented_timelines = state
2597 0 : .tenant_manager
2598 0 : .complete_detaching_timeline_ancestor(
2599 0 : tenant_shard_id,
2600 0 : timeline_id,
2601 0 : prepared,
2602 0 : behavior,
2603 0 : attempt,
2604 0 : ctx,
2605 0 : )
2606 0 : .await?;
2607 :
2608 0 : AncestorDetached {
2609 0 : reparented_timelines,
2610 0 : }
2611 : }
2612 0 : detach_ancestor::Progress::Done(resp) => resp,
2613 : };
2614 :
2615 0 : json_response(StatusCode::OK, resp)
2616 0 : }
2617 0 : .instrument(span)
2618 0 : .await
2619 0 : }
2620 :
2621 0 : async fn deletion_queue_flush(
2622 0 : r: Request<Body>,
2623 0 : cancel: CancellationToken,
2624 0 : ) -> Result<Response<Body>, ApiError> {
2625 0 : let state = get_state(&r);
2626 :
2627 0 : let execute = parse_query_param(&r, "execute")?.unwrap_or(false);
2628 0 :
2629 0 : let flush = async {
2630 0 : if execute {
2631 0 : state.deletion_queue_client.flush_execute().await
2632 : } else {
2633 0 : state.deletion_queue_client.flush().await
2634 : }
2635 0 : }
2636 : // DeletionQueueError's only case is shutting down.
2637 0 : .map_err(|_| ApiError::ShuttingDown);
2638 0 :
2639 0 : tokio::select! {
2640 0 : res = flush => {
2641 0 : res.map(|()| json_response(StatusCode::OK, ()))?
2642 : }
2643 0 : _ = cancel.cancelled() => {
2644 0 : Err(ApiError::ShuttingDown)
2645 : }
2646 : }
2647 0 : }
2648 :
2649 0 : async fn getpage_at_lsn_handler(
2650 0 : request: Request<Body>,
2651 0 : cancel: CancellationToken,
2652 0 : ) -> Result<Response<Body>, ApiError> {
2653 0 : getpage_at_lsn_handler_inner(false, request, cancel).await
2654 0 : }
2655 :
2656 0 : async fn touchpage_at_lsn_handler(
2657 0 : request: Request<Body>,
2658 0 : cancel: CancellationToken,
2659 0 : ) -> Result<Response<Body>, ApiError> {
2660 0 : getpage_at_lsn_handler_inner(true, request, cancel).await
2661 0 : }
2662 :
2663 : /// Test whether `GetPage@Lsn` succeeds; useful for manual debugging.
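     : ///
     : /// Hypothetical example (route assumed; `key` and `lsn` match the query parameters
     : /// parsed below):
     : ///
     : ///   GET .../timeline/$TIMELINE/getpage?key=<hex key>&lsn=0/169AD58
     : ///
     : /// returns the raw page image; omit `lsn` to read at the last record LSN.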
2664 0 : async fn getpage_at_lsn_handler_inner(
2665 0 : touch: bool,
2666 0 : request: Request<Body>,
2667 0 : _cancel: CancellationToken,
2668 0 : ) -> Result<Response<Body>, ApiError> {
2669 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2670 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2671 : // Require pageserver admin permission for this API instead of only a tenant-scoped token.
2672 0 : check_permission(&request, None)?;
2673 0 : let state = get_state(&request);
2674 :
2675 : struct Key(pageserver_api::key::Key);
2676 :
2677 : impl std::str::FromStr for Key {
2678 : type Err = anyhow::Error;
2679 :
2680 0 : fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
2681 0 : pageserver_api::key::Key::from_hex(s).map(Key)
2682 0 : }
2683 : }
2684 :
2685 0 : let key: Key = parse_query_param(&request, "key")?
2686 0 : .ok_or_else(|| ApiError::BadRequest(anyhow!("missing 'key' query parameter")))?;
2687 0 : let lsn: Option<Lsn> = parse_query_param(&request, "lsn")?;
2688 :
2689 0 : async {
2690 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
2691 : // Enable read path debugging
2692 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2693 0 : let ctx = RequestContextBuilder::extend(&ctx).read_path_debug(true)
2694 0 : .scope(context::Scope::new_timeline(&timeline)).build();
2695 0 :
2696 0 : // Use last_record_lsn if no lsn is provided
2697 0 : let lsn = lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2698 0 : let page = timeline.get(key.0, lsn, &ctx).await?;
2699 :
2700 0 : if touch {
2701 0 : json_response(StatusCode::OK, ())
2702 : } else {
2703 0 : Result::<_, ApiError>::Ok(
2704 0 : Response::builder()
2705 0 : .status(StatusCode::OK)
2706 0 : .header(header::CONTENT_TYPE, "application/octet-stream")
2707 0 : .body(hyper::Body::from(page))
2708 0 : .unwrap(),
2709 0 : )
2710 : }
2711 0 : }
2712 0 : .instrument(info_span!("timeline_get", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2713 0 : .await
2714 0 : }
2715 :
2716 0 : async fn timeline_collect_keyspace(
2717 0 : request: Request<Body>,
2718 0 : _cancel: CancellationToken,
2719 0 : ) -> Result<Response<Body>, ApiError> {
2720 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2721 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
2722 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
2723 0 : let state = get_state(&request);
2724 :
2725 0 : let at_lsn: Option<Lsn> = parse_query_param(&request, "at_lsn")?;
2726 :
2727 0 : async {
2728 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id).await?;
2729 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download).with_scope_timeline(&timeline);
2730 0 : let at_lsn = at_lsn.unwrap_or_else(|| timeline.get_last_record_lsn());
2731 0 : let (dense_ks, sparse_ks) = timeline
2732 0 : .collect_keyspace(at_lsn, &ctx)
2733 0 : .await
2734 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?;
2735 :
2736 : // This API is currently used by pagebench, which iterates over all keys within the keyspace.
2737 : // Therefore, we split dense/sparse keys in this API.
2738 0 : let res = pageserver_api::models::partitioning::Partitioning { keys: dense_ks, sparse_keys: sparse_ks, at_lsn };
2739 0 :
2740 0 : json_response(StatusCode::OK, res)
2741 0 : }
2742 0 : .instrument(info_span!("timeline_collect_keyspace", tenant_id = %tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug(), %timeline_id))
2743 0 : .await
2744 0 : }
2745 :
2746 0 : async fn active_timeline_of_active_tenant(
2747 0 : tenant_manager: &TenantManager,
2748 0 : tenant_shard_id: TenantShardId,
2749 0 : timeline_id: TimelineId,
2750 0 : ) -> Result<Arc<Timeline>, ApiError> {
2751 0 : let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
2752 :
2753 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
2754 :
2755 0 : Ok(tenant.get_timeline(timeline_id, true)?)
2756 0 : }
2757 :
2758 0 : async fn always_panic_handler(
2759 0 : req: Request<Body>,
2760 0 : _cancel: CancellationToken,
2761 0 : ) -> Result<Response<Body>, ApiError> {
2762 0 : // Deliberately cause a panic to exercise the panic hook registered via std::panic::set_hook().
2763 0 : // For pageserver, the relevant panic hook is `tracing_panic_hook`, and the `sentry` crate's wrapper around it.
2764 0 : // Use catch_unwind to ensure that neither tokio nor hyper is distracted by our panic.
2765 0 : let query = req.uri().query();
2766 0 : let _ = std::panic::catch_unwind(|| {
2767 0 : panic!("unconditional panic for testing panic hook integration; request query: {query:?}")
2768 0 : });
2769 0 : json_response(StatusCode::NO_CONTENT, ())
2770 0 : }
2771 :
2772 0 : async fn disk_usage_eviction_run(
2773 0 : mut r: Request<Body>,
2774 0 : cancel: CancellationToken,
2775 0 : ) -> Result<Response<Body>, ApiError> {
2776 0 : check_permission(&r, None)?;
2777 :
2778 0 : #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
2779 : struct Config {
2780 : /// How many bytes to evict before reporting that pressure is relieved.
2781 : evict_bytes: u64,
2782 :
2783 : #[serde(default)]
2784 : eviction_order: pageserver_api::config::EvictionOrder,
2785 : }
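     : // Illustrative request body ("eviction_order" may be omitted thanks to serde(default)):
     : //   {"evict_bytes": 1073741824}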
2786 :
2787 : #[derive(Debug, Clone, Copy, serde::Serialize)]
2788 : struct Usage {
2789 : // remains unchanged after instantiation of the struct
2790 : evict_bytes: u64,
2791 : // updated by `add_available_bytes`
2792 : freed_bytes: u64,
2793 : }
2794 :
2795 : impl crate::disk_usage_eviction_task::Usage for Usage {
2796 0 : fn has_pressure(&self) -> bool {
2797 0 : self.evict_bytes > self.freed_bytes
2798 0 : }
2799 :
2800 0 : fn add_available_bytes(&mut self, bytes: u64) {
2801 0 : self.freed_bytes += bytes;
2802 0 : }
2803 : }
2804 :
2805 0 : let config = json_request::<Config>(&mut r).await?;
2806 :
2807 0 : let usage = Usage {
2808 0 : evict_bytes: config.evict_bytes,
2809 0 : freed_bytes: 0,
2810 0 : };
2811 0 :
2812 0 : let state = get_state(&r);
2813 0 : let eviction_state = state.disk_usage_eviction_state.clone();
2814 :
2815 0 : let res = crate::disk_usage_eviction_task::disk_usage_eviction_task_iteration_impl(
2816 0 : &eviction_state,
2817 0 : &state.remote_storage,
2818 0 : usage,
2819 0 : &state.tenant_manager,
2820 0 : config.eviction_order.into(),
2821 0 : &cancel,
2822 0 : )
2823 0 : .await;
2824 :
2825 0 : info!(?res, "disk_usage_eviction_task_iteration_impl finished");
2826 :
2827 0 : let res = res.map_err(ApiError::InternalServerError)?;
2828 :
2829 0 : json_response(StatusCode::OK, res)
2830 0 : }
2831 :
2832 0 : async fn secondary_upload_handler(
2833 0 : request: Request<Body>,
2834 0 : _cancel: CancellationToken,
2835 0 : ) -> Result<Response<Body>, ApiError> {
2836 0 : let state = get_state(&request);
2837 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2838 0 : state
2839 0 : .secondary_controller
2840 0 : .upload_tenant(tenant_shard_id)
2841 0 : .await?;
2842 :
2843 0 : json_response(StatusCode::OK, ())
2844 0 : }
2845 :
2846 0 : async fn tenant_scan_remote_handler(
2847 0 : request: Request<Body>,
2848 0 : cancel: CancellationToken,
2849 0 : ) -> Result<Response<Body>, ApiError> {
2850 0 : let state = get_state(&request);
2851 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
2852 :
2853 0 : let mut response = TenantScanRemoteStorageResponse::default();
2854 :
2855 0 : let (shards, _other_keys) =
2856 0 : list_remote_tenant_shards(&state.remote_storage, tenant_id, cancel.clone())
2857 0 : .await
2858 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2859 :
2860 0 : for tenant_shard_id in shards {
2861 0 : let (timeline_ids, _other_keys) =
2862 0 : list_remote_timelines(&state.remote_storage, tenant_shard_id, cancel.clone())
2863 0 : .await
2864 0 : .map_err(|e| ApiError::InternalServerError(anyhow::anyhow!(e)))?;
2865 :
2866 0 : let mut generation = Generation::none();
2867 0 : for timeline_id in timeline_ids {
2868 0 : match download_index_part(
2869 0 : &state.remote_storage,
2870 0 : &tenant_shard_id,
2871 0 : &timeline_id,
2872 0 : Generation::MAX,
2873 0 : &cancel,
2874 0 : )
2875 0 : .instrument(info_span!("download_index_part",
2876 : tenant_id=%tenant_shard_id.tenant_id,
2877 0 : shard_id=%tenant_shard_id.shard_slug(),
2878 : %timeline_id))
2879 0 : .await
2880 : {
2881 0 : Ok((index_part, index_generation, _index_mtime)) => {
2882 0 : tracing::info!(
2883 0 : "Found timeline {tenant_shard_id}/{timeline_id} metadata (gen {index_generation:?}, {} layers, {} consistent LSN)",
2884 0 : index_part.layer_metadata.len(),
2885 0 : index_part.metadata.disk_consistent_lsn()
2886 : );
2887 0 : generation = std::cmp::max(generation, index_generation);
2888 : }
2889 : Err(DownloadError::NotFound) => {
2890 : // This is normal for tenants that were created with multiple shards: they have an unsharded path
2891 : // containing the timeline's initdb tarball but no index. For other tenants this is a bit strange.
2892 0 : tracing::info!(
2893 0 : "Timeline path {tenant_shard_id}/{timeline_id} exists in remote storage but has no index, skipping"
2894 : );
2895 0 : continue;
2896 : }
2897 0 : Err(e) => {
2898 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2899 : }
2900 : };
2901 : }
2902 :
2903 0 : response.shards.push(TenantScanRemoteStorageShard {
2904 0 : tenant_shard_id,
2905 0 : generation: generation.into(),
2906 0 : });
2907 : }
2908 :
2909 0 : if response.shards.is_empty() {
2910 0 : return Err(ApiError::NotFound(
2911 0 : anyhow::anyhow!("No shards found for tenant ID {tenant_id}").into(),
2912 0 : ));
2913 0 : }
2914 0 :
2915 0 : json_response(StatusCode::OK, response)
2916 0 : }
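// A hedged sketch of the response shape, based on how `response.shards` is
// populated above (the exact wire encodings of the fields are assumptions):
//
//   {"shards": [{"tenant_shard_id": "<tenant_shard_id>", "generation": 7}]}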
2917 :
2918 0 : async fn secondary_download_handler(
2919 0 : request: Request<Body>,
2920 0 : _cancel: CancellationToken,
2921 0 : ) -> Result<Response<Body>, ApiError> {
2922 0 : let state = get_state(&request);
2923 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2924 0 : let wait = parse_query_param(&request, "wait_ms")?.map(Duration::from_millis);
2925 :
2926 : // We don't need this to issue the download request, but:
2927 : // - it enables us to cleanly return 404 if we get a request for an absent shard
2928 : // - we will use this to provide status feedback in the response
2929 0 : let Some(secondary_tenant) = state
2930 0 : .tenant_manager
2931 0 : .get_secondary_tenant_shard(tenant_shard_id)
2932 : else {
2933 0 : return Err(ApiError::NotFound(
2934 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
2935 0 : ));
2936 : };
2937 :
2938 0 : let timeout = wait.unwrap_or(Duration::MAX);
2939 :
2940 0 : let result = tokio::time::timeout(
2941 0 : timeout,
2942 0 : state.secondary_controller.download_tenant(tenant_shard_id),
2943 0 : )
2944 0 : .await;
2945 :
2946 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
2947 :
2948 0 : let status = match result {
2949 : Ok(Ok(())) => {
2950 0 : if progress.layers_downloaded >= progress.layers_total {
2951 : // Download job ran to completion
2952 0 : StatusCode::OK
2953 : } else {
2954 : // Download dropped out without errors because it ran out of time budget
2955 0 : StatusCode::ACCEPTED
2956 : }
2957 : }
2958 : // Downloads aren't usually fallible; things like a missing heatmap are considered
2959 : // okay. We could still get an error here in the unlikely edge case that the tenant
2960 : // was detached between our check above and executing the download job.
2961 0 : Ok(Err(e)) => return Err(e.into()),
2962 : // A timeout is not an error: we have started the download, we're just not done
2963 : // yet. The caller will get a response body indicating status.
2964 0 : Err(_) => StatusCode::ACCEPTED,
2965 : };
2966 :
2967 0 : json_response(status, progress)
2968 0 : }
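// A hedged usage sketch (route from make_router() below). `wait_ms` bounds how
// long the request blocks; the status code distinguishes the outcomes handled above:
//
//   POST /v1/tenant/<tenant_shard_id>/secondary/download?wait_ms=5000
//
// 200 OK: the download ran to completion. 202 Accepted: still in progress,
// either the download's own time budget or `wait_ms` ran out. In all cases the
// body reports the shard's download progress.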
2969 :
2970 0 : async fn wait_lsn_handler(
2971 0 : mut request: Request<Body>,
2972 0 : cancel: CancellationToken,
2973 0 : ) -> Result<Response<Body>, ApiError> {
2974 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
2975 0 : let wait_lsn_request: TenantWaitLsnRequest = json_request(&mut request).await?;
2976 :
2977 0 : let state = get_state(&request);
2978 0 : let tenant = state
2979 0 : .tenant_manager
2980 0 : .get_attached_tenant_shard(tenant_shard_id)?;
2981 :
2982 0 : let mut wait_futures = Vec::default();
2983 0 : for timeline in tenant.list_timelines() {
2984 0 : let Some(lsn) = wait_lsn_request.timelines.get(&timeline.timeline_id) else {
2985 0 : continue;
2986 : };
2987 :
2988 0 : let fut = {
2989 0 : let timeline = timeline.clone();
2990 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Error);
2991 0 : async move {
2992 0 : timeline
2993 0 : .wait_lsn(
2994 0 : *lsn,
2995 0 : WaitLsnWaiter::HttpEndpoint,
2996 0 : WaitLsnTimeout::Custom(wait_lsn_request.timeout),
2997 0 : &ctx,
2998 0 : )
2999 0 : .await
3000 0 : }
3001 0 : };
3002 0 : wait_futures.push(fut);
3003 0 : }
3004 :
3005 0 : if wait_futures.is_empty() {
3006 0 : return json_response(StatusCode::NOT_FOUND, ());
3007 0 : }
3008 :
3009 0 : let all_done = tokio::select! {
3010 0 : results = join_all(wait_futures) => {
3011 0 : results.iter().all(|res| res.is_ok())
3012 : },
3013 0 : _ = cancel.cancelled() => {
3014 0 : return Err(ApiError::Cancelled);
3015 : }
3016 : };
3017 :
3018 0 : let status = if all_done {
3019 0 : StatusCode::OK
3020 : } else {
3021 0 : StatusCode::ACCEPTED
3022 : };
3023 :
3024 0 : json_response(status, ())
3025 0 : }
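// A hedged request sketch. The field names follow the uses of
// TenantWaitLsnRequest above; the exact wire encodings of LSNs and of
// `timeout` are assumptions:
//
//   POST /v1/tenant/<tenant_shard_id>/wait_lsn
//   Content-Type: application/json
//
//   {"timelines": {"<timeline_id>": "0/169AD58"}, "timeout": "10s"}
//
// 200 OK means every listed timeline reached its LSN; 202 Accepted means at
// least one wait failed or timed out; 404 means none of the listed timelines
// exist on this shard.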
3026 :
3027 0 : async fn secondary_status_handler(
3028 0 : request: Request<Body>,
3029 0 : _cancel: CancellationToken,
3030 0 : ) -> Result<Response<Body>, ApiError> {
3031 0 : let state = get_state(&request);
3032 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3033 :
3034 0 : let Some(secondary_tenant) = state
3035 0 : .tenant_manager
3036 0 : .get_secondary_tenant_shard(tenant_shard_id)
3037 : else {
3038 0 : return Err(ApiError::NotFound(
3039 0 : anyhow::anyhow!("Shard {} not found", tenant_shard_id).into(),
3040 0 : ));
3041 : };
3042 :
3043 0 : let progress = secondary_tenant.progress.lock().unwrap().clone();
3044 0 :
3045 0 : json_response(StatusCode::OK, progress)
3046 0 : }
3047 :
3048 0 : async fn handler_404(_: Request<Body>) -> Result<Response<Body>, ApiError> {
3049 0 : json_response(
3050 0 : StatusCode::NOT_FOUND,
3051 0 : HttpErrorBody::from_msg("page not found".to_owned()),
3052 0 : )
3053 0 : }
3054 :
3055 0 : async fn post_tracing_event_handler(
3056 0 : mut r: Request<Body>,
3057 0 : _cancel: CancellationToken,
3058 0 : ) -> Result<Response<Body>, ApiError> {
3059 0 : #[derive(Debug, serde::Deserialize)]
3060 : #[serde(rename_all = "lowercase")]
3061 : enum Level {
3062 : Error,
3063 : Warn,
3064 : Info,
3065 : Debug,
3066 : Trace,
3067 : }
3068 0 : #[derive(Debug, serde::Deserialize)]
3069 : struct Request {
3070 : level: Level,
3071 : message: String,
3072 : }
3073 0 : let body: Request = json_request(&mut r)
3074 0 : .await
3075 0 : .map_err(|_| ApiError::BadRequest(anyhow::anyhow!("invalid JSON body")))?;
3076 :
3077 0 : match body.level {
3078 0 : Level::Error => tracing::error!(?body.message),
3079 0 : Level::Warn => tracing::warn!(?body.message),
3080 0 : Level::Info => tracing::info!(?body.message),
3081 0 : Level::Debug => tracing::debug!(?body.message),
3082 0 : Level::Trace => tracing::trace!(?body.message),
3083 : }
3084 :
3085 0 : json_response(StatusCode::OK, ())
3086 0 : }
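// An example body for this testing-only endpoint, grounded in the Request
// struct above (levels are lowercased by #[serde(rename_all = "lowercase")]):
//
//   {"level": "info", "message": "hello from the management API"}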
3087 :
3088 0 : async fn put_io_engine_handler(
3089 0 : mut r: Request<Body>,
3090 0 : _cancel: CancellationToken,
3091 0 : ) -> Result<Response<Body>, ApiError> {
3092 0 : check_permission(&r, None)?;
3093 0 : let kind: crate::virtual_file::IoEngineKind = json_request(&mut r).await?;
3094 0 : crate::virtual_file::io_engine::set(kind);
3095 0 : json_response(StatusCode::OK, ())
3096 0 : }
3097 :
3098 0 : async fn put_io_mode_handler(
3099 0 : mut r: Request<Body>,
3100 0 : _cancel: CancellationToken,
3101 0 : ) -> Result<Response<Body>, ApiError> {
3102 0 : check_permission(&r, None)?;
3103 0 : let mode: IoMode = json_request(&mut r).await?;
3104 0 : crate::virtual_file::set_io_mode(mode);
3105 0 : json_response(StatusCode::OK, ())
3106 0 : }
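// Both endpoints above take a bare JSON value as the body: a serialized
// IoEngineKind for /v1/io_engine and an IoMode for /v1/io_mode. A hedged
// example (the variant spelling "tokio-epoll-uring" matches the pageserver
// config convention, but is an assumption for this wire format):
//
//   PUT /v1/io_engine
//
//   "tokio-epoll-uring"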
3107 :
3108 : /// Polled by control plane.
3109 : ///
3110 : /// See [`crate::utilization`].
3111 0 : async fn get_utilization(
3112 0 : r: Request<Body>,
3113 0 : _cancel: CancellationToken,
3114 0 : ) -> Result<Response<Body>, ApiError> {
3115 0 : fail::fail_point!("get-utilization-http-handler", |_| {
3116 0 : Err(ApiError::ResourceUnavailable("failpoint".into()))
3117 0 : });
3118 :
3119 : // This could probably be completely public, but let's make that change later.
3120 0 : check_permission(&r, None)?;
3121 :
3122 0 : let state = get_state(&r);
3123 0 : let mut g = state.latest_utilization.lock().await;
3124 :
3125 0 : let regenerate_every = Duration::from_secs(1);
3126 0 : let still_valid = g
3127 0 : .as_ref()
3128 0 : .is_some_and(|(captured_at, _)| captured_at.elapsed() < regenerate_every);
3129 0 :
3130 0 : // Avoid needless statvfs calls, even though those should be fast and non-blocking.
3131 0 : // Regenerate at most at 1Hz, to allow polling at any rate.
3132 0 : if !still_valid {
3133 0 : let path = state.conf.tenants_path();
3134 0 : let doc =
3135 0 : crate::utilization::regenerate(state.conf, path.as_std_path(), &state.tenant_manager)
3136 0 : .map_err(ApiError::InternalServerError)?;
3137 :
3138 0 : let mut buf = Vec::new();
3139 0 : serde_json::to_writer(&mut buf, &doc)
3140 0 : .context("serialize")
3141 0 : .map_err(ApiError::InternalServerError)?;
3142 :
3143 0 : let body = bytes::Bytes::from(buf);
3144 0 :
3145 0 : *g = Some((std::time::Instant::now(), body));
3146 0 : }
3147 :
3148 : // hyper 0.14 doesn't yet have Response::clone so this is a bit of extra legwork
3149 0 : let cached = g.as_ref().expect("just set").1.clone();
3150 0 :
3151 0 : Response::builder()
3152 0 : .header(hyper::http::header::CONTENT_TYPE, "application/json")
3153 0 : // We considered using the HTTP Date header, but its one-second precision
3154 0 : // is no help for debugging.
3155 0 : .status(StatusCode::OK)
3156 0 : .body(hyper::Body::from(cached))
3157 0 : .context("build response")
3158 0 : .map_err(ApiError::InternalServerError)
3159 0 : }
3160 :
3161 0 : async fn list_aux_files(
3162 0 : mut request: Request<Body>,
3163 0 : _cancel: CancellationToken,
3164 0 : ) -> Result<Response<Body>, ApiError> {
3165 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3166 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3167 0 : let body: ListAuxFilesRequest = json_request(&mut request).await?;
3168 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3169 :
3170 0 : let state = get_state(&request);
3171 :
3172 0 : let timeline =
3173 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3174 0 : .await?;
3175 :
3176 0 : let io_concurrency = IoConcurrency::spawn_from_conf(
3177 0 : state.conf,
3178 0 : timeline.gate.enter().map_err(|_| ApiError::Cancelled)?,
3179 : );
3180 :
3181 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
3182 0 : let files = timeline
3183 0 : .list_aux_files(body.lsn, &ctx, io_concurrency)
3184 0 : .await?;
3185 0 : json_response(StatusCode::OK, files)
3186 0 : }
3187 :
3188 0 : async fn perf_info(
3189 0 : request: Request<Body>,
3190 0 : _cancel: CancellationToken,
3191 0 : ) -> Result<Response<Body>, ApiError> {
3192 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3193 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3194 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3195 :
3196 0 : let state = get_state(&request);
3197 :
3198 0 : let timeline =
3199 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3200 0 : .await?;
3201 :
3202 0 : let result = timeline.perf_info().await;
3203 :
3204 0 : json_response(StatusCode::OK, result)
3205 0 : }
3206 :
3207 0 : async fn ingest_aux_files(
3208 0 : mut request: Request<Body>,
3209 0 : _cancel: CancellationToken,
3210 0 : ) -> Result<Response<Body>, ApiError> {
3211 0 : let tenant_shard_id: TenantShardId = parse_request_param(&request, "tenant_shard_id")?;
3212 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3213 0 : let body: IngestAuxFilesRequest = json_request(&mut request).await?;
3214 0 : check_permission(&request, Some(tenant_shard_id.tenant_id))?;
3215 :
3216 0 : let state = get_state(&request);
3217 :
3218 0 : let timeline =
3219 0 : active_timeline_of_active_tenant(&state.tenant_manager, tenant_shard_id, timeline_id)
3220 0 : .await?;
3221 :
3222 0 : let mut modification = timeline.begin_modification(
3223 0 : Lsn(timeline.get_last_record_lsn().0 + 8), /* advance LSN by 8 */
3224 0 : );
3225 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Download);
3226 0 : for (fname, content) in body.aux_files {
3227 0 : modification
3228 0 : .put_file(&fname, content.as_bytes(), &ctx)
3229 0 : .await
3230 0 : .map_err(ApiError::InternalServerError)?;
3231 : }
3232 0 : modification
3233 0 : .commit(&ctx)
3234 0 : .await
3235 0 : .map_err(ApiError::InternalServerError)?;
3236 :
3237 0 : json_response(StatusCode::OK, ())
3238 0 : }
3239 :
3240 : /// Report on the largest tenants on this pageserver, for the storage controller to identify
3241 : /// candidates for splitting
3242 0 : async fn post_top_tenants(
3243 0 : mut r: Request<Body>,
3244 0 : _cancel: CancellationToken,
3245 0 : ) -> Result<Response<Body>, ApiError> {
3246 0 : check_permission(&r, None)?;
3247 0 : let request: TopTenantShardsRequest = json_request(&mut r).await?;
3248 0 : let state = get_state(&r);
3249 :
3250 0 : fn get_size_metric(sizes: &TopTenantShardItem, order_by: &TenantSorting) -> u64 {
3251 0 : match order_by {
3252 0 : TenantSorting::ResidentSize => sizes.resident_size,
3253 0 : TenantSorting::MaxLogicalSize => sizes.max_logical_size,
3254 0 : TenantSorting::MaxLogicalSizePerShard => sizes.max_logical_size_per_shard,
3255 : }
3256 0 : }
3257 :
3258 : #[derive(Eq, PartialEq)]
3259 : struct HeapItem {
3260 : metric: u64,
3261 : sizes: TopTenantShardItem,
3262 : }
3263 :
3264 : impl PartialOrd for HeapItem {
3265 0 : fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
3266 0 : Some(self.cmp(other))
3267 0 : }
3268 : }
3269 :
3270 : /// Heap items have reverse ordering on their metric: this enables using BinaryHeap, which
3271 : /// supports popping the greatest item but not the smallest.
3272 : impl Ord for HeapItem {
3273 0 : fn cmp(&self, other: &Self) -> std::cmp::Ordering {
3274 0 : Reverse(self.metric).cmp(&Reverse(other.metric))
3275 0 : }
3276 : }
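// Consequence of the reversed Ord: peek()/pop() on this heap return the item
// with the *smallest* metric, which is exactly the element to discard when the
// heap grows past `request.limit` (see the `while top_n.len() > request.limit`
// loop below).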
3277 :
3278 0 : let mut top_n: BinaryHeap<HeapItem> = BinaryHeap::with_capacity(request.limit);
3279 :
3280 : // FIXME: this is a lot of clones to take this tenant list
3281 0 : for (tenant_shard_id, tenant_slot) in state.tenant_manager.list() {
3282 0 : if let Some(shards_lt) = request.where_shards_lt {
3283 : // Ignore tenants which already have >= this many shards
3284 0 : if tenant_shard_id.shard_count >= shards_lt {
3285 0 : continue;
3286 0 : }
3287 0 : }
3288 :
3289 0 : let sizes = match tenant_slot {
3290 0 : TenantSlot::Attached(tenant) => tenant.get_sizes(),
3291 : TenantSlot::Secondary(_) | TenantSlot::InProgress(_) => {
3292 0 : continue;
3293 : }
3294 : };
3295 0 : let metric = get_size_metric(&sizes, &request.order_by);
3296 :
3297 0 : if let Some(gt) = request.where_gt {
3298 : // Ignore tenants whose metric is <= the lower size threshold, to do less sorting work
3299 0 : if metric <= gt {
3300 0 : continue;
3301 0 : }
3302 0 : };
3303 :
3304 0 : match top_n.peek() {
3305 0 : None => {
3306 0 : // Top N list is empty: candidate becomes first member
3307 0 : top_n.push(HeapItem { metric, sizes });
3308 0 : }
3309 0 : Some(i) if i.metric > metric && top_n.len() < request.limit => {
3310 0 : // Lowest item in the list is greater than our candidate, but we aren't at the limit yet: push the candidate
3311 0 : top_n.push(HeapItem { metric, sizes });
3312 0 : }
3313 0 : Some(i) if i.metric > metric => {
3314 0 : // List is at limit and lowest value is greater than our candidate, drop it.
3315 0 : }
3316 0 : Some(_) => top_n.push(HeapItem { metric, sizes }),
3317 : }
3318 :
3319 0 : while top_n.len() > request.limit {
3320 0 : top_n.pop();
3321 0 : }
3322 : }
3323 :
3324 0 : json_response(
3325 0 : StatusCode::OK,
3326 0 : TopTenantShardsResponse {
3327 0 : shards: top_n.into_iter().map(|i| i.sizes).collect(),
3328 0 : },
3329 0 : )
3330 0 : }
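// A hedged request sketch for /v1/top_tenants. Field names follow the uses of
// TopTenantShardsRequest above; the wire casing of the `order_by` variants is
// an assumption:
//
//   POST /v1/top_tenants
//   Content-Type: application/json
//
//   {"order_by": "max_logical_size", "limit": 10, "where_shards_lt": 8, "where_gt": 1000000}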
3331 :
3332 0 : async fn put_tenant_timeline_import_basebackup(
3333 0 : request: Request<Body>,
3334 0 : _cancel: CancellationToken,
3335 0 : ) -> Result<Response<Body>, ApiError> {
3336 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3337 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3338 0 : let base_lsn: Lsn = must_parse_query_param(&request, "base_lsn")?;
3339 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3340 0 : let pg_version: u32 = must_parse_query_param(&request, "pg_version")?;
3341 :
3342 0 : check_permission(&request, Some(tenant_id))?;
3343 :
3344 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3345 0 :
3346 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
3347 :
3348 0 : let span = info_span!("import_basebackup",
3349 0 : tenant_id=%tenant_id, timeline_id=%timeline_id, shard_id=%tenant_shard_id.shard_slug(),
3350 : base_lsn=%base_lsn, end_lsn=%end_lsn, pg_version=%pg_version);
3351 0 : async move {
3352 0 : let state = get_state(&request);
3353 0 : let tenant = state
3354 0 : .tenant_manager
3355 0 : .get_attached_tenant_shard(tenant_shard_id)?;
3356 :
3357 0 : let broker_client = state.broker_client.clone();
3358 0 :
3359 0 : let mut body = StreamReader::new(request.into_body().map(|res| {
3360 0 : res.map_err(|error| {
3361 0 : std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
3362 0 : })
3363 0 : }));
3364 0 :
3365 0 : tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
3366 :
3367 0 : let (timeline, timeline_ctx) = tenant
3368 0 : .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
3369 0 : .map_err(ApiError::InternalServerError)
3370 0 : .await?;
3371 :
3372 : // TODO mark the timeline as not ready until it reaches end_lsn.
3373 : // We might have some WAL to import as well, and we should prevent compute
3374 : // from connecting before that and writing conflicting WAL.
3375 : //
3376 : // This is not relevant for pageserver->pageserver migrations, since there's
3377 : // no WAL to import. But it should be fixed if we want to import from Postgres.
3378 :
3379 : // TODO leave clean state on error. For now you can use detach to clean
3380 : // up broken state from a failed import.
3381 :
3382 : // Import basebackup provided via CopyData
3383 0 : info!("importing basebackup");
3384 :
3385 0 : timeline
3386 0 : .import_basebackup_from_tar(
3387 0 : tenant.clone(),
3388 0 : &mut body,
3389 0 : base_lsn,
3390 0 : broker_client,
3391 0 : &timeline_ctx,
3392 0 : )
3393 0 : .await
3394 0 : .map_err(ApiError::InternalServerError)?;
3395 :
3396 : // Read the end of the tar archive.
3397 0 : read_tar_eof(body)
3398 0 : .await
3399 0 : .map_err(ApiError::InternalServerError)?;
3400 :
3401 : // TODO check checksum
3402 : // Meanwhile, you can verify client-side by taking a fullbackup
3403 : // and checking that its size matches what was imported.
3404 : // That wouldn't work if the base came from vanilla Postgres, though,
3405 : // since we discard some log files.
3406 :
3407 0 : info!("done");
3408 0 : json_response(StatusCode::OK, ())
3409 0 : }
3410 0 : .instrument(span)
3411 0 : .await
3412 0 : }
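// A hedged invocation sketch (route and query parameters from the handler
// above and make_router() below; the placeholder values are illustrative, and
// the `/` in LSNs would need URL-encoding in practice). The basebackup tar
// stream is the request body:
//
//   PUT /v1/tenant/<tenant_id>/timeline/<timeline_id>/import_basebackup?base_lsn=0/149F0D8&end_lsn=0/149F0D8&pg_version=16
//   (request body: the basebackup tar stream)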
3413 :
3414 0 : async fn put_tenant_timeline_import_wal(
3415 0 : request: Request<Body>,
3416 0 : _cancel: CancellationToken,
3417 0 : ) -> Result<Response<Body>, ApiError> {
3418 0 : let tenant_id: TenantId = parse_request_param(&request, "tenant_id")?;
3419 0 : let timeline_id: TimelineId = parse_request_param(&request, "timeline_id")?;
3420 0 : let start_lsn: Lsn = must_parse_query_param(&request, "start_lsn")?;
3421 0 : let end_lsn: Lsn = must_parse_query_param(&request, "end_lsn")?;
3422 :
3423 0 : check_permission(&request, Some(tenant_id))?;
3424 :
3425 0 : let ctx = RequestContext::new(TaskKind::MgmtRequest, DownloadBehavior::Warn);
3426 :
3427 0 : let span = info_span!("import_wal", tenant_id=%tenant_id, timeline_id=%timeline_id, start_lsn=%start_lsn, end_lsn=%end_lsn);
3428 0 : async move {
3429 0 : let state = get_state(&request);
3430 :
3431 0 : let timeline = active_timeline_of_active_tenant(&state.tenant_manager, TenantShardId::unsharded(tenant_id), timeline_id).await?;
3432 0 : let ctx = RequestContextBuilder::extend(&ctx).scope(context::Scope::new_timeline(&timeline)).build();
3433 0 :
3434 0 : let mut body = StreamReader::new(request.into_body().map(|res| {
3435 0 : res.map_err(|error| {
3436 0 : std::io::Error::new(std::io::ErrorKind::Other, anyhow::anyhow!(error))
3437 0 : })
3438 0 : }));
3439 0 :
3440 0 : let last_record_lsn = timeline.get_last_record_lsn();
3441 0 : if last_record_lsn != start_lsn {
3442 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("Cannot import WAL starting at LSN {start_lsn}: the timeline's last record LSN is {last_record_lsn}, and the import must start exactly there")));
3443 0 : }
3444 0 :
3445 0 : // TODO leave clean state on error. For now you can use detach to clean
3446 0 : // up broken state from a failed import.
3447 0 :
3448 0 : // Import WAL provided via CopyData
3449 0 : info!("importing wal");
3450 0 : crate::import_datadir::import_wal_from_tar(&timeline, &mut body, start_lsn, end_lsn, &ctx).await.map_err(ApiError::InternalServerError)?;
3451 0 : info!("wal import complete");
3452 :
3453 : // Read the end of the tar archive.
3454 0 : read_tar_eof(body).await.map_err(ApiError::InternalServerError)?;
3455 :
3456 : // TODO Does it make sense to overshoot?
3457 0 : if timeline.get_last_record_lsn() < end_lsn {
3458 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("WAL import only reached LSN {}, short of the requested end LSN {end_lsn}", timeline.get_last_record_lsn())));
3459 0 : }
3460 0 :
3461 0 : // Flush data to disk, then upload to s3. No need for a forced checkpoint.
3462 0 : // We only want to persist the data, and it doesn't matter if it's in the
3463 0 : // shape of deltas or images.
3464 0 : info!("flushing layers");
3465 0 : timeline.freeze_and_flush().await.map_err(|e| match e {
3466 0 : tenant::timeline::FlushLayerError::Cancelled => ApiError::ShuttingDown,
3467 0 : other => ApiError::InternalServerError(anyhow::anyhow!(other)),
3468 0 : })?;
3469 :
3470 0 : info!("done");
3471 :
3472 0 : json_response(StatusCode::OK, ())
3473 0 : }.instrument(span).await
3474 0 : }
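// As with import_basebackup above, a hedged sketch: the WAL tar stream is the
// request body, and `start_lsn` must equal the timeline's last record LSN:
//
//   PUT /v1/tenant/<tenant_id>/timeline/<timeline_id>/import_wal?start_lsn=0/149F0D8&end_lsn=0/15A03B0
//   (request body: the WAL tar stream)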
3475 :
3476 : /// Read the end of a tar archive.
3477 : ///
3478 : /// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
3479 : /// `tokio_tar` already read the first such block. Read the second all-zeros block,
3480 : /// and check that there is no more data after the EOF marker.
3481 : ///
3482 : // The 'tar' command can also write extra blocks of zeros, up to a record
3483 : // size, controlled by the --record-size argument. Ignore them too.
3484 0 : async fn read_tar_eof(mut reader: (impl tokio::io::AsyncRead + Unpin)) -> anyhow::Result<()> {
3485 : use tokio::io::AsyncReadExt;
3486 0 : let mut buf = [0u8; 512];
3487 0 :
3488 0 : // Read the all-zeros block, and verify it
3489 0 : let mut total_bytes = 0;
3490 0 : while total_bytes < 512 {
3491 0 : let nbytes = reader.read(&mut buf[total_bytes..]).await?;
3492 0 : total_bytes += nbytes;
3493 0 : if nbytes == 0 {
3494 0 : break;
3495 0 : }
3496 : }
3497 0 : if total_bytes < 512 {
3498 0 : anyhow::bail!("incomplete or invalid tar EOF marker");
3499 0 : }
3500 0 : if !buf.iter().all(|&x| x == 0) {
3501 0 : anyhow::bail!("invalid tar EOF marker");
3502 0 : }
3503 0 :
3504 0 : // Drain any extra zero-blocks after the EOF marker
3505 0 : let mut trailing_bytes = 0;
3506 0 : let mut seen_nonzero_bytes = false;
3507 : loop {
3508 0 : let nbytes = reader.read(&mut buf).await?;
3509 0 : trailing_bytes += nbytes;
3510 0 : if !buf.iter().all(|&x| x == 0) {
3511 0 : seen_nonzero_bytes = true;
3512 0 : }
3513 0 : if nbytes == 0 {
3514 0 : break;
3515 0 : }
3516 : }
3517 0 : if seen_nonzero_bytes {
3518 0 : anyhow::bail!("unexpected non-zero bytes after the tar archive");
3519 0 : }
3520 0 : if trailing_bytes % 512 != 0 {
3521 0 : anyhow::bail!(
3522 0 : "unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive"
3523 0 : );
3524 0 : }
3525 0 : Ok(())
3526 0 : }
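// A minimal sketch of the contract above, written as a would-be unit test (not
// present in the source). Recall that tokio_tar has already consumed the first
// zero block, so the reader handed to read_tar_eof starts at the second one;
// feeding it three zero blocks exercises the "extra record padding" path:
//
//   #[tokio::test]
//   async fn read_tar_eof_accepts_trailing_zero_blocks() {
//       // one mandatory EOF block + two blocks of record padding, all zeros
//       let buf = vec![0u8; 512 * 3];
//       read_tar_eof(std::io::Cursor::new(buf)).await.unwrap();
//   }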
3527 :
3528 : /// Common functionality of all the HTTP API handlers.
3529 : ///
3530 : /// - Adds a tracing span to each request (by `request_span`)
3531 : /// - Logs the request depending on the request method (by `request_span`)
3532 : /// - Logs the response if it was not successful (by `request_span`)
3533 : /// - Shields the handler function from async cancellations. Hyper can drop the handler
3534 : /// Future if the connection to the client is lost, but most of the pageserver code is
3535 : /// not async cancellation safe. This converts the dropped future into a graceful cancellation
3536 : /// request with a CancellationToken.
3537 0 : async fn api_handler<R, H>(request: Request<Body>, handler: H) -> Result<Response<Body>, ApiError>
3538 0 : where
3539 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3540 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3541 0 : {
3542 0 : if request.uri() != &"/v1/failpoints".parse::<Uri>().unwrap() {
3543 0 : fail::fail_point!("api-503", |_| Err(ApiError::ResourceUnavailable(
3544 0 : "failpoint".into()
3545 0 : )));
3546 :
3547 0 : fail::fail_point!("api-500", |_| Err(ApiError::InternalServerError(
3548 0 : anyhow::anyhow!("failpoint")
3549 0 : )));
3550 0 : }
3551 :
3552 : // Spawn a new task to handle the request, to protect the handler from unexpected
3553 : // async cancellations. Most pageserver functions are not async cancellation safe.
3554 : // We arm a drop-guard, so that if Hyper drops the Future, we signal the task
3555 : // with the cancellation token.
3556 0 : let token = CancellationToken::new();
3557 0 : let cancel_guard = token.clone().drop_guard();
3558 0 : let result = request_span(request, move |r| async {
3559 0 : let handle = tokio::spawn(
3560 0 : async {
3561 0 : let token_cloned = token.clone();
3562 0 : let result = handler(r, token).await;
3563 0 : if token_cloned.is_cancelled() {
3564 : // The drop guard has executed: we will never turn this result into a response.
3565 : //
3566 : // At least temporarily, log with {:?}; these failures are rare enough, but
3567 : // could hide difficult errors.
3568 0 : match &result {
3569 0 : Ok(response) => {
3570 0 : let status = response.status();
3571 0 : info!(%status, "Cancelled request finished successfully")
3572 : }
3573 0 : Err(e) => match e {
3574 : ApiError::ShuttingDown | ApiError::ResourceUnavailable(_) => {
3575 : // Don't log this at error severity: they are normal during lifecycle of tenants/process
3576 0 : info!("Cancelled request aborted for shutdown")
3577 : }
3578 : _ => {
3579 : // Log these in a highly visible way, because we have no client to send the response to, but
3580 : // would like to know that something went wrong.
3581 0 : error!("Cancelled request finished with an error: {e:?}")
3582 : }
3583 : },
3584 : }
3585 0 : }
3586 : // The only logging for cancelled, panicked request handlers is the tracing_panic_hook,
3587 : // which should suffice.
3588 : //
3589 : // There is still a chance to lose the result due to a race between
3590 : // returning from here and the actual connection close happening
3591 : // before the outer task gets to execute. Leaving that for #5815.
3592 0 : result
3593 0 : }
3594 0 : .in_current_span(),
3595 0 : );
3596 0 :
3597 0 : match handle.await {
3598 : // TODO: never actually return Err from here, always Ok(...) so that we can log
3599 : // spanned errors. Call api_error_handler instead and return appropriate Body.
3600 0 : Ok(result) => result,
3601 0 : Err(e) => {
3602 0 : // The handler task panicked. We have a global panic handler that logs the
3603 0 : // panic with its backtrace, so no need to log that here. Only log a brief
3604 0 : // message to make it clear that we returned the error to the client.
3605 0 : error!("HTTP request handler task panicked: {e:#}");
3606 :
3607 : // Don't return an Error here, because then the fallback error handler that was
3608 : // installed in make_router() will print the error. Instead, construct the
3609 : // HTTP error response and return that.
3610 0 : Ok(
3611 0 : ApiError::InternalServerError(anyhow!("HTTP request handler task panicked"))
3612 0 : .into_response(),
3613 0 : )
3614 : }
3615 : }
3616 0 : })
3617 0 : .await;
3618 :
3619 0 : cancel_guard.disarm();
3620 0 :
3621 0 : result
3622 0 : }
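// The cancellation-shielding pattern above in isolation, as a hedged sketch
// (the `shielded` helper is illustrative and not part of this crate): the
// spawned task owns the token, and the drop guard fires if the caller's future
// is dropped, turning an abrupt drop into a graceful cancellation request:
//
//   use tokio_util::sync::CancellationToken;
//
//   async fn shielded<T, F, Fut>(work: F) -> T
//   where
//       F: FnOnce(CancellationToken) -> Fut,
//       Fut: std::future::Future<Output = T> + Send + 'static,
//       T: Send + 'static,
//   {
//       let token = CancellationToken::new();
//       // If our future is dropped, the guard cancels the token.
//       let guard = token.clone().drop_guard();
//       let result = tokio::spawn(work(token)).await.expect("task panicked");
//       guard.disarm(); // ran to completion: don't signal cancellation
//       result
//   }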
3623 :
3624 : /// Like api_handler, but returns an error response if the server is built without
3625 : /// the 'testing' feature.
3626 0 : async fn testing_api_handler<R, H>(
3627 0 : desc: &str,
3628 0 : request: Request<Body>,
3629 0 : handler: H,
3630 0 : ) -> Result<Response<Body>, ApiError>
3631 0 : where
3632 0 : R: std::future::Future<Output = Result<Response<Body>, ApiError>> + Send + 'static,
3633 0 : H: FnOnce(Request<Body>, CancellationToken) -> R + Send + Sync + 'static,
3634 0 : {
3635 0 : if cfg!(feature = "testing") {
3636 0 : api_handler(request, handler).await
3637 : } else {
3638 0 : std::future::ready(Err(ApiError::BadRequest(anyhow!(
3639 0 : "Cannot {desc} because pageserver was compiled without testing APIs",
3640 0 : ))))
3641 0 : .await
3642 : }
3643 0 : }
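// Note: routes registered through testing_api_handler (see make_router below)
// return 400 Bad Request unless the pageserver binary was built with the
// 'testing' cargo feature enabled.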
3644 :
3645 0 : pub fn make_router(
3646 0 : state: Arc<State>,
3647 0 : launch_ts: &'static LaunchTimestamp,
3648 0 : auth: Option<Arc<SwappableJwtAuth>>,
3649 0 : ) -> anyhow::Result<RouterBuilder<hyper::Body, ApiError>> {
3650 0 : let spec = include_bytes!("openapi_spec.yml");
3651 0 : let mut router = attach_openapi_ui(endpoint::make_router(), spec, "/swagger.yml", "/v1/doc");
3652 0 : if auth.is_some() {
3653 0 : router = router.middleware(auth_middleware(|request| {
3654 0 : let state = get_state(request);
3655 0 : if state.allowlist_routes.contains(&request.uri().path()) {
3656 0 : None
3657 : } else {
3658 0 : state.auth.as_deref()
3659 : }
3660 0 : }))
3661 0 : }
3662 :
3663 0 : router = router.middleware(
3664 0 : endpoint::add_response_header_middleware(
3665 0 : "PAGESERVER_LAUNCH_TIMESTAMP",
3666 0 : &launch_ts.to_string(),
3667 0 : )
3668 0 : .expect("construct launch timestamp header middleware"),
3669 0 : );
3670 0 :
3671 0 : Ok(router
3672 0 : .data(state)
3673 0 : .get("/metrics", |r| request_span(r, prometheus_metrics_handler))
3674 0 : .get("/profile/cpu", |r| request_span(r, profile_cpu_handler))
3675 0 : .get("/profile/heap", |r| request_span(r, profile_heap_handler))
3676 0 : .get("/v1/status", |r| api_handler(r, status_handler))
3677 0 : .put("/v1/failpoints", |r| {
3678 0 : testing_api_handler("manage failpoints", r, failpoints_handler)
3679 0 : })
3680 0 : .post("/v1/reload_auth_validation_keys", |r| {
3681 0 : api_handler(r, reload_auth_validation_keys_handler)
3682 0 : })
3683 0 : .get("/v1/tenant", |r| api_handler(r, tenant_list_handler))
3684 0 : .get("/v1/tenant/:tenant_shard_id", |r| {
3685 0 : api_handler(r, tenant_status)
3686 0 : })
3687 0 : .delete("/v1/tenant/:tenant_shard_id", |r| {
3688 0 : api_handler(r, tenant_delete_handler)
3689 0 : })
3690 0 : .get("/v1/tenant/:tenant_shard_id/synthetic_size", |r| {
3691 0 : api_handler(r, tenant_size_handler)
3692 0 : })
3693 0 : .patch("/v1/tenant/config", |r| {
3694 0 : api_handler(r, patch_tenant_config_handler)
3695 0 : })
3696 0 : .put("/v1/tenant/config", |r| {
3697 0 : api_handler(r, update_tenant_config_handler)
3698 0 : })
3699 0 : .put("/v1/tenant/:tenant_shard_id/shard_split", |r| {
3700 0 : api_handler(r, tenant_shard_split_handler)
3701 0 : })
3702 0 : .get("/v1/tenant/:tenant_shard_id/config", |r| {
3703 0 : api_handler(r, get_tenant_config_handler)
3704 0 : })
3705 0 : .put("/v1/tenant/:tenant_shard_id/location_config", |r| {
3706 0 : api_handler(r, put_tenant_location_config_handler)
3707 0 : })
3708 0 : .get("/v1/location_config", |r| {
3709 0 : api_handler(r, list_location_config_handler)
3710 0 : })
3711 0 : .get("/v1/location_config/:tenant_shard_id", |r| {
3712 0 : api_handler(r, get_location_config_handler)
3713 0 : })
3714 0 : .put(
3715 0 : "/v1/tenant/:tenant_shard_id/time_travel_remote_storage",
3716 0 : |r| api_handler(r, tenant_time_travel_remote_storage_handler),
3717 0 : )
3718 0 : .get("/v1/tenant/:tenant_shard_id/timeline", |r| {
3719 0 : api_handler(r, timeline_list_handler)
3720 0 : })
3721 0 : .get("/v1/tenant/:tenant_shard_id/timeline_and_offloaded", |r| {
3722 0 : api_handler(r, timeline_and_offloaded_list_handler)
3723 0 : })
3724 0 : .post("/v1/tenant/:tenant_shard_id/timeline", |r| {
3725 0 : api_handler(r, timeline_create_handler)
3726 0 : })
3727 0 : .post("/v1/tenant/:tenant_shard_id/reset", |r| {
3728 0 : api_handler(r, tenant_reset_handler)
3729 0 : })
3730 0 : .post(
3731 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/preserve_initdb_archive",
3732 0 : |r| api_handler(r, timeline_preserve_initdb_handler),
3733 0 : )
3734 0 : .put(
3735 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/archival_config",
3736 0 : |r| api_handler(r, timeline_archival_config_handler),
3737 0 : )
3738 0 : .get("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
3739 0 : api_handler(r, timeline_detail_handler)
3740 0 : })
3741 0 : .get(
3742 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_lsn_by_timestamp",
3743 0 : |r| api_handler(r, get_lsn_by_timestamp_handler),
3744 0 : )
3745 0 : .get(
3746 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/get_timestamp_of_lsn",
3747 0 : |r| api_handler(r, get_timestamp_of_lsn_handler),
3748 0 : )
3749 0 : .post(
3750 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/patch_index_part",
3751 0 : |r| api_handler(r, timeline_patch_index_part_handler),
3752 0 : )
3753 0 : .post(
3754 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/lsn_lease",
3755 0 : |r| api_handler(r, lsn_lease_handler),
3756 0 : )
3757 0 : .put(
3758 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/do_gc",
3759 0 : |r| api_handler(r, timeline_gc_handler),
3760 0 : )
3761 0 : .get(
3762 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
3763 0 : |r| api_handler(r, timeline_compact_info_handler),
3764 0 : )
3765 0 : .put(
3766 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
3767 0 : |r| api_handler(r, timeline_compact_handler),
3768 0 : )
3769 0 : .delete(
3770 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/compact",
3771 0 : |r| api_handler(r, timeline_cancel_compact_handler),
3772 0 : )
3773 0 : .put(
3774 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/offload",
3775 0 : |r| testing_api_handler("attempt timeline offload", r, timeline_offload_handler),
3776 0 : )
3777 0 : .put(
3778 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/mark_invisible",
3779 0 : |r| testing_api_handler("mark timeline invisible", r, timeline_mark_invisible_handler),
3780 0 : )
3781 0 : .put(
3782 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/checkpoint",
3783 0 : |r| testing_api_handler("run timeline checkpoint", r, timeline_checkpoint_handler),
3784 0 : )
3785 0 : .post(
3786 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
3787 0 : |r| api_handler(r, timeline_download_remote_layers_handler_post),
3788 0 : )
3789 0 : .get(
3790 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_remote_layers",
3791 0 : |r| api_handler(r, timeline_download_remote_layers_handler_get),
3792 0 : )
3793 0 : .put(
3794 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/detach_ancestor",
3795 0 : |r| api_handler(r, timeline_detach_ancestor_handler),
3796 0 : )
3797 0 : .delete("/v1/tenant/:tenant_shard_id/timeline/:timeline_id", |r| {
3798 0 : api_handler(r, timeline_delete_handler)
3799 0 : })
3800 0 : .get(
3801 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer",
3802 0 : |r| api_handler(r, layer_map_info_handler),
3803 0 : )
3804 0 : .post(
3805 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
3806 0 : |r| api_handler(r, timeline_download_heatmap_layers_handler),
3807 0 : )
3808 0 : .delete(
3809 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/download_heatmap_layers",
3810 0 : |r| api_handler(r, timeline_shutdown_download_heatmap_layers_handler),
3811 0 : )
3812 0 : .get(
3813 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
3814 0 : |r| api_handler(r, layer_download_handler),
3815 0 : )
3816 0 : .delete(
3817 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_file_name",
3818 0 : |r| api_handler(r, evict_timeline_layer_handler),
3819 0 : )
3820 0 : .post(
3821 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/layer/:layer_name/scan_disposable_keys",
3822 0 : |r| testing_api_handler("timeline_layer_scan_disposable_keys", r, timeline_layer_scan_disposable_keys),
3823 0 : )
3824 0 : .post(
3825 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/block_gc",
3826 0 : |r| api_handler(r, timeline_gc_blocking_handler),
3827 0 : )
3828 0 : .post(
3829 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/unblock_gc",
3830 0 : |r| api_handler(r, timeline_gc_unblocking_handler),
3831 0 : )
3832 0 : .get(
3833 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/page_trace",
3834 0 : |r| api_handler(r, timeline_page_trace_handler),
3835 0 : )
3836 0 : .post("/v1/tenant/:tenant_shard_id/heatmap_upload", |r| {
3837 0 : api_handler(r, secondary_upload_handler)
3838 0 : })
3839 0 : .get("/v1/tenant/:tenant_id/scan_remote_storage", |r| {
3840 0 : api_handler(r, tenant_scan_remote_handler)
3841 0 : })
3842 0 : .put("/v1/disk_usage_eviction/run", |r| {
3843 0 : api_handler(r, disk_usage_eviction_run)
3844 0 : })
3845 0 : .put("/v1/deletion_queue/flush", |r| {
3846 0 : api_handler(r, deletion_queue_flush)
3847 0 : })
3848 0 : .get("/v1/tenant/:tenant_shard_id/secondary/status", |r| {
3849 0 : api_handler(r, secondary_status_handler)
3850 0 : })
3851 0 : .post("/v1/tenant/:tenant_shard_id/secondary/download", |r| {
3852 0 : api_handler(r, secondary_download_handler)
3853 0 : })
3854 0 : .post("/v1/tenant/:tenant_shard_id/wait_lsn", |r| {
3855 0 : api_handler(r, wait_lsn_handler)
3856 0 : })
3857 0 : .put("/v1/tenant/:tenant_shard_id/break", |r| {
3858 0 : testing_api_handler("set tenant state to broken", r, handle_tenant_break)
3859 0 : })
3860 0 : .get("/v1/panic", |r| api_handler(r, always_panic_handler))
3861 0 : .post("/v1/tracing/event", |r| {
3862 0 : testing_api_handler("emit a tracing event", r, post_tracing_event_handler)
3863 0 : })
3864 0 : .get(
3865 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/getpage",
3866 0 : |r| testing_api_handler("getpage@lsn", r, getpage_at_lsn_handler),
3867 0 : )
3868 0 : .get(
3869 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/touchpage",
3870 0 : |r| api_handler(r, touchpage_at_lsn_handler),
3871 0 : )
3872 0 : .get(
3873 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/keyspace",
3874 0 : |r| api_handler(r, timeline_collect_keyspace),
3875 0 : )
3876 0 : .put("/v1/io_engine", |r| api_handler(r, put_io_engine_handler))
3877 0 : .put("/v1/io_mode", |r| api_handler(r, put_io_mode_handler))
3878 0 : .get("/v1/utilization", |r| api_handler(r, get_utilization))
3879 0 : .post(
3880 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/ingest_aux_files",
3881 0 : |r| testing_api_handler("ingest_aux_files", r, ingest_aux_files),
3882 0 : )
3883 0 : .post(
3884 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/list_aux_files",
3885 0 : |r| testing_api_handler("list_aux_files", r, list_aux_files),
3886 0 : )
3887 0 : .post("/v1/top_tenants", |r| api_handler(r, post_top_tenants))
3888 0 : .post(
3889 0 : "/v1/tenant/:tenant_shard_id/timeline/:timeline_id/perf_info",
3890 0 : |r| testing_api_handler("perf_info", r, perf_info),
3891 0 : )
3892 0 : .put(
3893 0 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_basebackup",
3894 0 : |r| api_handler(r, put_tenant_timeline_import_basebackup),
3895 0 : )
3896 0 : .put(
3897 0 : "/v1/tenant/:tenant_id/timeline/:timeline_id/import_wal",
3898 0 : |r| api_handler(r, put_tenant_timeline_import_wal),
3899 0 : )
3900 0 : .any(handler_404))
3901 0 : }
|