LCOV - code coverage report
Current view: top level - pageserver/src - page_service.rs (source / functions)
Test:         4f58e98c51285c7fa348e0b410c88a10caf68ad2.info
Test Date:    2025-01-07 20:58:07

              Coverage    Total    Hit
Lines:        25.6 %       1108    284
Functions:     7.3 %        109      8

            Line data    Source code
       1              : //! The Page Service listens for client connections and serves their GetPage@LSN
       2              : //! requests.
       3              : 
       4              : use anyhow::{bail, Context};
       5              : use async_compression::tokio::write::GzipEncoder;
       6              : use bytes::Buf;
       7              : use futures::FutureExt;
       8              : use itertools::Itertools;
       9              : use once_cell::sync::OnceCell;
      10              : use pageserver_api::config::{
      11              :     PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
      12              :     PageServiceProtocolPipelinedExecutionStrategy,
      13              : };
      14              : use pageserver_api::models::{self, TenantState};
      15              : use pageserver_api::models::{
      16              :     PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse,
      17              :     PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse,
      18              :     PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetSlruSegmentRequest,
      19              :     PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest, PagestreamNblocksResponse,
      20              :     PagestreamProtocolVersion,
      21              : };
      22              : use pageserver_api::shard::TenantShardId;
      23              : use postgres_backend::{
      24              :     is_expected_io_error, AuthType, PostgresBackend, PostgresBackendReader, QueryError,
      25              : };
      26              : use pq_proto::framed::ConnectionError;
      27              : use pq_proto::FeStartupPacket;
      28              : use pq_proto::{BeMessage, FeMessage, RowDescriptor};
      29              : use std::borrow::Cow;
      30              : use std::io;
      31              : use std::num::NonZeroUsize;
      32              : use std::str;
      33              : use std::str::FromStr;
      34              : use std::sync::Arc;
      35              : use std::time::SystemTime;
      36              : use std::time::{Duration, Instant};
      37              : use tokio::io::{AsyncRead, AsyncWrite};
      38              : use tokio::io::{AsyncWriteExt, BufWriter};
      39              : use tokio::task::JoinHandle;
      40              : use tokio_util::sync::CancellationToken;
      41              : use tracing::*;
      42              : use utils::sync::spsc_fold;
      43              : use utils::{
      44              :     auth::{Claims, Scope, SwappableJwtAuth},
      45              :     id::{TenantId, TimelineId},
      46              :     lsn::Lsn,
      47              :     simple_rcu::RcuReadGuard,
      48              : };
      49              : 
      50              : use crate::auth::check_permission;
      51              : use crate::basebackup::BasebackupError;
      52              : use crate::config::PageServerConf;
      53              : use crate::context::{DownloadBehavior, RequestContext};
      54              : use crate::metrics::{self, SmgrOpTimer};
      55              : use crate::metrics::{ComputeCommandKind, COMPUTE_COMMANDS_COUNTERS, LIVE_CONNECTIONS};
      56              : use crate::pgdatadir_mapping::Version;
      57              : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
      58              : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id;
      59              : use crate::task_mgr::TaskKind;
      60              : use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME};
      61              : use crate::tenant::mgr::ShardSelector;
      62              : use crate::tenant::mgr::TenantManager;
      63              : use crate::tenant::mgr::{GetActiveTenantError, GetTenantError, ShardResolveResult};
      64              : use crate::tenant::timeline::{self, WaitLsnError};
      65              : use crate::tenant::GetTimelineError;
      66              : use crate::tenant::PageReconstructError;
      67              : use crate::tenant::Timeline;
      68              : use crate::{basebackup, timed_after_cancellation};
      69              : use pageserver_api::key::rel_block_to_key;
      70              : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
      71              : use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID;
      72              : use postgres_ffi::BLCKSZ;
      73              : 
       74              : /// How long we may wait for a [`crate::tenant::mgr::TenantSlot::InProgress`] and/or a [`crate::tenant::Tenant`] which
      75              : /// is not yet in state [`TenantState::Active`].
      76              : ///
      77              : /// NB: this is a different value than [`crate::http::routes::ACTIVE_TENANT_TIMEOUT`].
      78              : const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
      79              : 
      80              : ///////////////////////////////////////////////////////////////////////////////
      81              : 
      82              : pub struct Listener {
      83              :     cancel: CancellationToken,
       84              :     /// Cancel the listener task through `cancel` (see [`Listener::stop_accepting`])
       85              :     /// to shut down the listener and get a handle on the existing connections.
      86              :     task: JoinHandle<Connections>,
      87              : }
      88              : 
      89              : pub struct Connections {
      90              :     cancel: CancellationToken,
      91              :     tasks: tokio::task::JoinSet<ConnectionHandlerResult>,
      92              : }
      93              : 
      94            0 : pub fn spawn(
      95            0 :     conf: &'static PageServerConf,
      96            0 :     tenant_manager: Arc<TenantManager>,
      97            0 :     pg_auth: Option<Arc<SwappableJwtAuth>>,
      98            0 :     tcp_listener: tokio::net::TcpListener,
      99            0 : ) -> Listener {
     100            0 :     let cancel = CancellationToken::new();
     101            0 :     let libpq_ctx = RequestContext::todo_child(
     102            0 :         TaskKind::LibpqEndpointListener,
     103            0 :         // listener task shouldn't need to download anything. (We will
      104            0 :         // create separate sub-contexts for each connection, with their
     105            0 :         // own download behavior. This context is used only to listen and
     106            0 :         // accept connections.)
     107            0 :         DownloadBehavior::Error,
     108            0 :     );
     109            0 :     let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
     110            0 :         "libpq listener",
     111            0 :         libpq_listener_main(
     112            0 :             tenant_manager,
     113            0 :             pg_auth,
     114            0 :             tcp_listener,
     115            0 :             conf.pg_auth_type,
     116            0 :             conf.page_service_pipelining.clone(),
     117            0 :             libpq_ctx,
     118            0 :             cancel.clone(),
     119            0 :         )
     120            0 :         .map(anyhow::Ok),
     121            0 :     ));
     122            0 : 
     123            0 :     Listener { cancel, task }
     124            0 : }
     125              : 
     126              : impl Listener {
     127            0 :     pub async fn stop_accepting(self) -> Connections {
     128            0 :         self.cancel.cancel();
     129            0 :         self.task
     130            0 :             .await
     131            0 :             .expect("unreachable: we wrap the listener task in task_mgr::exit_on_panic_or_error")
     132            0 :     }
     133              : }
     134              : impl Connections {
     135            0 :     pub(crate) async fn shutdown(self) {
     136            0 :         let Self { cancel, mut tasks } = self;
     137            0 :         cancel.cancel();
     138            0 :         while let Some(res) = tasks.join_next().await {
     139            0 :             Self::handle_connection_completion(res);
     140            0 :         }
     141            0 :     }
     142              : 
     143            0 :     fn handle_connection_completion(res: Result<anyhow::Result<()>, tokio::task::JoinError>) {
     144            0 :         match res {
     145            0 :             Ok(Ok(())) => {}
     146            0 :             Ok(Err(e)) => error!("error in page_service connection task: {:?}", e),
     147            0 :             Err(e) => error!("page_service connection task panicked: {:?}", e),
     148              :         }
     149            0 :     }
     150              : }
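
// A minimal usage sketch (not part of this file) of the shutdown sequence the
// types above are designed for, assuming an already-bound `tokio::net::TcpListener`:
//
//     let listener = spawn(conf, tenant_manager, pg_auth, tcp_listener);
//     // ... serve traffic ...
//     let connections = listener.stop_accepting().await; // stop the accept loop
//     connections.shutdown().await;                       // cancel and drain existing connections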
     151              : 
     152              : ///
     153              : /// Main loop of the page service.
     154              : ///
     155              : /// Listens for connections, and launches a new handler task for each.
     156              : ///
       157              : /// Returns the set of open connections upon cancellation via
       158              : /// `listener_cancel`.
     159              : ///
     160            0 : pub async fn libpq_listener_main(
     161            0 :     tenant_manager: Arc<TenantManager>,
     162            0 :     auth: Option<Arc<SwappableJwtAuth>>,
     163            0 :     listener: tokio::net::TcpListener,
     164            0 :     auth_type: AuthType,
     165            0 :     pipelining_config: PageServicePipeliningConfig,
     166            0 :     listener_ctx: RequestContext,
     167            0 :     listener_cancel: CancellationToken,
     168            0 : ) -> Connections {
     169            0 :     let connections_cancel = CancellationToken::new();
     170            0 :     let mut connection_handler_tasks = tokio::task::JoinSet::default();
     171              : 
     172              :     loop {
     173            0 :         let accepted = tokio::select! {
     174              :             biased;
     175            0 :             _ = listener_cancel.cancelled() => break,
     176            0 :             next = connection_handler_tasks.join_next(), if !connection_handler_tasks.is_empty() => {
      177            0 :                 let res = next.expect("we don't poll while empty");
     178            0 :                 Connections::handle_connection_completion(res);
     179            0 :                 continue;
     180              :             }
     181            0 :             accepted = listener.accept() => accepted,
     182            0 :         };
     183            0 : 
     184            0 :         match accepted {
     185            0 :             Ok((socket, peer_addr)) => {
     186            0 :                 // Connection established. Spawn a new task to handle it.
     187            0 :                 debug!("accepted connection from {}", peer_addr);
     188            0 :                 let local_auth = auth.clone();
     189            0 :                 let connection_ctx = listener_ctx
     190            0 :                     .detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);
     191            0 :                 connection_handler_tasks.spawn(page_service_conn_main(
     192            0 :                     tenant_manager.clone(),
     193            0 :                     local_auth,
     194            0 :                     socket,
     195            0 :                     auth_type,
     196            0 :                     pipelining_config.clone(),
     197            0 :                     connection_ctx,
     198            0 :                     connections_cancel.child_token(),
     199            0 :                 ));
     200              :             }
     201            0 :             Err(err) => {
     202            0 :                 // accept() failed. Log the error, and loop back to retry on next connection.
     203            0 :                 error!("accept() failed: {:?}", err);
     204              :             }
     205              :         }
     206              :     }
     207              : 
     208            0 :     debug!("page_service listener loop terminated");
     209              : 
     210            0 :     Connections {
     211            0 :         cancel: connections_cancel,
     212            0 :         tasks: connection_handler_tasks,
     213            0 :     }
     214            0 : }
     215              : 
     216              : type ConnectionHandlerResult = anyhow::Result<()>;
     217              : 
     218            0 : #[instrument(skip_all, fields(peer_addr))]
     219              : async fn page_service_conn_main(
     220              :     tenant_manager: Arc<TenantManager>,
     221              :     auth: Option<Arc<SwappableJwtAuth>>,
     222              :     socket: tokio::net::TcpStream,
     223              :     auth_type: AuthType,
     224              :     pipelining_config: PageServicePipeliningConfig,
     225              :     connection_ctx: RequestContext,
     226              :     cancel: CancellationToken,
     227              : ) -> ConnectionHandlerResult {
     228              :     let _guard = LIVE_CONNECTIONS
     229              :         .with_label_values(&["page_service"])
     230              :         .guard();
     231              : 
     232              :     socket
     233              :         .set_nodelay(true)
     234              :         .context("could not set TCP_NODELAY")?;
     235              : 
     236              :     let peer_addr = socket.peer_addr().context("get peer address")?;
     237              :     tracing::Span::current().record("peer_addr", field::display(peer_addr));
     238              : 
       239              :     // Set up a read timeout of 10 minutes. The exact value is rather arbitrary; requirements:
     240              :     // - long enough for most valid compute connections
     241              :     // - less than infinite to stop us from "leaking" connections to long-gone computes
     242              :     //
     243              :     // no write timeout is used, because the kernel is assumed to error writes after some time.
     244              :     let mut socket = tokio_io_timeout::TimeoutReader::new(socket);
     245              : 
     246              :     let default_timeout_ms = 10 * 60 * 1000; // 10 minutes by default
     247            0 :     let socket_timeout_ms = (|| {
     248            0 :         fail::fail_point!("simulated-bad-compute-connection", |avg_timeout_ms| {
     249              :             // Exponential distribution for simulating
     250              :             // poor network conditions, expect about avg_timeout_ms to be around 15
     251              :             // in tests
     252            0 :             if let Some(avg_timeout_ms) = avg_timeout_ms {
     253            0 :                 let avg = avg_timeout_ms.parse::<i64>().unwrap() as f32;
     254            0 :                 let u = rand::random::<f32>();
     255            0 :                 ((1.0 - u).ln() / (-avg)) as u64
     256              :             } else {
     257            0 :                 default_timeout_ms
     258              :             }
     259            0 :         });
     260            0 :         default_timeout_ms
     261              :     })();
     262              : 
     263              :     // A timeout here does not mean the client died, it can happen if it's just idle for
     264              :     // a while: we will tear down this PageServerHandler and instantiate a new one if/when
     265              :     // they reconnect.
     266              :     socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms)));
     267              :     let socket = Box::pin(socket);
     268              : 
     269              :     fail::fail_point!("ps::connection-start::pre-login");
     270              : 
     271              :     // XXX: pgbackend.run() should take the connection_ctx,
     272              :     // and create a child per-query context when it invokes process_query.
     273              :     // But it's in a shared crate, so, we store connection_ctx inside PageServerHandler
     274              :     // and create the per-query context in process_query ourselves.
     275              :     let mut conn_handler = PageServerHandler::new(
     276              :         tenant_manager,
     277              :         auth,
     278              :         pipelining_config,
     279              :         connection_ctx,
     280              :         cancel.clone(),
     281              :     );
     282              :     let pgbackend = PostgresBackend::new_from_io(socket, peer_addr, auth_type, None)?;
     283              : 
     284              :     match pgbackend.run(&mut conn_handler, &cancel).await {
     285              :         Ok(()) => {
     286              :             // we've been requested to shut down
     287              :             Ok(())
     288              :         }
     289              :         Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
     290              :             if is_expected_io_error(&io_error) {
     291              :                 info!("Postgres client disconnected ({io_error})");
     292              :                 Ok(())
     293              :             } else {
     294              :                 let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
     295              :                 Err(io_error).context(format!(
     296              :                     "Postgres connection error for tenant_id={:?} client at peer_addr={}",
     297              :                     tenant_id, peer_addr
     298              :                 ))
     299              :             }
     300              :         }
     301              :         other => {
     302              :             let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
     303              :             other.context(format!(
     304              :                 "Postgres query error for tenant_id={:?} client peer_addr={}",
     305              :                 tenant_id, peer_addr
     306              :             ))
     307              :         }
     308              :     }
     309              : }
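
// A reduced sketch of the read-timeout setup used above, assuming the
// `tokio_io_timeout` crate and a connected `tokio::net::TcpStream` in `socket`:
//
//     let mut socket = tokio_io_timeout::TimeoutReader::new(socket);
//     socket.set_timeout(Some(std::time::Duration::from_millis(10 * 60 * 1000)));
//     let socket = Box::pin(socket); // TimeoutReader must be pinned before it is read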
     310              : 
     311              : struct PageServerHandler {
     312              :     auth: Option<Arc<SwappableJwtAuth>>,
     313              :     claims: Option<Claims>,
     314              : 
     315              :     /// The context created for the lifetime of the connection
       316              :     /// serviced by this PageServerHandler.
     317              :     /// For each query received over the connection,
     318              :     /// `process_query` creates a child context from this one.
     319              :     connection_ctx: RequestContext,
     320              : 
     321              :     cancel: CancellationToken,
     322              : 
     323              :     /// None only while pagestream protocol is being processed.
     324              :     timeline_handles: Option<TimelineHandles>,
     325              : 
     326              :     pipelining_config: PageServicePipeliningConfig,
     327              : }
     328              : 
     329              : struct TimelineHandles {
     330              :     wrapper: TenantManagerWrapper,
     331              :     /// Note on size: the typical size of this map is 1.  The largest size we expect
     332              :     /// to see is the number of shards divided by the number of pageservers (typically < 2),
       333              :     /// or the ratio used when splitting shards (i.e. how many children are created
       334              :     /// from one parent shard), where a "large" number might be ~8.
     335              :     handles: timeline::handle::Cache<TenantManagerTypes>,
     336              : }
     337              : 
     338              : impl TimelineHandles {
     339            0 :     fn new(tenant_manager: Arc<TenantManager>) -> Self {
     340            0 :         Self {
     341            0 :             wrapper: TenantManagerWrapper {
     342            0 :                 tenant_manager,
     343            0 :                 tenant_id: OnceCell::new(),
     344            0 :             },
     345            0 :             handles: Default::default(),
     346            0 :         }
     347            0 :     }
     348            0 :     async fn get(
     349            0 :         &mut self,
     350            0 :         tenant_id: TenantId,
     351            0 :         timeline_id: TimelineId,
     352            0 :         shard_selector: ShardSelector,
     353            0 :     ) -> Result<timeline::handle::Handle<TenantManagerTypes>, GetActiveTimelineError> {
     354            0 :         if *self.wrapper.tenant_id.get_or_init(|| tenant_id) != tenant_id {
     355            0 :             return Err(GetActiveTimelineError::Tenant(
     356            0 :                 GetActiveTenantError::SwitchedTenant,
     357            0 :             ));
     358            0 :         }
     359            0 :         self.handles
     360            0 :             .get(timeline_id, shard_selector, &self.wrapper)
     361            0 :             .await
     362            0 :             .map_err(|e| match e {
     363            0 :                 timeline::handle::GetError::TenantManager(e) => e,
     364              :                 timeline::handle::GetError::TimelineGateClosed => {
     365            0 :                     trace!("timeline gate closed");
     366            0 :                     GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
     367              :                 }
     368              :                 timeline::handle::GetError::PerTimelineStateShutDown => {
     369            0 :                     trace!("per-timeline state shut down");
     370            0 :                     GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
     371              :                 }
     372            0 :             })
     373            0 :     }
     374              : 
     375            0 :     fn tenant_id(&self) -> Option<TenantId> {
     376            0 :         self.wrapper.tenant_id.get().copied()
     377            0 :     }
     378              : }
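
// Sketch of how a request handler resolves a shard-aware timeline handle through
// this cache (`handles`, `tenant_id`, `timeline_id`, and `key` are hypothetical
// bindings):
//
//     let shard = handles
//         .get(tenant_id, timeline_id, ShardSelector::Page(key))
//         .await?; // first call resolves via the TenantManager; later calls hit the cache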
     379              : 
     380              : pub(crate) struct TenantManagerWrapper {
     381              :     tenant_manager: Arc<TenantManager>,
     382              :     // We do not support switching tenant_id on a connection at this point.
       383              :     // We can add support for this later if needed without changing
     384              :     // the protocol.
     385              :     tenant_id: once_cell::sync::OnceCell<TenantId>,
     386              : }
     387              : 
     388              : #[derive(Debug)]
     389              : pub(crate) struct TenantManagerTypes;
     390              : 
     391              : impl timeline::handle::Types for TenantManagerTypes {
     392              :     type TenantManagerError = GetActiveTimelineError;
     393              :     type TenantManager = TenantManagerWrapper;
     394              :     type Timeline = Arc<Timeline>;
     395              : }
     396              : 
     397              : impl timeline::handle::ArcTimeline<TenantManagerTypes> for Arc<Timeline> {
     398            0 :     fn gate(&self) -> &utils::sync::gate::Gate {
     399            0 :         &self.gate
     400            0 :     }
     401              : 
     402            0 :     fn shard_timeline_id(&self) -> timeline::handle::ShardTimelineId {
     403            0 :         Timeline::shard_timeline_id(self)
     404            0 :     }
     405              : 
     406            0 :     fn per_timeline_state(&self) -> &timeline::handle::PerTimelineState<TenantManagerTypes> {
     407            0 :         &self.handles
     408            0 :     }
     409              : 
     410            0 :     fn get_shard_identity(&self) -> &pageserver_api::shard::ShardIdentity {
     411            0 :         Timeline::get_shard_identity(self)
     412            0 :     }
     413              : }
     414              : 
     415              : impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrapper {
     416            0 :     async fn resolve(
     417            0 :         &self,
     418            0 :         timeline_id: TimelineId,
     419            0 :         shard_selector: ShardSelector,
     420            0 :     ) -> Result<Arc<Timeline>, GetActiveTimelineError> {
     421            0 :         let tenant_id = self.tenant_id.get().expect("we set this in get()");
     422            0 :         let timeout = ACTIVE_TENANT_TIMEOUT;
     423            0 :         let wait_start = Instant::now();
     424            0 :         let deadline = wait_start + timeout;
     425            0 :         let tenant_shard = loop {
     426            0 :             let resolved = self
     427            0 :                 .tenant_manager
     428            0 :                 .resolve_attached_shard(tenant_id, shard_selector);
     429            0 :             match resolved {
     430            0 :                 ShardResolveResult::Found(tenant_shard) => break tenant_shard,
     431              :                 ShardResolveResult::NotFound => {
     432            0 :                     return Err(GetActiveTimelineError::Tenant(
     433            0 :                         GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
     434            0 :                     ));
     435              :                 }
     436            0 :                 ShardResolveResult::InProgress(barrier) => {
     437            0 :                     // We can't authoritatively answer right now: wait for InProgress state
     438            0 :                     // to end, then try again
     439            0 :                     tokio::select! {
     440            0 :                         _  = barrier.wait() => {
     441            0 :                             // The barrier completed: proceed around the loop to try looking up again
     442            0 :                         },
     443            0 :                         _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
     444            0 :                             return Err(GetActiveTimelineError::Tenant(GetActiveTenantError::WaitForActiveTimeout {
     445            0 :                                 latest_state: None,
     446            0 :                                 wait_time: timeout,
     447            0 :                             }));
     448              :                         }
     449              :                     }
     450              :                 }
     451              :             };
     452              :         };
     453              : 
     454            0 :         tracing::debug!("Waiting for tenant to enter active state...");
     455            0 :         tenant_shard
     456            0 :             .wait_to_become_active(deadline.duration_since(Instant::now()))
     457            0 :             .await
     458            0 :             .map_err(GetActiveTimelineError::Tenant)?;
     459              : 
     460            0 :         let timeline = tenant_shard
     461            0 :             .get_timeline(timeline_id, true)
     462            0 :             .map_err(GetActiveTimelineError::Timeline)?;
     463            0 :         set_tracing_field_shard_id(&timeline);
     464            0 :         Ok(timeline)
     465            0 :     }
     466              : }
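
// The retry-until-deadline shape of `resolve`, reduced to an illustrative sketch;
// `try_resolve`, `Ready`, and `InProgress` are stand-in names, not real APIs:
//
//     let deadline = Instant::now() + ACTIVE_TENANT_TIMEOUT;
//     loop {
//         match try_resolve() {
//             Ready(shard) => break shard,
//             InProgress(barrier) => tokio::select! {
//                 _ = barrier.wait() => {} // slot settled: retry the lookup
//                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
//                     return Err(/* WaitForActiveTimeout */);
//                 }
//             },
//         }
//     }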
     467              : 
     468              : #[derive(thiserror::Error, Debug)]
     469              : enum PageStreamError {
     470              :     /// We encountered an error that should prompt the client to reconnect:
     471              :     /// in practice this means we drop the connection without sending a response.
     472              :     #[error("Reconnect required: {0}")]
     473              :     Reconnect(Cow<'static, str>),
     474              : 
     475              :     /// We were instructed to shutdown while processing the query
     476              :     #[error("Shutting down")]
     477              :     Shutdown,
     478              : 
     479              :     /// Something went wrong reading a page: this likely indicates a pageserver bug
     480              :     #[error("Read error")]
     481              :     Read(#[source] PageReconstructError),
     482              : 
     483              :     /// Ran out of time waiting for an LSN
     484              :     #[error("LSN timeout: {0}")]
     485              :     LsnTimeout(WaitLsnError),
     486              : 
     487              :     /// The entity required to serve the request (tenant or timeline) is not found,
     488              :     /// or is not found in a suitable state to serve a request.
     489              :     #[error("Not found: {0}")]
     490              :     NotFound(Cow<'static, str>),
     491              : 
     492              :     /// Request asked for something that doesn't make sense, like an invalid LSN
     493              :     #[error("Bad request: {0}")]
     494              :     BadRequest(Cow<'static, str>),
     495              : }
     496              : 
     497              : impl From<PageReconstructError> for PageStreamError {
     498            0 :     fn from(value: PageReconstructError) -> Self {
     499            0 :         match value {
     500            0 :             PageReconstructError::Cancelled => Self::Shutdown,
     501            0 :             e => Self::Read(e),
     502              :         }
     503            0 :     }
     504              : }
     505              : 
     506              : impl From<GetActiveTimelineError> for PageStreamError {
     507            0 :     fn from(value: GetActiveTimelineError) -> Self {
     508            0 :         match value {
     509              :             GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled)
     510              :             | GetActiveTimelineError::Tenant(GetActiveTenantError::WillNotBecomeActive(
     511              :                 TenantState::Stopping { .. },
     512              :             ))
     513            0 :             | GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown) => Self::Shutdown,
     514            0 :             GetActiveTimelineError::Tenant(e) => Self::NotFound(format!("{e}").into()),
     515            0 :             GetActiveTimelineError::Timeline(e) => Self::NotFound(format!("{e}").into()),
     516              :         }
     517            0 :     }
     518              : }
     519              : 
     520              : impl From<WaitLsnError> for PageStreamError {
     521            0 :     fn from(value: WaitLsnError) -> Self {
     522            0 :         match value {
     523            0 :             e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e),
     524            0 :             WaitLsnError::Shutdown => Self::Shutdown,
     525            0 :             e @ WaitLsnError::BadState { .. } => Self::Reconnect(format!("{e}").into()),
     526              :         }
     527            0 :     }
     528              : }
     529              : 
     530              : impl From<WaitLsnError> for QueryError {
     531            0 :     fn from(value: WaitLsnError) -> Self {
     532            0 :         match value {
     533            0 :             e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)),
     534            0 :             WaitLsnError::Shutdown => Self::Shutdown,
     535            0 :             WaitLsnError::BadState { .. } => Self::Reconnect,
     536              :         }
     537            0 :     }
     538              : }
     539              : 
     540              : enum BatchedFeMessage {
     541              :     Exists {
     542              :         span: Span,
     543              :         timer: SmgrOpTimer,
     544              :         shard: timeline::handle::Handle<TenantManagerTypes>,
     545              :         req: models::PagestreamExistsRequest,
     546              :     },
     547              :     Nblocks {
     548              :         span: Span,
     549              :         timer: SmgrOpTimer,
     550              :         shard: timeline::handle::Handle<TenantManagerTypes>,
     551              :         req: models::PagestreamNblocksRequest,
     552              :     },
     553              :     GetPage {
     554              :         span: Span,
     555              :         shard: timeline::handle::Handle<TenantManagerTypes>,
     556              :         effective_request_lsn: Lsn,
     557              :         pages: smallvec::SmallVec<[(RelTag, BlockNumber, SmgrOpTimer); 1]>,
     558              :     },
     559              :     DbSize {
     560              :         span: Span,
     561              :         timer: SmgrOpTimer,
     562              :         shard: timeline::handle::Handle<TenantManagerTypes>,
     563              :         req: models::PagestreamDbSizeRequest,
     564              :     },
     565              :     GetSlruSegment {
     566              :         span: Span,
     567              :         timer: SmgrOpTimer,
     568              :         shard: timeline::handle::Handle<TenantManagerTypes>,
     569              :         req: models::PagestreamGetSlruSegmentRequest,
     570              :     },
     571              :     RespondError {
     572              :         span: Span,
     573              :         error: PageStreamError,
     574              :     },
     575              : }
     576              : 
     577              : impl BatchedFeMessage {
     578            0 :     async fn throttle_and_record_start_processing(
     579            0 :         &mut self,
     580            0 :         cancel: &CancellationToken,
     581            0 :     ) -> Result<(), QueryError> {
     582            0 :         let (shard, tokens, timers) = match self {
     583            0 :             BatchedFeMessage::Exists { shard, timer, .. }
     584            0 :             | BatchedFeMessage::Nblocks { shard, timer, .. }
     585            0 :             | BatchedFeMessage::DbSize { shard, timer, .. }
     586            0 :             | BatchedFeMessage::GetSlruSegment { shard, timer, .. } => {
     587            0 :                 (
     588            0 :                     shard,
      589            0 :                     // 1 token probably under-estimates the cost, because these
     590            0 :                     // request handlers typically do several Timeline::get calls.
     591            0 :                     1,
     592            0 :                     itertools::Either::Left(std::iter::once(timer)),
     593            0 :                 )
     594              :             }
     595            0 :             BatchedFeMessage::GetPage { shard, pages, .. } => (
     596            0 :                 shard,
     597            0 :                 pages.len(),
     598            0 :                 itertools::Either::Right(pages.iter_mut().map(|(_, _, timer)| timer)),
     599            0 :             ),
     600            0 :             BatchedFeMessage::RespondError { .. } => return Ok(()),
     601              :         };
     602            0 :         let throttled = tokio::select! {
     603            0 :             throttled = shard.pagestream_throttle.throttle(tokens) => { throttled }
     604            0 :             _ = cancel.cancelled() => {
     605            0 :                 return Err(QueryError::Shutdown);
     606              :             }
     607              :         };
     608            0 :         for timer in timers {
     609            0 :             timer.observe_throttle_done_execution_starting(&throttled);
     610            0 :         }
     611            0 :         Ok(())
     612            0 :     }
     613              : }
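
// Worked example of the token accounting above (hypothetical batch): a GetPage
// batch of 4 pages requests 4 tokens in a single throttle() call, so per-page
// cost is preserved while the throttle wait is paid once per batch:
//
//     let tokens = pages.len(); // e.g. 4
//     let throttled = shard.pagestream_throttle.throttle(tokens).await;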
     614              : 
     615              : impl PageServerHandler {
     616            0 :     pub fn new(
     617            0 :         tenant_manager: Arc<TenantManager>,
     618            0 :         auth: Option<Arc<SwappableJwtAuth>>,
     619            0 :         pipelining_config: PageServicePipeliningConfig,
     620            0 :         connection_ctx: RequestContext,
     621            0 :         cancel: CancellationToken,
     622            0 :     ) -> Self {
     623            0 :         PageServerHandler {
     624            0 :             auth,
     625            0 :             claims: None,
     626            0 :             connection_ctx,
     627            0 :             timeline_handles: Some(TimelineHandles::new(tenant_manager)),
     628            0 :             cancel,
     629            0 :             pipelining_config,
     630            0 :         }
     631            0 :     }
     632              : 
       633              :     /// This function always respects cancellation of any timeline in [`Self::timeline_handles`].  Pass in
     634              :     /// a cancellation token at the next scope up (such as a tenant cancellation token) to ensure we respect
     635              :     /// cancellation if there aren't any timelines in the cache.
     636              :     ///
       637              :     /// If calling from a function that doesn't use the [`Self::timeline_handles`] cache, then pass in the
     638              :     /// timeline cancellation token.
     639            0 :     async fn flush_cancellable<IO>(
     640            0 :         &self,
     641            0 :         pgb: &mut PostgresBackend<IO>,
     642            0 :         cancel: &CancellationToken,
     643            0 :     ) -> Result<(), QueryError>
     644            0 :     where
     645            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
     646            0 :     {
     647            0 :         tokio::select!(
     648            0 :             flush_r = pgb.flush() => {
     649            0 :                 Ok(flush_r?)
     650              :             },
     651            0 :             _ = cancel.cancelled() => {
     652            0 :                 Err(QueryError::Shutdown)
     653              :             }
     654              :         )
     655            0 :     }
     656              : 
     657            0 :     async fn pagestream_read_message<IO>(
     658            0 :         pgb: &mut PostgresBackendReader<IO>,
     659            0 :         tenant_id: TenantId,
     660            0 :         timeline_id: TimelineId,
     661            0 :         timeline_handles: &mut TimelineHandles,
     662            0 :         cancel: &CancellationToken,
     663            0 :         ctx: &RequestContext,
     664            0 :         parent_span: Span,
     665            0 :     ) -> Result<Option<BatchedFeMessage>, QueryError>
     666            0 :     where
     667            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
     668            0 :     {
     669            0 :         let msg = tokio::select! {
     670              :             biased;
     671            0 :             _ = cancel.cancelled() => {
     672            0 :                 return Err(QueryError::Shutdown)
     673              :             }
     674            0 :             msg = pgb.read_message() => { msg }
     675            0 :         };
     676            0 : 
     677            0 :         let received_at = Instant::now();
     678              : 
     679            0 :         let copy_data_bytes = match msg? {
     680            0 :             Some(FeMessage::CopyData(bytes)) => bytes,
     681              :             Some(FeMessage::Terminate) => {
     682            0 :                 return Ok(None);
     683              :             }
     684            0 :             Some(m) => {
     685            0 :                 return Err(QueryError::Other(anyhow::anyhow!(
     686            0 :                     "unexpected message: {m:?} during COPY"
     687            0 :                 )));
     688              :             }
     689              :             None => {
     690            0 :                 return Ok(None);
     691              :             } // client disconnected
     692              :         };
     693            0 :         trace!("query: {copy_data_bytes:?}");
     694              : 
     695            0 :         fail::fail_point!("ps::handle-pagerequest-message");
     696              : 
     697              :         // parse request
     698            0 :         let neon_fe_msg = PagestreamFeMessage::parse(&mut copy_data_bytes.reader())?;
     699              : 
     700            0 :         let batched_msg = match neon_fe_msg {
     701            0 :             PagestreamFeMessage::Exists(req) => {
     702            0 :                 let span = tracing::info_span!(parent: parent_span, "handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.request_lsn);
     703            0 :                 let shard = timeline_handles
     704            0 :                     .get(tenant_id, timeline_id, ShardSelector::Zero)
     705            0 :                     .instrument(span.clone()) // sets `shard_id` field
     706            0 :                     .await?;
     707            0 :                 let timer = shard
     708            0 :                     .query_metrics
     709            0 :                     .start_smgr_op(metrics::SmgrQueryType::GetRelExists, received_at);
     710            0 :                 BatchedFeMessage::Exists {
     711            0 :                     span,
     712            0 :                     timer,
     713            0 :                     shard,
     714            0 :                     req,
     715            0 :                 }
     716              :             }
     717            0 :             PagestreamFeMessage::Nblocks(req) => {
     718            0 :                 let span = tracing::info_span!(parent: parent_span, "handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.request_lsn);
     719            0 :                 let shard = timeline_handles
     720            0 :                     .get(tenant_id, timeline_id, ShardSelector::Zero)
     721            0 :                     .instrument(span.clone()) // sets `shard_id` field
     722            0 :                     .await?;
     723            0 :                 let timer = shard
     724            0 :                     .query_metrics
     725            0 :                     .start_smgr_op(metrics::SmgrQueryType::GetRelSize, received_at);
     726            0 :                 BatchedFeMessage::Nblocks {
     727            0 :                     span,
     728            0 :                     timer,
     729            0 :                     shard,
     730            0 :                     req,
     731            0 :                 }
     732              :             }
     733            0 :             PagestreamFeMessage::DbSize(req) => {
     734            0 :                 let span = tracing::info_span!(parent: parent_span, "handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.request_lsn);
     735            0 :                 let shard = timeline_handles
     736            0 :                     .get(tenant_id, timeline_id, ShardSelector::Zero)
     737            0 :                     .instrument(span.clone()) // sets `shard_id` field
     738            0 :                     .await?;
     739            0 :                 let timer = shard
     740            0 :                     .query_metrics
     741            0 :                     .start_smgr_op(metrics::SmgrQueryType::GetDbSize, received_at);
     742            0 :                 BatchedFeMessage::DbSize {
     743            0 :                     span,
     744            0 :                     timer,
     745            0 :                     shard,
     746            0 :                     req,
     747            0 :                 }
     748              :             }
     749            0 :             PagestreamFeMessage::GetSlruSegment(req) => {
     750            0 :                 let span = tracing::info_span!(parent: parent_span, "handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.request_lsn);
     751            0 :                 let shard = timeline_handles
     752            0 :                     .get(tenant_id, timeline_id, ShardSelector::Zero)
     753            0 :                     .instrument(span.clone()) // sets `shard_id` field
     754            0 :                     .await?;
     755            0 :                 let timer = shard
     756            0 :                     .query_metrics
     757            0 :                     .start_smgr_op(metrics::SmgrQueryType::GetSlruSegment, received_at);
     758            0 :                 BatchedFeMessage::GetSlruSegment {
     759            0 :                     span,
     760            0 :                     timer,
     761            0 :                     shard,
     762            0 :                     req,
     763            0 :                 }
     764              :             }
     765              :             PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
     766            0 :                 request_lsn,
     767            0 :                 not_modified_since,
     768            0 :                 rel,
     769            0 :                 blkno,
     770              :             }) => {
     771            0 :                 let span = tracing::info_span!(parent: parent_span, "handle_get_page_at_lsn_request_batched", req_lsn = %request_lsn);
     772              : 
     773              :                 macro_rules! respond_error {
     774              :                     ($error:expr) => {{
     775              :                         let error = BatchedFeMessage::RespondError {
     776              :                             span,
     777              :                             error: $error,
     778              :                         };
     779              :                         Ok(Some(error))
     780              :                     }};
     781              :                 }
     782              : 
     783            0 :                 let key = rel_block_to_key(rel, blkno);
     784            0 :                 let shard = match timeline_handles
     785            0 :                     .get(tenant_id, timeline_id, ShardSelector::Page(key))
     786            0 :                     .instrument(span.clone()) // sets `shard_id` field
     787            0 :                     .await
     788              :                 {
     789            0 :                     Ok(tl) => tl,
     790              :                     Err(GetActiveTimelineError::Tenant(GetActiveTenantError::NotFound(_))) => {
     791              :                         // We already know this tenant exists in general, because we resolved it at
     792              :                         // start of connection.  Getting a NotFound here indicates that the shard containing
     793              :                         // the requested page is not present on this node: the client's knowledge of shard->pageserver
     794              :                         // mapping is out of date.
     795              :                         //
      796              :                         // Closing the connection by returning `PageStreamError::Reconnect` has the side effect of rate-limiting the above message, via the
      797              :                         // client's reconnect backoff, as well as hopefully prompting the client to load its updated configuration
     798              :                         // and talk to a different pageserver.
     799            0 :                         return respond_error!(PageStreamError::Reconnect(
     800            0 :                             "getpage@lsn request routed to wrong shard".into()
     801            0 :                         ));
     802              :                     }
     803            0 :                     Err(e) => {
     804            0 :                         return respond_error!(e.into());
     805              :                     }
     806              :                 };
     807              : 
     808              :                 // It's important to start the timer before waiting for the LSN
     809              :                 // so that the _started counters are incremented before we do
     810              :                 // any serious waiting, e.g., for LSNs.
     811            0 :                 let timer = shard
     812            0 :                     .query_metrics
     813            0 :                     .start_smgr_op(metrics::SmgrQueryType::GetPageAtLsn, received_at);
     814              : 
     815            0 :                 let effective_request_lsn = match Self::wait_or_get_last_lsn(
     816            0 :                     &shard,
     817            0 :                     request_lsn,
     818            0 :                     not_modified_since,
     819            0 :                     &shard.get_latest_gc_cutoff_lsn(),
     820            0 :                     ctx,
     821            0 :                 )
     822            0 :                 // TODO: if we actually need to wait for lsn here, it delays the entire batch which doesn't need to wait
     823            0 :                 .await
     824              :                 {
     825            0 :                     Ok(lsn) => lsn,
     826            0 :                     Err(e) => {
     827            0 :                         return respond_error!(e);
     828              :                     }
     829              :                 };
     830              :                 BatchedFeMessage::GetPage {
     831            0 :                     span,
     832            0 :                     shard,
     833            0 :                     effective_request_lsn,
     834            0 :                     pages: smallvec::smallvec![(rel, blkno, timer)],
     835              :                 }
     836              :             }
     837              :         };
     838            0 :         Ok(Some(batched_msg))
     839            0 :     }
     840              : 
       841              :     /// Returns `Ok(())` if `this_msg` was merged into `batch`; otherwise returns `Err` carrying the message (or error) that should start the next batch.
     842            0 :     #[instrument(skip_all, level = tracing::Level::TRACE)]
     843              :     #[allow(clippy::boxed_local)]
     844              :     fn pagestream_do_batch(
     845              :         max_batch_size: NonZeroUsize,
     846              :         batch: &mut Result<BatchedFeMessage, QueryError>,
     847              :         this_msg: Result<BatchedFeMessage, QueryError>,
     848              :     ) -> Result<(), Result<BatchedFeMessage, QueryError>> {
     849              :         debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
     850              : 
     851              :         let this_msg = match this_msg {
     852              :             Ok(this_msg) => this_msg,
     853              :             Err(e) => return Err(Err(e)),
     854              :         };
     855              : 
     856              :         match (&mut *batch, this_msg) {
     857              :             // something batched already, let's see if we can add this message to the batch
     858              :             (
     859              :                 Ok(BatchedFeMessage::GetPage {
     860              :                     span: _,
     861              :                     shard: accum_shard,
     862              :                     pages: ref mut accum_pages,
     863              :                     effective_request_lsn: accum_lsn,
     864              :                 }),
     865              :                 BatchedFeMessage::GetPage {
     866              :                     span: _,
     867              :                     shard: this_shard,
     868              :                     pages: this_pages,
     869              :                     effective_request_lsn: this_lsn,
     870              :                 },
     871            0 :             ) if (|| {
     872            0 :                 assert_eq!(this_pages.len(), 1);
     873            0 :                 if accum_pages.len() >= max_batch_size.get() {
     874            0 :                     trace!(%accum_lsn, %this_lsn, %max_batch_size, "stopping batching because of batch size");
     875            0 :                     assert_eq!(accum_pages.len(), max_batch_size.get());
     876            0 :                     return false;
     877            0 :                 }
     878            0 :                 if (accum_shard.tenant_shard_id, accum_shard.timeline_id)
     879            0 :                     != (this_shard.tenant_shard_id, this_shard.timeline_id)
     880              :                 {
     881            0 :                     trace!(%accum_lsn, %this_lsn, "stopping batching because timeline object mismatch");
      882              :                     // TODO: we _could_ batch & execute each shard separately (and in parallel).
     883              :                     // But the current logic for keeping responses in order does not support that.
     884            0 :                     return false;
     885            0 :                 }
      886            0 :                 // the vectored get currently only supports a single LSN, so stop batching
      887            0 :                 // as soon as the effective request_lsn changes
     888            0 :                 if *accum_lsn != this_lsn {
     889            0 :                     trace!(%accum_lsn, %this_lsn, "stopping batching because LSN changed");
     890            0 :                     return false;
     891            0 :                 }
     892            0 :                 true
     893              :             })() =>
     894              :             {
     895              :                 // ok to batch
     896              :                 accum_pages.extend(this_pages);
     897              :                 Ok(())
     898              :             }
     899              :             // something batched already but this message is unbatchable
     900              :             (_, this_msg) => {
     901              :                 // by default, don't continue batching
     902              :                 Err(Ok(this_msg))
     903              :             }
     904              :         }
     905              :     }
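
// The batching rule above, restated as a predicate sketch; `Accum`/`Incoming`
// are hypothetical types standing in for the two GetPage variants:
//
//     fn can_batch(a: &Accum, b: &Incoming, max_batch_size: NonZeroUsize) -> bool {
//         a.pages.len() < max_batch_size.get()                       // room left in the batch
//             && (a.shard.tenant_shard_id, a.shard.timeline_id)
//                 == (b.shard.tenant_shard_id, b.shard.timeline_id)  // same timeline object
//             && a.effective_request_lsn == b.effective_request_lsn  // vectored get is single-LSN
//     }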
     906              : 
     907            0 :     #[instrument(level = tracing::Level::DEBUG, skip_all)]
     908              :     async fn pagesteam_handle_batched_message<IO>(
     909              :         &mut self,
     910              :         pgb_writer: &mut PostgresBackend<IO>,
     911              :         batch: BatchedFeMessage,
     912              :         cancel: &CancellationToken,
     913              :         ctx: &RequestContext,
     914              :     ) -> Result<(), QueryError>
     915              :     where
     916              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
     917              :     {
     918              :         // invoke handler function
     919              :         let (handler_results, span): (
     920              :             Vec<Result<(PagestreamBeMessage, SmgrOpTimer), PageStreamError>>,
     921              :             _,
     922              :         ) = match batch {
     923              :             BatchedFeMessage::Exists {
     924              :                 span,
     925              :                 timer,
     926              :                 shard,
     927              :                 req,
     928              :             } => {
     929              :                 fail::fail_point!("ps::handle-pagerequest-message::exists");
     930              :                 (
     931              :                     vec![self
     932              :                         .handle_get_rel_exists_request(&shard, &req, ctx)
     933              :                         .instrument(span.clone())
     934              :                         .await
     935            0 :                         .map(|msg| (msg, timer))],
     936              :                     span,
     937              :                 )
     938              :             }
     939              :             BatchedFeMessage::Nblocks {
     940              :                 span,
     941              :                 timer,
     942              :                 shard,
     943              :                 req,
     944              :             } => {
     945              :                 fail::fail_point!("ps::handle-pagerequest-message::nblocks");
     946              :                 (
     947              :                     vec![self
     948              :                         .handle_get_nblocks_request(&shard, &req, ctx)
     949              :                         .instrument(span.clone())
     950              :                         .await
     951            0 :                         .map(|msg| (msg, timer))],
     952              :                     span,
     953              :                 )
     954              :             }
     955              :             BatchedFeMessage::GetPage {
     956              :                 span,
     957              :                 shard,
     958              :                 effective_request_lsn,
     959              :                 pages,
     960              :             } => {
     961              :                 fail::fail_point!("ps::handle-pagerequest-message::getpage");
     962              :                 (
     963              :                     {
     964              :                         let npages = pages.len();
     965              :                         trace!(npages, "handling getpage request");
     966              :                         let res = self
     967              :                             .handle_get_page_at_lsn_request_batched(
     968              :                                 &shard,
     969              :                                 effective_request_lsn,
     970              :                                 pages,
     971              :                                 ctx,
     972              :                             )
     973              :                             .instrument(span.clone())
     974              :                             .await;
     975              :                         assert_eq!(res.len(), npages);
     976              :                         res
     977              :                     },
     978              :                     span,
     979              :                 )
     980              :             }
     981              :             BatchedFeMessage::DbSize {
     982              :                 span,
     983              :                 timer,
     984              :                 shard,
     985              :                 req,
     986              :             } => {
     987              :                 fail::fail_point!("ps::handle-pagerequest-message::dbsize");
     988              :                 (
     989              :                     vec![self
     990              :                         .handle_db_size_request(&shard, &req, ctx)
     991              :                         .instrument(span.clone())
     992              :                         .await
     993            0 :                         .map(|msg| (msg, timer))],
     994              :                     span,
     995              :                 )
     996              :             }
     997              :             BatchedFeMessage::GetSlruSegment {
     998              :                 span,
     999              :                 timer,
    1000              :                 shard,
    1001              :                 req,
    1002              :             } => {
    1003              :                 fail::fail_point!("ps::handle-pagerequest-message::slrusegment");
    1004              :                 (
    1005              :                     vec![self
    1006              :                         .handle_get_slru_segment_request(&shard, &req, ctx)
    1007              :                         .instrument(span.clone())
    1008              :                         .await
    1009            0 :                         .map(|msg| (msg, timer))],
    1010              :                     span,
    1011              :                 )
    1012              :             }
    1013              :             BatchedFeMessage::RespondError { span, error } => {
    1014              :                 // We've already decided to respond with an error, so we don't need to
    1015              :                 // call the handler.
    1016              :                 (vec![Err(error)], span)
    1017              :             }
    1018              :         };
    1019              : 
    1020              :         // Map handler result to protocol behavior.
    1021              :         // Some handler errors cause us to exit the pagestream protocol.
    1022              :         // Others are sent back to the client as an error message, and we stay in the pagestream protocol.
    1023              :         for handler_result in handler_results {
    1024              :             let (response_msg, timer) = match handler_result {
    1025              :                 Err(e) => match &e {
    1026              :                     PageStreamError::Shutdown => {
    1027              :                         // If we fail to fulfil a request during shutdown, which may be _because_ of
    1028              :                         // shutdown, then do not send the error to the client.  Instead just drop the
    1029              :                         // connection.
    1030            0 :                         span.in_scope(|| info!("dropping connection due to shutdown"));
    1031              :                         return Err(QueryError::Shutdown);
    1032              :                     }
    1033              :                     PageStreamError::Reconnect(reason) => {
    1034            0 :                         span.in_scope(|| info!("handler requested reconnect: {reason}"));
    1035              :                         return Err(QueryError::Reconnect);
    1036              :                     }
    1037              :                     PageStreamError::Read(_)
    1038              :                     | PageStreamError::LsnTimeout(_)
    1039              :                     | PageStreamError::NotFound(_)
    1040              :                     | PageStreamError::BadRequest(_) => {
    1041              :                         // Print all the details to the log with {:#}, but for the client the
    1042              :                         // error message is enough.  Do not log if shutting down, as the anyhow::Error
    1043              :                         // here includes cancellation which is not an error.
    1044              :                         let full = utils::error::report_compact_sources(&e);
    1045            0 :                         span.in_scope(|| {
    1046            0 :                             error!("error reading relation or page version: {full:#}")
    1047            0 :                         });
    1048              :                         (
    1049              :                             PagestreamBeMessage::Error(PagestreamErrorResponse {
    1050              :                                 message: e.to_string(),
    1051              :                             }),
    1052              :                             None, // TODO: measure errors
    1053              :                         )
    1054              :                     }
    1055              :                 },
    1056              :                 Ok((response_msg, timer)) => (response_msg, Some(timer)),
    1057              :             };
    1058              : 
    1059              :             //
    1060              :             // marshal & transmit response message
    1061              :             //
    1062              : 
    1063              :             pgb_writer.write_message_noflush(&BeMessage::CopyData(&response_msg.serialize()))?;
    1064              : 
    1065              :             // We purposefully don't count flush time into the timer.
    1066              :             //
    1067              :             // The reason is that the current compute client will not perform protocol processing
    1068              :             // if the postgres backend process is doing things other than `->smgr_read()`.
    1069              :             // This is especially the case for prefetch.
    1070              :             //
    1071              :             // If the compute doesn't read from the connection, eventually TCP will backpressure
    1072              :             // all the way into our flush call below.
    1073              :             //
    1074              :             // The timer's underlying metric is used for a storage-internal latency SLO and
    1075              :             // we don't want it to include latency that we can't control.
    1076              :             // As pointed out above, we don't control how long the flush takes in this case.
    1077              :             let flushing_timer =
    1078            0 :                 timer.map(|timer| timer.observe_smgr_op_completion_and_start_flushing());
    1079              : 
    1080              :             // what we want to do
    1081              :             let flush_fut = pgb_writer.flush();
    1082              :             // metric for how long flushing takes
    1083              :             let flush_fut = match flushing_timer {
    1084              :                 Some(flushing_timer) => {
    1085              :                     futures::future::Either::Left(flushing_timer.measure(flush_fut))
    1086              :                 }
    1087              :                 None => futures::future::Either::Right(flush_fut),
    1088              :             };
    1089              :             // do it while respecting cancellation
    1090            0 :             let _: () = async move {
    1091            0 :                 tokio::select! {
    1092              :                     biased;
    1093            0 :                     _ = cancel.cancelled() => {
    1094              :                         // We were requested to shut down.
    1095            0 :                         info!("shutdown request received in page handler");
    1096            0 :                         return Err(QueryError::Shutdown)
    1097              :                     }
    1098            0 :                     res = flush_fut => {
    1099            0 :                         res?;
    1100              :                     }
    1101              :                 }
    1102            0 :                 Ok(())
    1103            0 :             }
    1104              :             // and log the info! line inside the request span
    1105              :             .instrument(span.clone())
    1106              :             .await?;
    1107              :         }
    1108              :         Ok(())
    1109              :     }
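                      : 
                      :     // Editor's note: an illustrative summary of the error mapping above,
                      :     // not part of the source file:
                      :     //
                      :     //   PageStreamError::Shutdown      -> Err(QueryError::Shutdown): drop the
                      :     //                                     connection without replying
                      :     //   PageStreamError::Reconnect(_)  -> Err(QueryError::Reconnect): ask the
                      :     //                                     client to reconnect
                      :     //   Read | LsnTimeout | NotFound |
                      :     //   BadRequest                     -> PagestreamErrorResponse sent in-band;
                      :     //                                     the pagestream session continues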
    1110              : 
    1111              :     /// Pagestream sub-protocol handler.
    1112              :     ///
    1113              :     /// It is a simple request-response protocol inside a COPYBOTH session.
    1114              :     ///
    1115              :     /// # Coding Discipline
    1116              :     ///
    1117              :     /// Coding discipline within this function: all interaction with the `pgb` connection
    1118              :     /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
    1119              :     /// This is so that we can shutdown page_service quickly.
    1120            0 :     #[instrument(skip_all)]
    1121              :     async fn handle_pagerequests<IO>(
    1122              :         &mut self,
    1123              :         pgb: &mut PostgresBackend<IO>,
    1124              :         tenant_id: TenantId,
    1125              :         timeline_id: TimelineId,
    1126              :         _protocol_version: PagestreamProtocolVersion,
    1127              :         ctx: RequestContext,
    1128              :     ) -> Result<(), QueryError>
    1129              :     where
    1130              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1131              :     {
    1132              :         debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
    1133              : 
    1134              :         // switch client to COPYBOTH
    1135              :         pgb.write_message_noflush(&BeMessage::CopyBothResponse)?;
    1136              :         tokio::select! {
    1137              :             biased;
    1138              :             _ = self.cancel.cancelled() => {
    1139              :                 return Err(QueryError::Shutdown)
    1140              :             }
    1141              :             res = pgb.flush() => {
    1142              :                 res?;
    1143              :             }
    1144              :         }
    1145              : 
    1146              :         let pgb_reader = pgb
    1147              :             .split()
    1148              :             .context("implementation error: split pgb into reader and writer")?;
    1149              : 
    1150              :         let timeline_handles = self
    1151              :             .timeline_handles
    1152              :             .take()
    1153              :             .expect("implementation error: timeline_handles should not be locked");
    1154              : 
    1155              :         let request_span = info_span!("request", shard_id = tracing::field::Empty);
    1156              :         let ((pgb_reader, timeline_handles), result) = match self.pipelining_config.clone() {
    1157              :             PageServicePipeliningConfig::Pipelined(pipelining_config) => {
    1158              :                 self.handle_pagerequests_pipelined(
    1159              :                     pgb,
    1160              :                     pgb_reader,
    1161              :                     tenant_id,
    1162              :                     timeline_id,
    1163              :                     timeline_handles,
    1164              :                     request_span,
    1165              :                     pipelining_config,
    1166              :                     &ctx,
    1167              :                 )
    1168              :                 .await
    1169              :             }
    1170              :             PageServicePipeliningConfig::Serial => {
    1171              :                 self.handle_pagerequests_serial(
    1172              :                     pgb,
    1173              :                     pgb_reader,
    1174              :                     tenant_id,
    1175              :                     timeline_id,
    1176              :                     timeline_handles,
    1177              :                     request_span,
    1178              :                     &ctx,
    1179              :                 )
    1180              :                 .await
    1181              :             }
    1182              :         };
    1183              : 
    1184              :         debug!("pagestream subprotocol shut down cleanly");
    1185              : 
    1186              :         pgb.unsplit(pgb_reader)
    1187              :             .context("implementation error: unsplit pgb")?;
    1188              : 
    1189              :         let replaced = self.timeline_handles.replace(timeline_handles);
    1190              :         assert!(replaced.is_none());
    1191              : 
    1192              :         result
    1193              :     }
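                      : 
                      :     // Editor's note: an illustrative construction of the pipelining config
                      :     // that is matched on above, not part of the source file; the field
                      :     // values here are hypothetical.
                      :     //
                      :     //     let pipelining_config = PageServicePipeliningConfig::Pipelined(
                      :     //         PageServicePipeliningConfigPipelined {
                      :     //             max_batch_size: NonZeroUsize::new(32).unwrap(),
                      :     //             execution:
                      :     //                 PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures,
                      :     //         },
                      :     //     );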
    1194              : 
    1195              :     #[allow(clippy::too_many_arguments)]
    1196            0 :     async fn handle_pagerequests_serial<IO>(
    1197            0 :         &mut self,
    1198            0 :         pgb_writer: &mut PostgresBackend<IO>,
    1199            0 :         mut pgb_reader: PostgresBackendReader<IO>,
    1200            0 :         tenant_id: TenantId,
    1201            0 :         timeline_id: TimelineId,
    1202            0 :         mut timeline_handles: TimelineHandles,
    1203            0 :         request_span: Span,
    1204            0 :         ctx: &RequestContext,
    1205            0 :     ) -> (
    1206            0 :         (PostgresBackendReader<IO>, TimelineHandles),
    1207            0 :         Result<(), QueryError>,
    1208            0 :     )
    1209            0 :     where
    1210            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1211            0 :     {
    1212            0 :         let cancel = self.cancel.clone();
    1213            0 :         let err = loop {
    1214            0 :             let msg = Self::pagestream_read_message(
    1215            0 :                 &mut pgb_reader,
    1216            0 :                 tenant_id,
    1217            0 :                 timeline_id,
    1218            0 :                 &mut timeline_handles,
    1219            0 :                 &cancel,
    1220            0 :                 ctx,
    1221            0 :                 request_span.clone(),
    1222            0 :             )
    1223            0 :             .await;
    1224            0 :             let msg = match msg {
    1225            0 :                 Ok(msg) => msg,
    1226            0 :                 Err(e) => break e,
    1227              :             };
    1228            0 :             let mut msg = match msg {
    1229            0 :                 Some(msg) => msg,
    1230              :                 None => {
    1231            0 :                     debug!("pagestream subprotocol end observed");
    1232            0 :                     return ((pgb_reader, timeline_handles), Ok(()));
    1233              :                 }
    1234              :             };
    1235              : 
    1236            0 :             if let Err(cancelled) = msg.throttle_and_record_start_processing(&self.cancel).await {
    1237            0 :                 break cancelled;
    1238            0 :             }
    1239              : 
    1240            0 :             let err = self
    1241            0 :                 .pagestream_handle_batched_message(pgb_writer, msg, &cancel, ctx)
    1242            0 :                 .await;
    1243            0 :             match err {
    1244            0 :                 Ok(()) => {}
    1245            0 :                 Err(e) => break e,
    1246              :             }
    1247              :         };
    1248            0 :         ((pgb_reader, timeline_handles), Err(err))
    1249            0 :     }
    1250              : 
    1251              :     /// # Cancel-Safety
    1252              :     ///
    1253              :     /// May leak tokio tasks if not polled to completion.
    1254              :     #[allow(clippy::too_many_arguments)]
    1255            0 :     async fn handle_pagerequests_pipelined<IO>(
    1256            0 :         &mut self,
    1257            0 :         pgb_writer: &mut PostgresBackend<IO>,
    1258            0 :         pgb_reader: PostgresBackendReader<IO>,
    1259            0 :         tenant_id: TenantId,
    1260            0 :         timeline_id: TimelineId,
    1261            0 :         mut timeline_handles: TimelineHandles,
    1262            0 :         request_span: Span,
    1263            0 :         pipelining_config: PageServicePipeliningConfigPipelined,
    1264            0 :         ctx: &RequestContext,
    1265            0 :     ) -> (
    1266            0 :         (PostgresBackendReader<IO>, TimelineHandles),
    1267            0 :         Result<(), QueryError>,
    1268            0 :     )
    1269            0 :     where
    1270            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1271            0 :     {
    1272            0 :         //
    1273            0 :         // Pipelined pagestream handling consists of
    1274            0 :         // - a Batcher that reads requests off the wire and
    1275            0 :         //   batches them if possible,
    1276            0 :         // - an Executor that processes the batched requests.
    1277            0 :         //
    1278            0 :         // The batch is built up inside an `spsc_fold` channel,
    1279            0 :         // shared between Batcher (Sender) and Executor (Receiver).
    1280            0 :         //
    1281            0 :         // The Batcher continuously folds client requests into the batch,
    1282            0 :         // while the Executor can at any time take out what's in the batch
    1283            0 :         // in order to process it.
    1284            0 :         // This means the next batch builds up while the Executor
    1285            0 :         // executes the last batch.
    1286            0 :         //
    1287            0 :         // CANCELLATION
    1288            0 :         //
    1289            0 :         // We run both Batcher and Executor futures to completion before
    1290            0 :         // returning from this function.
    1291            0 :         //
    1292            0 :         // If Executor exits first, it signals cancellation to the Batcher
    1293            0 :         // via a CancellationToken that is a child of `self.cancel`.
    1294            0 :         // If Batcher exits first, it signals cancellation to the Executor
    1295            0 :         // by dropping the spsc_fold channel Sender.
    1296            0 :         //
    1297            0 :         // CLEAN SHUTDOWN
    1298            0 :         //
    1299            0 :         // Clean shutdown means that the client ends the COPYBOTH session.
    1300            0 :         // In response to such a client message, the Batcher exits.
    1301            0 :         // The Executor continues to run, draining the spsc_fold channel.
    1302            0 :         // Once drained, the spsc_fold recv will fail with a distinct error
    1303            0 :         // indicating that the sender disconnected.
    1304            0 :         // The Executor exits with Ok(()) in response to that error.
    1305            0 :         //
    1306            0 :         // Server-initiated shutdown is not a clean shutdown; instead it
    1307            0 :         // surfaces as an error, Err(QueryError::Shutdown), which travels
    1308            0 :         // through the normal error-propagation paths.
    1309            0 :         //
    1310            0 :         // ERROR PROPAGATION
    1311            0 :         //
    1312            0 :         // When the Batcher encounters an error, it sends it as a value
    1313            0 :         // through the spsc_fold channel and exits afterwards.
    1314            0 :         // When the Executor observes such an error in the channel,
    1315            0 :         // it exits returning that error value.
    1316            0 :         //
    1317            0 :         // This design ensures that the Executor stage will still process
    1318            0 :         // the batch that was in flight when the Batcher encountered an error,
    1319            0 :         // thereby behaving identically to a serial implementation.
    1320            0 : 
    1321            0 :         let PageServicePipeliningConfigPipelined {
    1322            0 :             max_batch_size,
    1323            0 :             execution,
    1324            0 :         } = pipelining_config;
    1325              : 
    1326              :         // Macro to _define_ a pipeline stage.
    1327              :         macro_rules! pipeline_stage {
    1328              :             ($name:literal, $cancel:expr, $make_fut:expr) => {{
    1329              :                 let cancel: CancellationToken = $cancel;
    1330              :                 let stage_fut = $make_fut(cancel.clone());
    1331            0 :                 async move {
    1332            0 :                     scopeguard::defer! {
    1333            0 :                         debug!("exiting");
    1334            0 :                     }
    1335            0 :                     timed_after_cancellation(stage_fut, $name, Duration::from_millis(100), &cancel)
    1336            0 :                         .await
    1337            0 :                 }
    1338              :                 .instrument(tracing::info_span!($name))
    1339              :             }};
    1340              :         }
    1341              : 
    1342              :         //
    1343              :         // Batcher
    1344              :         //
    1345              : 
    1346            0 :         let cancel_batcher = self.cancel.child_token();
    1347            0 :         let (mut batch_tx, mut batch_rx) = spsc_fold::channel();
    1348            0 :         let batcher = pipeline_stage!("batcher", cancel_batcher.clone(), move |cancel_batcher| {
    1349            0 :             let ctx = ctx.attached_child();
    1350            0 :             async move {
    1351            0 :                 let mut pgb_reader = pgb_reader;
    1352            0 :                 let mut exit = false;
    1353            0 :                 while !exit {
    1354            0 :                     let read_res = Self::pagestream_read_message(
    1355            0 :                         &mut pgb_reader,
    1356            0 :                         tenant_id,
    1357            0 :                         timeline_id,
    1358            0 :                         &mut timeline_handles,
    1359            0 :                         &cancel_batcher,
    1360            0 :                         &ctx,
    1361            0 :                         request_span.clone(),
    1362            0 :                     )
    1363            0 :                     .await;
    1364            0 :                     let Some(read_res) = read_res.transpose() else {
    1365            0 :                         debug!("client-initiated shutdown");
    1366            0 :                         break;
    1367              :                     };
    1368            0 :                     exit |= read_res.is_err();
    1369            0 :                     let could_send = batch_tx
    1370            0 :                         .send(read_res, |batch, res| {
    1371            0 :                             Self::pagestream_do_batch(max_batch_size, batch, res)
    1372            0 :                         })
    1373            0 :                         .await;
    1374            0 :                     exit |= could_send.is_err();
    1375              :                 }
    1376            0 :                 (pgb_reader, timeline_handles)
    1377            0 :             }
    1378            0 :         });
    1379              : 
    1380              :         //
    1381              :         // Executor
    1382              :         //
    1383              : 
    1384            0 :         let executor = pipeline_stage!("executor", self.cancel.clone(), move |cancel| {
    1385            0 :             let ctx = ctx.attached_child();
    1386            0 :             async move {
    1387            0 :                 let _cancel_batcher = cancel_batcher.drop_guard();
    1388              :                 loop {
    1389            0 :                     let maybe_batch = batch_rx.recv().await;
    1390            0 :                     let batch = match maybe_batch {
    1391            0 :                         Ok(batch) => batch,
    1392              :                         Err(spsc_fold::RecvError::SenderGone) => {
    1393            0 :                             debug!("upstream gone");
    1394            0 :                             return Ok(());
    1395              :                         }
    1396              :                     };
    1397            0 :                     let mut batch = match batch {
    1398            0 :                         Ok(batch) => batch,
    1399            0 :                         Err(e) => {
    1400            0 :                             return Err(e);
    1401              :                         }
    1402              :                     };
    1403            0 :                     batch
    1404            0 :                         .throttle_and_record_start_processing(&self.cancel)
    1405            0 :                         .await?;
    1406            0 :                     self.pagestream_handle_batched_message(pgb_writer, batch, &cancel, &ctx)
    1407            0 :                         .await?;
    1408              :                 }
    1409            0 :             }
    1410            0 :         });
    1411              : 
    1412              :         //
    1413              :         // Execute the stages.
    1414              :         //
    1415              : 
    1416            0 :         match execution {
    1417              :             PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
    1418            0 :                 tokio::join!(batcher, executor)
    1419              :             }
    1420              :             PageServiceProtocolPipelinedExecutionStrategy::Tasks => {
    1421              :                 // These tasks are not tracked anywhere.
    1422            0 :                 let read_messages_task = tokio::spawn(batcher);
    1423            0 :                 let (read_messages_task_res, executor_res_) =
    1424            0 :                     tokio::join!(read_messages_task, executor,);
    1425            0 :                 (
    1426            0 :                     read_messages_task_res.expect("propagated panic from read_messages"),
    1427            0 :                     executor_res_,
    1428            0 :                 )
    1429              :             }
    1430              :         }
    1431            0 :     }
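                      : 
                      :     // Editor's note: an illustrative sketch of the Batcher/Executor pattern
                      :     // above, not part of the source file. It assumes only the spsc_fold API
                      :     // visible in this file (channel(), send() with a fold closure that returns
                      :     // Err(value) when the value cannot be folded, recv() failing with
                      :     // RecvError::SenderGone once the sender is dropped); the item type and
                      :     // fold rule are hypothetical.
                      :     //
                      :     //     use utils::sync::spsc_fold;
                      :     //
                      :     //     async fn pipeline_sketch() {
                      :     //         let (mut tx, mut rx) = spsc_fold::channel::<Vec<u32>>();
                      :     //         let batcher = async move {
                      :     //             for i in 0..10u32 {
                      :     //                 // Fold new items into the pending batch while the
                      :     //                 // executor is still busy with the previous one.
                      :     //                 let _ = tx
                      :     //                     .send(vec![i], |batch, mut item| {
                      :     //                         batch.append(&mut item);
                      :     //                         Ok(())
                      :     //                     })
                      :     //                     .await;
                      :     //             }
                      :     //             // Dropping tx is the clean-shutdown signal to the executor.
                      :     //         };
                      :     //         let executor = async move {
                      :     //             loop {
                      :     //                 match rx.recv().await {
                      :     //                     Ok(batch) => { /* process the drained batch */ }
                      :     //                     Err(spsc_fold::RecvError::SenderGone) => break,
                      :     //                 }
                      :     //             }
                      :     //         };
                      :     //         tokio::join!(batcher, executor);
                      :     //     }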
    1432              : 
    1433              :     /// Helper function to handle the LSN from client request.
    1434              :     ///
    1435              :     /// Each GetPage (and Exists and Nblocks) request includes information about
    1436              :     /// which version of the page is being requested. The primary compute node
    1437              :     /// will always request the latest page version, by setting 'request_lsn' to
    1438              :     /// the last inserted or flushed WAL position, while a standby will request
    1439              :     /// a version at the LSN that it's currently caught up to.
    1440              :     ///
    1441              :     /// In either case, if the page server hasn't received the WAL up to the
    1442              :     /// requested LSN yet, we will wait for it to arrive. The return value is
    1443              :     /// the LSN that should be used to look up the page versions.
    1444              :     ///
    1445              :     /// In addition to the request LSN, each request carries another LSN,
    1446              :     /// 'not_modified_since', which is a hint to the pageserver that the client
    1447              :     /// knows that the page has not been modified between 'not_modified_since'
    1448              :     /// and the request LSN. This allows skipping the wait, as long as the WAL
    1449              :     /// up to 'not_modified_since' has arrived. If the client doesn't have any
    1450              :     /// information about when the page was modified, it will use
    1451              :     /// not_modified_since == request_lsn. If the client lies and sends a too-low
    1452              :     /// not_modified_since such that there are in fact later page versions, the
    1453              :     /// behavior is undefined: the pageserver may return any of the page versions
    1454              :     /// or an error.
    1455            0 :     async fn wait_or_get_last_lsn(
    1456            0 :         timeline: &Timeline,
    1457            0 :         request_lsn: Lsn,
    1458            0 :         not_modified_since: Lsn,
    1459            0 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1460            0 :         ctx: &RequestContext,
    1461            0 :     ) -> Result<Lsn, PageStreamError> {
    1462            0 :         let last_record_lsn = timeline.get_last_record_lsn();
    1463            0 : 
    1464            0 :         // Sanity check the request
    1465            0 :         if request_lsn < not_modified_since {
    1466            0 :             return Err(PageStreamError::BadRequest(
    1467            0 :                 format!(
    1468            0 :                     "invalid request with request LSN {} and not_modified_since {}",
    1469            0 :                     request_lsn, not_modified_since,
    1470            0 :                 )
    1471            0 :                 .into(),
    1472            0 :             ));
    1473            0 :         }
    1474            0 : 
    1475            0 :         // Check explicitly for INVALID just to get a less scary error message if the request is obviously bogus
    1476            0 :         if request_lsn == Lsn::INVALID {
    1477            0 :             return Err(PageStreamError::BadRequest(
    1478            0 :                 "invalid LSN(0) in request".into(),
    1479            0 :             ));
    1480            0 :         }
    1481            0 : 
    1482            0 :         // Clients should only read from recent LSNs on their timeline, or from locations holding an LSN lease.
    1483            0 :         //
    1484            0 :         // We may have older data available, but we make a best effort to detect this case and return an error,
    1485            0 :         // to distinguish a misbehaving client (asking for old LSN) from a storage issue (data missing at a legitimate LSN).
    1486            0 :         if request_lsn < **latest_gc_cutoff_lsn && !timeline.is_gc_blocked_by_lsn_lease_deadline() {
    1487            0 :             let gc_info = &timeline.gc_info.read().unwrap();
    1488            0 :             if !gc_info.leases.contains_key(&request_lsn) {
    1489            0 :                 return Err(
    1490            0 :                     PageStreamError::BadRequest(format!(
    1491            0 :                         "tried to request a page version that was garbage collected. requested at {} gc cutoff {}",
    1492            0 :                         request_lsn, **latest_gc_cutoff_lsn
    1493            0 :                     ).into())
    1494            0 :                 );
    1495            0 :             }
    1496            0 :         }
    1497              : 
    1498              :         // Wait for WAL up to 'not_modified_since' to arrive, if necessary
    1499            0 :         if not_modified_since > last_record_lsn {
    1500            0 :             timeline
    1501            0 :                 .wait_lsn(
    1502            0 :                     not_modified_since,
    1503            0 :                     crate::tenant::timeline::WaitLsnWaiter::PageService,
    1504            0 :                     ctx,
    1505            0 :                 )
    1506            0 :                 .await?;
    1507              :             // Since we waited for 'not_modified_since' to arrive, that is now the last
    1508              :             // record LSN. (Or close enough for our purposes; the last-record LSN can
    1509              :             // advance immediately after we return anyway)
    1510            0 :             Ok(not_modified_since)
    1511              :         } else {
    1512              :             // It might be better to use max(not_modified_since, latest_gc_cutoff_lsn)
    1513              :             // here instead. That would give the same result, since we know that there
    1514              :             // haven't been any modifications since 'not_modified_since'. Using an older
    1515              :             // LSN might be faster, because that could allow skipping recent layers when
    1516              :             // finding the page. However, we have historically used 'last_record_lsn', so
    1517              :             // stick to that for now.
    1518            0 :             Ok(std::cmp::min(last_record_lsn, request_lsn))
    1519              :         }
    1520            0 :     }
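                      : 
                      :     // Editor's note: an illustrative reduction of the decision above, not
                      :     // part of the source file (waiting and GC-cutoff checks elided):
                      :     //
                      :     //     fn lookup_lsn(last_record_lsn: Lsn, request_lsn: Lsn, not_modified_since: Lsn) -> Lsn {
                      :     //         if not_modified_since > last_record_lsn {
                      :     //             // the caller first waits for WAL up to not_modified_since,
                      :     //             // which then is the last record LSN (or close enough)
                      :     //             not_modified_since
                      :     //         } else {
                      :     //             std::cmp::min(last_record_lsn, request_lsn)
                      :     //         }
                      :     //     }
                      :     //
                      :     // Worked example: with last_record_lsn = 0/45, request_lsn = 0/50 and
                      :     // not_modified_since = 0/40, no wait is needed (0/40 <= 0/45) and the
                      :     // lookup LSN is min(0/45, 0/50) = 0/45. With not_modified_since = 0/48,
                      :     // the function waits for WAL up to 0/48 and then uses 0/48.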
    1521              : 
    1522              :     /// Handles the lsn lease request.
    1523              :     /// If a lease cannot be obtained, the client will receive NULL.
    1524            0 :     #[instrument(skip_all, fields(shard_id, %lsn))]
    1525              :     async fn handle_make_lsn_lease<IO>(
    1526              :         &mut self,
    1527              :         pgb: &mut PostgresBackend<IO>,
    1528              :         tenant_shard_id: TenantShardId,
    1529              :         timeline_id: TimelineId,
    1530              :         lsn: Lsn,
    1531              :         ctx: &RequestContext,
    1532              :     ) -> Result<(), QueryError>
    1533              :     where
    1534              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    1535              :     {
    1536              :         let timeline = self
    1537              :             .timeline_handles
    1538              :             .as_mut()
    1539              :             .unwrap()
    1540              :             .get(
    1541              :                 tenant_shard_id.tenant_id,
    1542              :                 timeline_id,
    1543              :                 ShardSelector::Known(tenant_shard_id.to_index()),
    1544              :             )
    1545              :             .await?;
    1546              :         set_tracing_field_shard_id(&timeline);
    1547              : 
    1548              :         let lease = timeline
    1549              :             .renew_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)
    1550            0 :             .inspect_err(|e| {
    1551            0 :                 warn!("{e}");
    1552            0 :             })
    1553              :             .ok();
    1554            0 :         let valid_until_str = lease.map(|l| {
    1555            0 :             l.valid_until
    1556            0 :                 .duration_since(SystemTime::UNIX_EPOCH)
    1557            0 :                 .expect("valid_until is earlier than UNIX_EPOCH")
    1558            0 :                 .as_millis()
    1559            0 :                 .to_string()
    1560            0 :         });
    1561            0 :         let bytes = valid_until_str.as_ref().map(|x| x.as_bytes());
    1562              : 
    1563              :         pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor::text_col(
    1564              :             b"valid_until",
    1565              :         )]))?
    1566              :         .write_message_noflush(&BeMessage::DataRow(&[bytes]))?;
    1567              : 
    1568              :         Ok(())
    1569              :     }
    1570              : 
    1571            0 :     #[instrument(skip_all, fields(shard_id))]
    1572              :     async fn handle_get_rel_exists_request(
    1573              :         &mut self,
    1574              :         timeline: &Timeline,
    1575              :         req: &PagestreamExistsRequest,
    1576              :         ctx: &RequestContext,
    1577              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1578              :         let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
    1579              :         let lsn = Self::wait_or_get_last_lsn(
    1580              :             timeline,
    1581              :             req.request_lsn,
    1582              :             req.not_modified_since,
    1583              :             &latest_gc_cutoff_lsn,
    1584              :             ctx,
    1585              :         )
    1586              :         .await?;
    1587              : 
    1588              :         let exists = timeline
    1589              :             .get_rel_exists(req.rel, Version::Lsn(lsn), ctx)
    1590              :             .await?;
    1591              : 
    1592              :         Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
    1593              :             exists,
    1594              :         }))
    1595              :     }
    1596              : 
    1597            0 :     #[instrument(skip_all, fields(shard_id))]
    1598              :     async fn handle_get_nblocks_request(
    1599              :         &mut self,
    1600              :         timeline: &Timeline,
    1601              :         req: &PagestreamNblocksRequest,
    1602              :         ctx: &RequestContext,
    1603              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1604              :         let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
    1605              :         let lsn = Self::wait_or_get_last_lsn(
    1606              :             timeline,
    1607              :             req.request_lsn,
    1608              :             req.not_modified_since,
    1609              :             &latest_gc_cutoff_lsn,
    1610              :             ctx,
    1611              :         )
    1612              :         .await?;
    1613              : 
    1614              :         let n_blocks = timeline
    1615              :             .get_rel_size(req.rel, Version::Lsn(lsn), ctx)
    1616              :             .await?;
    1617              : 
    1618              :         Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
    1619              :             n_blocks,
    1620              :         }))
    1621              :     }
    1622              : 
    1623            0 :     #[instrument(skip_all, fields(shard_id))]
    1624              :     async fn handle_db_size_request(
    1625              :         &mut self,
    1626              :         timeline: &Timeline,
    1627              :         req: &PagestreamDbSizeRequest,
    1628              :         ctx: &RequestContext,
    1629              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1630              :         let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
    1631              :         let lsn = Self::wait_or_get_last_lsn(
    1632              :             timeline,
    1633              :             req.request_lsn,
    1634              :             req.not_modified_since,
    1635              :             &latest_gc_cutoff_lsn,
    1636              :             ctx,
    1637              :         )
    1638              :         .await?;
    1639              : 
    1640              :         let total_blocks = timeline
    1641              :             .get_db_size(DEFAULTTABLESPACE_OID, req.dbnode, Version::Lsn(lsn), ctx)
    1642              :             .await?;
    1643              :         let db_size = total_blocks as i64 * BLCKSZ as i64;
    1644              : 
    1645              :         Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
    1646              :             db_size,
    1647              :         }))
    1648              :     }
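                      : 
                      :     // Editor's note, not part of the source file: the database size is the
                      :     // block count times the Postgres block size, e.g. with the standard
                      :     // BLCKSZ = 8192, a database of 1_000 blocks reports
                      :     // 1_000 * 8192 = 8_192_000 bytes.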
    1649              : 
    1650            0 :     #[instrument(skip_all)]
    1651              :     async fn handle_get_page_at_lsn_request_batched(
    1652              :         &mut self,
    1653              :         timeline: &Timeline,
    1654              :         effective_lsn: Lsn,
    1655              :         requests: smallvec::SmallVec<[(RelTag, BlockNumber, SmgrOpTimer); 1]>,
    1656              :         ctx: &RequestContext,
    1657              :     ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), PageStreamError>> {
    1658              :         debug_assert_current_span_has_tenant_and_timeline_id();
    1659              : 
    1660              :         timeline
    1661              :             .query_metrics
    1662              :             .observe_getpage_batch_start(requests.len());
    1663              : 
    1664              :         let results = timeline
    1665              :             .get_rel_page_at_lsn_batched(
    1666            0 :                 requests.iter().map(|(reltag, blkno, _)| (reltag, blkno)),
    1667              :                 effective_lsn,
    1668              :                 ctx,
    1669              :             )
    1670              :             .await;
    1671              :         assert_eq!(results.len(), requests.len());
    1672              : 
    1673              :         // TODO: avoid creating the new Vec here
    1674              :         Vec::from_iter(
    1675              :             requests
    1676              :                 .into_iter()
    1677              :                 .zip(results.into_iter())
    1678            0 :                 .map(|((_, _, timer), res)| {
    1679            0 :                     res.map(|page| {
    1680            0 :                         (
    1681            0 :                             PagestreamBeMessage::GetPage(models::PagestreamGetPageResponse {
    1682            0 :                                 page,
    1683            0 :                             }),
    1684            0 :                             timer,
    1685            0 :                         )
    1686            0 :                     })
    1687            0 :                     .map_err(PageStreamError::from)
    1688            0 :                 }),
    1689              :         )
    1690              :     }
    1691              : 
    1692            0 :     #[instrument(skip_all, fields(shard_id))]
    1693              :     async fn handle_get_slru_segment_request(
    1694              :         &mut self,
    1695              :         timeline: &Timeline,
    1696              :         req: &PagestreamGetSlruSegmentRequest,
    1697              :         ctx: &RequestContext,
    1698              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1699              :         let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
    1700              :         let lsn = Self::wait_or_get_last_lsn(
    1701              :             timeline,
    1702              :             req.request_lsn,
    1703              :             req.not_modified_since,
    1704              :             &latest_gc_cutoff_lsn,
    1705              :             ctx,
    1706              :         )
    1707              :         .await?;
    1708              : 
    1709              :         let kind = SlruKind::from_repr(req.kind)
    1710              :             .ok_or(PageStreamError::BadRequest("invalid SLRU kind".into()))?;
    1711              :         let segment = timeline.get_slru_segment(kind, req.segno, lsn, ctx).await?;
    1712              : 
    1713              :         Ok(PagestreamBeMessage::GetSlruSegment(
    1714              :             PagestreamGetSlruSegmentResponse { segment },
    1715              :         ))
    1716              :     }
    1717              : 
    1718              :     /// Note on "fullbackup":
    1719              :     /// Full basebackups should only be used for debugging purposes.
    1720              :     /// Originally, it was introduced to enable breaking storage format changes,
    1721              :     /// but that use case no longer applies.
    1722              :     ///
    1723              :     /// # Coding Discipline
    1724              :     ///
    1725              :     /// Coding discipline within this function: all interaction with the `pgb` connection
    1726              :     /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
    1727              :     /// This is so that we can shutdown page_service quickly.
    1728              :     ///
    1729              :     /// TODO: wrap the pgb that we pass to the basebackup handler so that it's sensitive
    1730              :     /// to connection cancellation.
    1731              :     #[allow(clippy::too_many_arguments)]
    1732            0 :     #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))]
    1733              :     async fn handle_basebackup_request<IO>(
    1734              :         &mut self,
    1735              :         pgb: &mut PostgresBackend<IO>,
    1736              :         tenant_id: TenantId,
    1737              :         timeline_id: TimelineId,
    1738              :         lsn: Option<Lsn>,
    1739              :         prev_lsn: Option<Lsn>,
    1740              :         full_backup: bool,
    1741              :         gzip: bool,
    1742              :         replica: bool,
    1743              :         ctx: &RequestContext,
    1744              :     ) -> Result<(), QueryError>
    1745              :     where
    1746              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    1747              :     {
    1748            0 :         fn map_basebackup_error(err: BasebackupError) -> QueryError {
    1749            0 :             match err {
    1750            0 :                 BasebackupError::Client(e) => QueryError::Disconnected(ConnectionError::Io(e)),
    1751            0 :                 BasebackupError::Server(e) => QueryError::Other(e),
    1752              :             }
    1753            0 :         }
    1754              : 
    1755              :         let started = std::time::Instant::now();
    1756              : 
    1757              :         let timeline = self
    1758              :             .timeline_handles
    1759              :             .as_mut()
    1760              :             .unwrap()
    1761              :             .get(tenant_id, timeline_id, ShardSelector::Zero)
    1762              :             .await?;
    1763              : 
    1764              :         let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
    1765              :         if let Some(lsn) = lsn {
    1766              :             // Backup was requested at a particular LSN. Wait for it to arrive.
    1767              :             info!("waiting for {}", lsn);
    1768              :             timeline
    1769              :                 .wait_lsn(
    1770              :                     lsn,
    1771              :                     crate::tenant::timeline::WaitLsnWaiter::PageService,
    1772              :                     ctx,
    1773              :                 )
    1774              :                 .await?;
    1775              :             timeline
    1776              :                 .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)
    1777              :                 .context("invalid basebackup lsn")?;
    1778              :         }
    1779              : 
    1780              :         let lsn_awaited_after = started.elapsed();
    1781              : 
    1782              :         // switch client to COPYOUT
    1783              :         pgb.write_message_noflush(&BeMessage::CopyOutResponse)
    1784              :             .map_err(QueryError::Disconnected)?;
    1785              :         self.flush_cancellable(pgb, &self.cancel).await?;
    1786              : 
    1787              :         // Send a basebackup tarball of the timeline. Compress unless this is a
    1788              :         // fullbackup. TODO: compress in that case too (tests need to be updated).
    1789              :         if full_backup {
    1790              :             let mut writer = pgb.copyout_writer();
    1791              :             basebackup::send_basebackup_tarball(
    1792              :                 &mut writer,
    1793              :                 &timeline,
    1794              :                 lsn,
    1795              :                 prev_lsn,
    1796              :                 full_backup,
    1797              :                 replica,
    1798              :                 ctx,
    1799              :             )
    1800              :             .await
    1801              :             .map_err(map_basebackup_error)?;
    1802              :         } else {
    1803              :             let mut writer = BufWriter::new(pgb.copyout_writer());
    1804              :             if gzip {
    1805              :                 let mut encoder = GzipEncoder::with_quality(
    1806              :                     &mut writer,
    1807              :                     // NOTE using fast compression because it's on the critical path
    1808              :                     //      for compute startup. For an empty database, we get
    1809              :                     //      <100KB with this method. The Level::Best compression method
    1810              :                     //      gives us <20KB, but maybe we should add basebackup caching
    1811              :                     //      on compute shutdown first.
    1812              :                     async_compression::Level::Fastest,
    1813              :                 );
    1814              :                 basebackup::send_basebackup_tarball(
    1815              :                     &mut encoder,
    1816              :                     &timeline,
    1817              :                     lsn,
    1818              :                     prev_lsn,
    1819              :                     full_backup,
    1820              :                     replica,
    1821              :                     ctx,
    1822              :                 )
    1823              :                 .await
    1824              :                 .map_err(map_basebackup_error)?;
    1825              :                 // shutdown the encoder to ensure the gzip footer is written
    1826              :                 encoder
    1827              :                     .shutdown()
    1828              :                     .await
    1829            0 :                     .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?;
    1830              :             } else {
    1831              :                 basebackup::send_basebackup_tarball(
    1832              :                     &mut writer,
    1833              :                     &timeline,
    1834              :                     lsn,
    1835              :                     prev_lsn,
    1836              :                     full_backup,
    1837              :                     replica,
    1838              :                     ctx,
    1839              :                 )
    1840              :                 .await
    1841              :                 .map_err(map_basebackup_error)?;
    1842              :             }
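                      :             // Flush the BufWriter so every buffered tarball byte reaches
                      :             // the connection before CopyDone is sent below.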
    1843              :             writer
    1844              :                 .flush()
    1845              :                 .await
    1846            0 :                 .map_err(|e| map_basebackup_error(BasebackupError::Client(e)))?;
    1847              :         }
    1848              : 
    1849              :         pgb.write_message_noflush(&BeMessage::CopyDone)
    1850              :             .map_err(QueryError::Disconnected)?;
    1851              :         self.flush_cancellable(pgb, &timeline.cancel).await?;
    1852              : 
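                      :         // Time spent streaming the backup itself, excluding the initial
                      :         // wait for the requested LSN to become available.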
    1853              :         let basebackup_after = started
    1854              :             .elapsed()
    1855              :             .checked_sub(lsn_awaited_after)
    1856              :             .unwrap_or(Duration::ZERO);
    1857              : 
    1858              :         info!(
    1859              :             lsn_await_millis = lsn_awaited_after.as_millis(),
    1860              :             basebackup_millis = basebackup_after.as_millis(),
    1861              :             "basebackup complete"
    1862              :         );
    1863              : 
    1864              :         Ok(())
    1865              :     }
    1866              : 
    1867              :     // When accessing the management API, supply None as the tenant id.
    1868              :     // When authorizing access to a tenant, pass the corresponding tenant id.
    1869            0 :     fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<(), QueryError> {
    1870            0 :         if self.auth.is_none() {
    1871              :             // Auth is set to Trust; nothing to check, so just return Ok.
    1872            0 :             return Ok(());
    1873            0 :         }
    1874            0 :         // `auth` is Some, as just checked above. When auth is enabled,
    1875            0 :         // claims are always present because of checks during connection
    1876            0 :         // init, so this expect won't trigger.
    1877            0 :         let claims = self
    1878            0 :             .claims
    1879            0 :             .as_ref()
    1880            0 :             .expect("claims presence already checked");
    1881            0 :         check_permission(claims, tenant_id).map_err(|e| QueryError::Unauthorized(e.0))
    1882            0 :     }
    1883              : }
    1884              : 
    1885              : /// `basebackup tenant timeline [lsn | latest] [--gzip] [--replica]`
    1886              : #[derive(Debug, Clone, Eq, PartialEq)]
    1887              : struct BaseBackupCmd {
    1888              :     tenant_id: TenantId,
    1889              :     timeline_id: TimelineId,
    1890              :     lsn: Option<Lsn>,
    1891              :     gzip: bool,
    1892              :     replica: bool,
    1893              : }
    1894              : 
    1895              : /// `fullbackup tenant timeline [lsn] [prev_lsn]`
    1896              : #[derive(Debug, Clone, Eq, PartialEq)]
    1897              : struct FullBackupCmd {
    1898              :     tenant_id: TenantId,
    1899              :     timeline_id: TimelineId,
    1900              :     lsn: Option<Lsn>,
    1901              :     prev_lsn: Option<Lsn>,
    1902              : }
    1903              : 
    1904              : /// `pagestream_v2 tenant timeline`
    1905              : #[derive(Debug, Clone, Eq, PartialEq)]
    1906              : struct PageStreamCmd {
    1907              :     tenant_id: TenantId,
    1908              :     timeline_id: TimelineId,
    1909              : }
    1910              : 
    1911              : /// `lease lsn tenant_shard timeline lsn`
    1912              : #[derive(Debug, Clone, Eq, PartialEq)]
    1913              : struct LeaseLsnCmd {
    1914              :     tenant_shard_id: TenantShardId,
    1915              :     timeline_id: TimelineId,
    1916              :     lsn: Lsn,
    1917              : }
    1918              : 
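                      : /// A parsed page-service query. The accepted grammar (the command word
                      : /// is matched case-insensitively) is:
                      : ///
                      : /// - `pagestream_v2 <tenant_id> <timeline_id>`
                      : /// - `basebackup <tenant_id> <timeline_id> [lsn | latest] [--gzip] [--replica]`
                      : /// - `fullbackup <tenant_id> <timeline_id> [lsn] [prev_lsn]`
                      : /// - `lease lsn <tenant_shard_id> <timeline_id> <lsn>`
                      : /// - `set <anything>` (acknowledged and otherwise ignored)
                      : ///
                      : /// A minimal client-side sketch, not compiled here: it assumes a connected
                      : /// `tokio_postgres::Client` named `client` (not something this module
                      : /// itself uses) and made-up placeholder ids.
                      : ///
                      : /// ```ignore
                      : /// use futures::{pin_mut, TryStreamExt};
                      : ///
                      : /// let query = "basebackup 3fa85f6457174562b3fc2c963f66afa6 \
                      : ///              de190bd64b4c4e9a8d6b7a9b8c7d6e5f --gzip";
                      : /// // The backup arrives as COPY-out data; collect the gzipped tarball.
                      : /// let stream = client.copy_out(query).await?;
                      : /// pin_mut!(stream);
                      : /// let mut tarball: Vec<u8> = Vec::new();
                      : /// while let Some(chunk) = stream.try_next().await? {
                      : ///     tarball.extend_from_slice(&chunk);
                      : /// }
                      : /// ```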
    1919              : #[derive(Debug, Clone, Eq, PartialEq)]
    1920              : enum PageServiceCmd {
    1921              :     Set,
    1922              :     PageStream(PageStreamCmd),
    1923              :     BaseBackup(BaseBackupCmd),
    1924              :     FullBackup(FullBackupCmd),
    1925              :     LeaseLsn(LeaseLsnCmd),
    1926              : }
    1927              : 
    1928              : impl PageStreamCmd {
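                      :     /// Parses the argument string of `pagestream_v2`: exactly
                      :     /// `"<tenant_id> <timeline_id>"`.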
    1929            6 :     fn parse(query: &str) -> anyhow::Result<Self> {
    1930            6 :         let parameters = query.split_whitespace().collect_vec();
    1931            6 :         if parameters.len() != 2 {
    1932            2 :             bail!(
    1933            2 :                 "invalid number of parameters for pagestream command: {}",
    1934            2 :                 query
    1935            2 :             );
    1936            4 :         }
    1937            4 :         let tenant_id = TenantId::from_str(parameters[0])
    1938            4 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    1939            2 :         let timeline_id = TimelineId::from_str(parameters[1])
    1940            2 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    1941            2 :         Ok(Self {
    1942            2 :             tenant_id,
    1943            2 :             timeline_id,
    1944            2 :         })
    1945            6 :     }
    1946              : }
    1947              : 
    1948              : impl FullBackupCmd {
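                      :     /// Parses the argument string of `fullbackup`: tenant and timeline
                      :     /// ids, followed by optional positional `lsn` and `prev_lsn` values.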
    1949            4 :     fn parse(query: &str) -> anyhow::Result<Self> {
    1950            4 :         let parameters = query.split_whitespace().collect_vec();
    1951            4 :         if parameters.len() < 2 || parameters.len() > 4 {
    1952            0 :             bail!(
    1953            0 :                 "invalid number of parameters for fullbackup command: {}",
    1954            0 :                 query
    1955            0 :             );
    1956            4 :         }
    1957            4 :         let tenant_id = TenantId::from_str(parameters[0])
    1958            4 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    1959            4 :         let timeline_id = TimelineId::from_str(parameters[1])
    1960            4 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    1961              :         // The caller is responsible for providing correct lsn and prev_lsn.
    1962            4 :         let lsn = if let Some(lsn_str) = parameters.get(2) {
    1963              :             Some(
    1964            2 :                 Lsn::from_str(lsn_str)
    1965            2 :                     .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?,
    1966              :             )
    1967              :         } else {
    1968            2 :             None
    1969              :         };
    1970            4 :         let prev_lsn = if let Some(prev_lsn_str) = parameters.get(3) {
    1971              :             Some(
    1972            2 :                 Lsn::from_str(prev_lsn_str)
    1973            2 :                     .with_context(|| format!("Failed to parse Lsn from {prev_lsn_str}"))?,
    1974              :             )
    1975              :         } else {
    1976            2 :             None
    1977              :         };
    1978            4 :         Ok(Self {
    1979            4 :             tenant_id,
    1980            4 :             timeline_id,
    1981            4 :             lsn,
    1982            4 :             prev_lsn,
    1983            4 :         })
    1984            4 :     }
    1985              : }
    1986              : 
    1987              : impl BaseBackupCmd {
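                      :     /// Parses the argument string of `basebackup`. Accepted shapes, as
                      :     /// exercised by the tests below: `"<tenant> <timeline>"`,
                      :     /// `"<tenant> <timeline> latest"`, and
                      :     /// `"<tenant> <timeline> 0/16ABCDE --replica --gzip"` (flags in any
                      :     /// order, each at most once).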
    1988           18 :     fn parse(query: &str) -> anyhow::Result<Self> {
    1989           18 :         let parameters = query.split_whitespace().collect_vec();
    1990           18 :         if parameters.len() < 2 {
    1991            0 :             bail!(
    1992            0 :                 "invalid number of parameters for basebackup command: {}",
    1993            0 :                 query
    1994            0 :             );
    1995           18 :         }
    1996           18 :         let tenant_id = TenantId::from_str(parameters[0])
    1997           18 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    1998           18 :         let timeline_id = TimelineId::from_str(parameters[1])
    1999           18 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2000              :         let lsn;
    2001              :         let flags_parse_from;
    2002           18 :         if let Some(maybe_lsn) = parameters.get(2) {
    2003           16 :             if *maybe_lsn == "latest" {
    2004            2 :                 lsn = None;
    2005            2 :                 flags_parse_from = 3;
    2006           14 :             } else if maybe_lsn.starts_with("--") {
    2007           10 :                 lsn = None;
    2008           10 :                 flags_parse_from = 2;
    2009           10 :             } else {
    2010              :                 lsn = Some(
    2011            4 :                     Lsn::from_str(maybe_lsn)
    2012            4 :                         .with_context(|| format!("Failed to parse lsn from {maybe_lsn}"))?,
    2013              :                 );
    2014            4 :                 flags_parse_from = 3;
    2015              :             }
    2016            2 :         } else {
    2017            2 :             lsn = None;
    2018            2 :             flags_parse_from = 2;
    2019            2 :         }
    2020              : 
    2021           18 :         let mut gzip = false;
    2022           18 :         let mut replica = false;
    2023              : 
    2024           22 :         for &param in &parameters[flags_parse_from..] {
    2025           22 :             match param {
    2026           22 :                 "--gzip" => {
    2027           14 :                     if gzip {
    2028            2 :                         bail!("duplicate parameter for basebackup command: {param}")
    2029           12 :                     }
    2030           12 :                     gzip = true
    2031              :                 }
    2032            8 :                 "--replica" => {
    2033            4 :                     if replica {
    2034            0 :                         bail!("duplicate parameter for basebackup command: {param}")
    2035            4 :                     }
    2036            4 :                     replica = true
    2037              :                 }
    2038            4 :                 _ => bail!("invalid parameter for basebackup command: {param}"),
    2039              :             }
    2040              :         }
    2041           12 :         Ok(Self {
    2042           12 :             tenant_id,
    2043           12 :             timeline_id,
    2044           12 :             lsn,
    2045           12 :             gzip,
    2046           12 :             replica,
    2047           12 :         })
    2048           18 :     }
    2049              : }
    2050              : 
    2051              : impl LeaseLsnCmd {
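                      :     /// Parses the argument string of `lease lsn`: exactly
                      :     /// `"<tenant_shard_id> <timeline_id> <lsn>"`.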
    2052            4 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2053            4 :         let parameters = query.split_whitespace().collect_vec();
    2054            4 :         if parameters.len() != 3 {
    2055            0 :             bail!(
    2056            0 :                 "invalid number of parameters for lease lsn command: {}",
    2057            0 :                 query
    2058            0 :             );
    2059            4 :         }
    2060            4 :         let tenant_shard_id = TenantShardId::from_str(parameters[0])
    2061            4 :             .with_context(|| format!("Failed to parse tenant shard id from {}", parameters[0]))?;
    2062            4 :         let timeline_id = TimelineId::from_str(parameters[1])
    2063            4 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2064            4 :         let lsn = Lsn::from_str(parameters[2])
    2065            4 :             .with_context(|| format!("Failed to parse lsn from {}", parameters[2]))?;
    2066            4 :         Ok(Self {
    2067            4 :             tenant_shard_id,
    2068            4 :             timeline_id,
    2069            4 :             lsn,
    2070            4 :         })
    2071            4 :     }
    2072              : }
    2073              : 
    2074              : impl PageServiceCmd {
    2075           42 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2076           42 :         let query = query.trim();
    2077           42 :         let Some((cmd, other)) = query.split_once(' ') else {
    2078            4 :             bail!("cannot parse query: {query}")
    2079              :         };
    2080           38 :         match cmd.to_ascii_lowercase().as_str() {
    2081           38 :             "pagestream_v2" => Ok(Self::PageStream(PageStreamCmd::parse(other)?)),
    2082           32 :             "basebackup" => Ok(Self::BaseBackup(BaseBackupCmd::parse(other)?)),
    2083           14 :             "fullbackup" => Ok(Self::FullBackup(FullBackupCmd::parse(other)?)),
    2084           10 :             "lease" => {
    2085            6 :                 let Some((cmd2, other)) = other.split_once(' ') else {
    2086            0 :                     bail!("invalid lease command: {query}");
    2087              :                 };
    2088            6 :                 let cmd2 = cmd2.to_ascii_lowercase();
    2089            6 :                 if cmd2 == "lsn" {
    2090            4 :                     Ok(Self::LeaseLsn(LeaseLsnCmd::parse(other)?))
    2091              :                 } else {
    2092            2 :                     bail!("invalid lease command: {cmd2}");
    2093              :                 }
    2094              :             }
    2095            4 :             "set" => Ok(Self::Set),
    2096            0 :             _ => Err(anyhow::anyhow!("unsupported command {cmd} in {query}")),
    2097              :         }
    2098           42 :     }
    2099              : }
    2100              : 
    2101              : impl<IO> postgres_backend::Handler<IO> for PageServerHandler
    2102              : where
    2103              :     IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    2104              : {
    2105            0 :     fn check_auth_jwt(
    2106            0 :         &mut self,
    2107            0 :         _pgb: &mut PostgresBackend<IO>,
    2108            0 :         jwt_response: &[u8],
    2109            0 :     ) -> Result<(), QueryError> {
    2110              :         // This unwrap never triggers: check_auth_jwt is only called when
    2111              :         // auth_type is NeonJWT, which requires auth to be present.
    2112            0 :         let data = self
    2113            0 :             .auth
    2114            0 :             .as_ref()
    2115            0 :             .unwrap()
    2116            0 :             .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)
    2117            0 :             .map_err(|e| QueryError::Unauthorized(e.0))?;
    2118              : 
    2119            0 :         if matches!(data.claims.scope, Scope::Tenant) && data.claims.tenant_id.is_none() {
    2120            0 :             return Err(QueryError::Unauthorized(
    2121            0 :                 "jwt token scope is Tenant, but tenant id is missing".into(),
    2122            0 :             ));
    2123            0 :         }
    2124            0 : 
    2125            0 :         debug!(
    2126            0 :             "jwt scope check succeeded for scope: {:#?} by tenant id: {:?}",
    2127              :             data.claims.scope, data.claims.tenant_id,
    2128              :         );
    2129              : 
    2130            0 :         self.claims = Some(data.claims);
    2131            0 :         Ok(())
    2132            0 :     }
    2133              : 
    2134            0 :     fn startup(
    2135            0 :         &mut self,
    2136            0 :         _pgb: &mut PostgresBackend<IO>,
    2137            0 :         _sm: &FeStartupPacket,
    2138            0 :     ) -> Result<(), QueryError> {
    2139            0 :         fail::fail_point!("ps::connection-start::startup-packet");
    2140            0 :         Ok(())
    2141            0 :     }
    2142              : 
    2143            0 :     #[instrument(skip_all, fields(tenant_id, timeline_id))]
    2144              :     async fn process_query(
    2145              :         &mut self,
    2146              :         pgb: &mut PostgresBackend<IO>,
    2147              :         query_string: &str,
    2148              :     ) -> Result<(), QueryError> {
    2149            0 :         fail::fail_point!("simulated-bad-compute-connection", |_| {
    2150            0 :             info!("Hit failpoint for bad connection");
    2151            0 :             Err(QueryError::SimulatedConnectionError)
    2152            0 :         });
    2153              : 
    2154              :         fail::fail_point!("ps::connection-start::process-query");
    2155              : 
    2156              :         let ctx = self.connection_ctx.attached_child();
    2157              :         debug!("process query {query_string}");
    2158              :         let query = PageServiceCmd::parse(query_string)?;
    2159              :         match query {
    2160              :             PageServiceCmd::PageStream(PageStreamCmd {
    2161              :                 tenant_id,
    2162              :                 timeline_id,
    2163              :             }) => {
    2164              :                 tracing::Span::current()
    2165              :                     .record("tenant_id", field::display(tenant_id))
    2166              :                     .record("timeline_id", field::display(timeline_id));
    2167              : 
    2168              :                 self.check_permission(Some(tenant_id))?;
    2169              : 
    2170              :                 COMPUTE_COMMANDS_COUNTERS
    2171              :                     .for_command(ComputeCommandKind::PageStreamV2)
    2172              :                     .inc();
    2173              : 
    2174              :                 self.handle_pagerequests(
    2175              :                     pgb,
    2176              :                     tenant_id,
    2177              :                     timeline_id,
    2178              :                     PagestreamProtocolVersion::V2,
    2179              :                     ctx,
    2180              :                 )
    2181              :                 .await?;
    2182              :             }
    2183              :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2184              :                 tenant_id,
    2185              :                 timeline_id,
    2186              :                 lsn,
    2187              :                 gzip,
    2188              :                 replica,
    2189              :             }) => {
    2190              :                 tracing::Span::current()
    2191              :                     .record("tenant_id", field::display(tenant_id))
    2192              :                     .record("timeline_id", field::display(timeline_id));
    2193              : 
    2194              :                 self.check_permission(Some(tenant_id))?;
    2195              : 
    2196              :                 COMPUTE_COMMANDS_COUNTERS
    2197              :                     .for_command(ComputeCommandKind::Basebackup)
    2198              :                     .inc();
    2199              :                 let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording();
    2200            0 :                 let res = async {
    2201            0 :                     self.handle_basebackup_request(
    2202            0 :                         pgb,
    2203            0 :                         tenant_id,
    2204            0 :                         timeline_id,
    2205            0 :                         lsn,
    2206            0 :                         None,
    2207            0 :                         false,
    2208            0 :                         gzip,
    2209            0 :                         replica,
    2210            0 :                         &ctx,
    2211            0 :                     )
    2212            0 :                     .await?;
    2213            0 :                     pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2214            0 :                     Result::<(), QueryError>::Ok(())
    2215            0 :                 }
    2216              :                 .await;
    2217              :                 metric_recording.observe(&res);
    2218              :                 res?;
    2219              :             }
    2220              :             // Same as basebackup, but the result also includes relation data.
    2221              :             PageServiceCmd::FullBackup(FullBackupCmd {
    2222              :                 tenant_id,
    2223              :                 timeline_id,
    2224              :                 lsn,
    2225              :                 prev_lsn,
    2226              :             }) => {
    2227              :                 tracing::Span::current()
    2228              :                     .record("tenant_id", field::display(tenant_id))
    2229              :                     .record("timeline_id", field::display(timeline_id));
    2230              : 
    2231              :                 self.check_permission(Some(tenant_id))?;
    2232              : 
    2233              :                 COMPUTE_COMMANDS_COUNTERS
    2234              :                     .for_command(ComputeCommandKind::Fullbackup)
    2235              :                     .inc();
    2236              : 
    2237              :                 // Run the backup; this also verifies that the timeline exists.
    2238              :                 self.handle_basebackup_request(
    2239              :                     pgb,
    2240              :                     tenant_id,
    2241              :                     timeline_id,
    2242              :                     lsn,
    2243              :                     prev_lsn,
    2244              :                     true,
    2245              :                     false,
    2246              :                     false,
    2247              :                     &ctx,
    2248              :                 )
    2249              :                 .await?;
    2250              :                 pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2251              :             }
    2252              :             PageServiceCmd::Set => {
    2253              :                 // Accept and ignore SET: important because psycopg2 executes
    2254              :                 // "SET datestyle TO 'ISO'" on connect.
    2255              :                 pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2256              :             }
    2257              :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2258              :                 tenant_shard_id,
    2259              :                 timeline_id,
    2260              :                 lsn,
    2261              :             }) => {
    2262              :                 tracing::Span::current()
    2263              :                     .record("tenant_id", field::display(tenant_shard_id))
    2264              :                     .record("timeline_id", field::display(timeline_id));
    2265              : 
    2266              :                 self.check_permission(Some(tenant_shard_id.tenant_id))?;
    2267              : 
    2268              :                 COMPUTE_COMMANDS_COUNTERS
    2269              :                     .for_command(ComputeCommandKind::LeaseLsn)
    2270              :                     .inc();
    2271              : 
    2272              :                 match self
    2273              :                     .handle_make_lsn_lease(pgb, tenant_shard_id, timeline_id, lsn, &ctx)
    2274              :                     .await
    2275              :                 {
    2276              :                     Ok(()) => {
    2277              :                         pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?
    2278              :                     }
    2279              :                     Err(e) => {
    2280              :                         error!("error obtaining lsn lease for {lsn}: {e:?}");
    2281              :                         pgb.write_message_noflush(&BeMessage::ErrorResponse(
    2282              :                             &e.to_string(),
    2283              :                             Some(e.pg_error_code()),
    2284              :                         ))?
    2285              :                     }
    2286              :                 };
    2287              :             }
    2288              :         }
    2289              : 
    2290              :         Ok(())
    2291              :     }
    2292              : }
    2293              : 
    2294              : impl From<GetActiveTenantError> for QueryError {
    2295            0 :     fn from(e: GetActiveTenantError) -> Self {
    2296            0 :         match e {
    2297            0 :             GetActiveTenantError::WaitForActiveTimeout { .. } => QueryError::Disconnected(
    2298            0 :                 ConnectionError::Io(io::Error::new(io::ErrorKind::TimedOut, e.to_string())),
    2299            0 :             ),
    2300              :             GetActiveTenantError::Cancelled
    2301              :             | GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
    2302            0 :                 QueryError::Shutdown
    2303              :             }
    2304            0 :             e @ GetActiveTenantError::NotFound(_) => QueryError::NotFound(format!("{e}").into()),
    2305            0 :             e => QueryError::Other(anyhow::anyhow!(e)),
    2306              :         }
    2307            0 :     }
    2308              : }
    2309              : 
    2310              : #[derive(Debug, thiserror::Error)]
    2311              : pub(crate) enum GetActiveTimelineError {
    2312              :     #[error(transparent)]
    2313              :     Tenant(GetActiveTenantError),
    2314              :     #[error(transparent)]
    2315              :     Timeline(#[from] GetTimelineError),
    2316              : }
    2317              : 
    2318              : impl From<GetActiveTimelineError> for QueryError {
    2319            0 :     fn from(e: GetActiveTimelineError) -> Self {
    2320            0 :         match e {
    2321            0 :             GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => QueryError::Shutdown,
    2322            0 :             GetActiveTimelineError::Tenant(e) => e.into(),
    2323            0 :             GetActiveTimelineError::Timeline(e) => QueryError::NotFound(format!("{e}").into()),
    2324              :         }
    2325            0 :     }
    2326              : }
    2327              : 
    2328            0 : fn set_tracing_field_shard_id(timeline: &Timeline) {
    2329            0 :     debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
    2330            0 :     tracing::Span::current().record(
    2331            0 :         "shard_id",
    2332            0 :         tracing::field::display(timeline.tenant_shard_id.shard_slug()),
    2333            0 :     );
    2334            0 :     debug_assert_current_span_has_tenant_and_timeline_id();
    2335            0 : }
    2336              : 
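                      : /// Marker newtype, presumably wrapping an LSN that has already been
                      : /// waited for, so that un-waited LSNs can't be passed where a waited
                      : /// one is expected. (The intent is inferred from the name.)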
    2337              : struct WaitedForLsn(Lsn);
    2338              : impl From<WaitedForLsn> for Lsn {
    2339            0 :     fn from(WaitedForLsn(lsn): WaitedForLsn) -> Self {
    2340            0 :         lsn
    2341            0 :     }
    2342              : }
    2343              : 
    2344              : #[cfg(test)]
    2345              : mod tests {
    2346              :     use utils::shard::ShardCount;
    2347              : 
    2348              :     use super::*;
    2349              : 
    2350              :     #[test]
    2351            2 :     fn pageservice_cmd_parse() {
    2352            2 :         let tenant_id = TenantId::generate();
    2353            2 :         let timeline_id = TimelineId::generate();
    2354            2 :         let cmd =
    2355            2 :             PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id} {timeline_id}")).unwrap();
    2356            2 :         assert_eq!(
    2357            2 :             cmd,
    2358            2 :             PageServiceCmd::PageStream(PageStreamCmd {
    2359            2 :                 tenant_id,
    2360            2 :                 timeline_id
    2361            2 :             })
    2362            2 :         );
    2363            2 :         let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id}")).unwrap();
    2364            2 :         assert_eq!(
    2365            2 :             cmd,
    2366            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2367            2 :                 tenant_id,
    2368            2 :                 timeline_id,
    2369            2 :                 lsn: None,
    2370            2 :                 gzip: false,
    2371            2 :                 replica: false
    2372            2 :             })
    2373            2 :         );
    2374            2 :         let cmd =
    2375            2 :             PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} --gzip")).unwrap();
    2376            2 :         assert_eq!(
    2377            2 :             cmd,
    2378            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2379            2 :                 tenant_id,
    2380            2 :                 timeline_id,
    2381            2 :                 lsn: None,
    2382            2 :                 gzip: true,
    2383            2 :                 replica: false
    2384            2 :             })
    2385            2 :         );
    2386            2 :         let cmd =
    2387            2 :             PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} latest")).unwrap();
    2388            2 :         assert_eq!(
    2389            2 :             cmd,
    2390            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2391            2 :                 tenant_id,
    2392            2 :                 timeline_id,
    2393            2 :                 lsn: None,
    2394            2 :                 gzip: false,
    2395            2 :                 replica: false
    2396            2 :             })
    2397            2 :         );
    2398            2 :         let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} 0/16ABCDE"))
    2399            2 :             .unwrap();
    2400            2 :         assert_eq!(
    2401            2 :             cmd,
    2402            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2403            2 :                 tenant_id,
    2404            2 :                 timeline_id,
    2405            2 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2406            2 :                 gzip: false,
    2407            2 :                 replica: false
    2408            2 :             })
    2409            2 :         );
    2410            2 :         let cmd = PageServiceCmd::parse(&format!(
    2411            2 :             "basebackup {tenant_id} {timeline_id} --replica --gzip"
    2412            2 :         ))
    2413            2 :         .unwrap();
    2414            2 :         assert_eq!(
    2415            2 :             cmd,
    2416            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2417            2 :                 tenant_id,
    2418            2 :                 timeline_id,
    2419            2 :                 lsn: None,
    2420            2 :                 gzip: true,
    2421            2 :                 replica: true
    2422            2 :             })
    2423            2 :         );
    2424            2 :         let cmd = PageServiceCmd::parse(&format!(
    2425            2 :             "basebackup {tenant_id} {timeline_id} 0/16ABCDE --replica --gzip"
    2426            2 :         ))
    2427            2 :         .unwrap();
    2428            2 :         assert_eq!(
    2429            2 :             cmd,
    2430            2 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2431            2 :                 tenant_id,
    2432            2 :                 timeline_id,
    2433            2 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2434            2 :                 gzip: true,
    2435            2 :                 replica: true
    2436            2 :             })
    2437            2 :         );
    2438            2 :         let cmd = PageServiceCmd::parse(&format!("fullbackup {tenant_id} {timeline_id}")).unwrap();
    2439            2 :         assert_eq!(
    2440            2 :             cmd,
    2441            2 :             PageServiceCmd::FullBackup(FullBackupCmd {
    2442            2 :                 tenant_id,
    2443            2 :                 timeline_id,
    2444            2 :                 lsn: None,
    2445            2 :                 prev_lsn: None
    2446            2 :             })
    2447            2 :         );
    2448            2 :         let cmd = PageServiceCmd::parse(&format!(
    2449            2 :             "fullbackup {tenant_id} {timeline_id} 0/16ABCDE 0/16ABCDF"
    2450            2 :         ))
    2451            2 :         .unwrap();
    2452            2 :         assert_eq!(
    2453            2 :             cmd,
    2454            2 :             PageServiceCmd::FullBackup(FullBackupCmd {
    2455            2 :                 tenant_id,
    2456            2 :                 timeline_id,
    2457            2 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2458            2 :                 prev_lsn: Some(Lsn::from_str("0/16ABCDF").unwrap()),
    2459            2 :             })
    2460            2 :         );
    2461            2 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2462            2 :         let cmd = PageServiceCmd::parse(&format!(
    2463            2 :             "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
    2464            2 :         ))
    2465            2 :         .unwrap();
    2466            2 :         assert_eq!(
    2467            2 :             cmd,
    2468            2 :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2469            2 :                 tenant_shard_id,
    2470            2 :                 timeline_id,
    2471            2 :                 lsn: Lsn::from_str("0/16ABCDE").unwrap(),
    2472            2 :             })
    2473            2 :         );
    2474            2 :         let tenant_shard_id = TenantShardId::split(&tenant_shard_id, ShardCount(8))[1];
    2475            2 :         let cmd = PageServiceCmd::parse(&format!(
    2476            2 :             "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
    2477            2 :         ))
    2478            2 :         .unwrap();
    2479            2 :         assert_eq!(
    2480            2 :             cmd,
    2481            2 :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2482            2 :                 tenant_shard_id,
    2483            2 :                 timeline_id,
    2484            2 :                 lsn: Lsn::from_str("0/16ABCDE").unwrap(),
    2485            2 :             })
    2486            2 :         );
    2487            2 :         let cmd = PageServiceCmd::parse("set a = b").unwrap();
    2488            2 :         assert_eq!(cmd, PageServiceCmd::Set);
    2489            2 :         let cmd = PageServiceCmd::parse("SET foo").unwrap();
    2490            2 :         assert_eq!(cmd, PageServiceCmd::Set);
    2491            2 :     }
    2492              : 
    2493              :     #[test]
    2494            2 :     fn pageservice_cmd_err_handling() {
    2495            2 :         let tenant_id = TenantId::generate();
    2496            2 :         let timeline_id = TimelineId::generate();
    2497            2 :         let cmd = PageServiceCmd::parse("unknown_command");
    2498            2 :         assert!(cmd.is_err());
    2499            2 :         let cmd = PageServiceCmd::parse("pagestream_v2");
    2500            2 :         assert!(cmd.is_err());
    2501            2 :         let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx"));
    2502            2 :         assert!(cmd.is_err());
    2503            2 :         let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx {timeline_id}xxx"));
    2504            2 :         assert!(cmd.is_err());
    2505            2 :         let cmd = PageServiceCmd::parse(&format!(
    2506            2 :             "basebackup {tenant_id} {timeline_id} --gzip --gzip"
    2507            2 :         ));
    2508            2 :         assert!(cmd.is_err());
    2509            2 :         let cmd = PageServiceCmd::parse(&format!(
    2510            2 :             "basebackup {tenant_id} {timeline_id} --gzip --unknown"
    2511            2 :         ));
    2512            2 :         assert!(cmd.is_err());
    2513            2 :         let cmd = PageServiceCmd::parse(&format!(
    2514            2 :             "basebackup {tenant_id} {timeline_id} --gzip 0/16ABCDE"
    2515            2 :         ));
    2516            2 :         assert!(cmd.is_err());
    2517            2 :         let cmd = PageServiceCmd::parse(&format!("lease {tenant_id} {timeline_id} gzip 0/16ABCDE"));
    2518            2 :         assert!(cmd.is_err());
    2519            2 :     }
    2520              : }
        

Generated by: LCOV version 2.1-beta