//! The Page Service listens for client connections and serves their GetPage@LSN
//! requests.

use std::borrow::Cow;
use std::num::NonZeroUsize;
use std::os::fd::AsRawFd;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use std::{io, str};

use anyhow::{Context, bail};
use async_compression::tokio::write::GzipEncoder;
use bytes::Buf;
use futures::FutureExt;
use itertools::Itertools;
use once_cell::sync::OnceCell;
use pageserver_api::config::{
    PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
    PageServiceProtocolPipelinedExecutionStrategy,
};
use pageserver_api::key::rel_block_to_key;
use pageserver_api::models::{
    self, PageTraceEvent, PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse,
    PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse,
    PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetSlruSegmentRequest,
    PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest, PagestreamNblocksResponse,
    PagestreamProtocolVersion, PagestreamRequest, TenantState,
};
use pageserver_api::reltag::SlruKind;
use pageserver_api::shard::TenantShardId;
use postgres_backend::{
    AuthType, PostgresBackend, PostgresBackendReader, QueryError, is_expected_io_error,
};
use postgres_ffi::BLCKSZ;
use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID;
use pq_proto::framed::ConnectionError;
use pq_proto::{BeMessage, FeMessage, FeStartupPacket, RowDescriptor};
use strum_macros::IntoStaticStr;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, BufWriter};
use tokio::task::JoinHandle;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::auth::{Claims, Scope, SwappableJwtAuth};
use utils::failpoint_support;
use utils::id::{TenantId, TimelineId};
use utils::logging::log_slow;
use utils::lsn::Lsn;
use utils::simple_rcu::RcuReadGuard;
use utils::sync::gate::{Gate, GateGuard};
use utils::sync::spsc_fold;

use crate::auth::check_permission;
use crate::basebackup::BasebackupError;
use crate::config::PageServerConf;
use crate::context::{DownloadBehavior, RequestContext};
use crate::metrics::{
    self, COMPUTE_COMMANDS_COUNTERS, ComputeCommandKind, LIVE_CONNECTIONS, SmgrOpTimer,
    TimelineMetrics,
};
use crate::pgdatadir_mapping::Version;
use crate::span::{
    debug_assert_current_span_has_tenant_and_timeline_id,
    debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
};
use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME, TaskKind};
use crate::tenant::mgr::{
    GetActiveTenantError, GetTenantError, ShardResolveResult, ShardSelector, TenantManager,
};
use crate::tenant::storage_layer::IoConcurrency;
use crate::tenant::timeline::{self, WaitLsnError};
use crate::tenant::{GetTimelineError, PageReconstructError, Timeline};
use crate::{basebackup, timed_after_cancellation};

/// How long we may wait for a [`crate::tenant::mgr::TenantSlot::InProgress`] and/or a [`crate::tenant::Tenant`] which
/// is not yet in state [`TenantState::Active`].
///
/// NB: this is a different value than [`crate::http::routes::ACTIVE_TENANT_TIMEOUT`].
const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);

/// Threshold at which to log slow GetPage requests.
const LOG_SLOW_GETPAGE_THRESHOLD: Duration = Duration::from_secs(30);

///////////////////////////////////////////////////////////////////////////////

pub struct Listener {
    cancel: CancellationToken,
    /// Cancel the listener task through `cancel` to shut down the listener
    /// and get a handle on the existing connections.
    task: JoinHandle<Connections>,
}

pub struct Connections {
    cancel: CancellationToken,
    tasks: tokio::task::JoinSet<ConnectionHandlerResult>,
    gate: Gate,
}

pub fn spawn(
    conf: &'static PageServerConf,
    tenant_manager: Arc<TenantManager>,
    pg_auth: Option<Arc<SwappableJwtAuth>>,
    tcp_listener: tokio::net::TcpListener,
) -> Listener {
    let cancel = CancellationToken::new();
    let libpq_ctx = RequestContext::todo_child(
        TaskKind::LibpqEndpointListener,
        // The listener task shouldn't need to download anything. (We will
        // create separate sub-contexts for each connection, with their
        // own download behavior. This context is used only to listen and
        // accept connections.)
        DownloadBehavior::Error,
    );
    let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
        "libpq listener",
        libpq_listener_main(
            conf,
            tenant_manager,
            pg_auth,
            tcp_listener,
            conf.pg_auth_type,
            conf.page_service_pipelining.clone(),
            libpq_ctx,
            cancel.clone(),
        )
        .map(anyhow::Ok),
    ));

    Listener { cancel, task }
}

impl Listener {
    pub async fn stop_accepting(self) -> Connections {
        self.cancel.cancel();
        self.task
            .await
            .expect("unreachable: we wrap the listener task in task_mgr::exit_on_panic_or_error")
    }
}
impl Connections {
    pub(crate) async fn shutdown(self) {
        let Self {
            cancel,
            mut tasks,
            gate,
        } = self;
        cancel.cancel();
        while let Some(res) = tasks.join_next().await {
            Self::handle_connection_completion(res);
        }
        gate.close().await;
    }

    fn handle_connection_completion(res: Result<anyhow::Result<()>, tokio::task::JoinError>) {
        match res {
            Ok(Ok(())) => {}
            Ok(Err(e)) => error!("error in page_service connection task: {:?}", e),
            Err(e) => error!("page_service connection task panicked: {:?}", e),
        }
    }
}
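
// A hedged sketch of the intended shutdown sequence (assuming a `listener`
// returned by `spawn` above; this is illustration, not part of the module):
//
//     let connections = listener.stop_accepting().await; // resolve the task into Connections
//     connections.shutdown().await;                      // cancel tasks, drain them, close the gate
//
// `stop_accepting` cancels only the accept loop; existing connections keep
// running until `Connections::shutdown` cancels and joins them.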

///
/// Main loop of the page service.
///
/// Listens for connections, and launches a new handler task for each.
///
/// Runs until cancelled via `listener_cancel`; on cancellation, returns the set of
/// open connections.
///
#[allow(clippy::too_many_arguments)]
pub async fn libpq_listener_main(
    conf: &'static PageServerConf,
    tenant_manager: Arc<TenantManager>,
    auth: Option<Arc<SwappableJwtAuth>>,
    listener: tokio::net::TcpListener,
    auth_type: AuthType,
    pipelining_config: PageServicePipeliningConfig,
    listener_ctx: RequestContext,
    listener_cancel: CancellationToken,
) -> Connections {
    let connections_cancel = CancellationToken::new();
    let connections_gate = Gate::default();
    let mut connection_handler_tasks = tokio::task::JoinSet::default();

    loop {
        let gate_guard = match connections_gate.enter() {
            Ok(guard) => guard,
            Err(_) => break,
        };

        let accepted = tokio::select! {
            biased;
            _ = listener_cancel.cancelled() => break,
            next = connection_handler_tasks.join_next(), if !connection_handler_tasks.is_empty() => {
                let res = next.expect("we don't poll while empty");
                Connections::handle_connection_completion(res);
                continue;
            }
            accepted = listener.accept() => accepted,
        };

        match accepted {
            Ok((socket, peer_addr)) => {
                // Connection established. Spawn a new task to handle it.
                debug!("accepted connection from {}", peer_addr);
                let local_auth = auth.clone();
                let connection_ctx = listener_ctx
                    .detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);
                connection_handler_tasks.spawn(page_service_conn_main(
                    conf,
                    tenant_manager.clone(),
                    local_auth,
                    socket,
                    auth_type,
                    pipelining_config.clone(),
                    connection_ctx,
                    connections_cancel.child_token(),
                    gate_guard,
                ));
            }
            Err(err) => {
                // accept() failed. Log the error, and loop back to retry on the next connection.
                error!("accept() failed: {:?}", err);
            }
        }
    }

    debug!("page_service listener loop terminated");

    Connections {
        cancel: connections_cancel,
        tasks: connection_handler_tasks,
        gate: connections_gate,
    }
}

type ConnectionHandlerResult = anyhow::Result<()>;

#[instrument(skip_all, fields(peer_addr, application_name))]
#[allow(clippy::too_many_arguments)]
async fn page_service_conn_main(
    conf: &'static PageServerConf,
    tenant_manager: Arc<TenantManager>,
    auth: Option<Arc<SwappableJwtAuth>>,
    socket: tokio::net::TcpStream,
    auth_type: AuthType,
    pipelining_config: PageServicePipeliningConfig,
    connection_ctx: RequestContext,
    cancel: CancellationToken,
    gate_guard: GateGuard,
) -> ConnectionHandlerResult {
    let _guard = LIVE_CONNECTIONS
        .with_label_values(&["page_service"])
        .guard();

    socket
        .set_nodelay(true)
        .context("could not set TCP_NODELAY")?;

    let socket_fd = socket.as_raw_fd();

    let peer_addr = socket.peer_addr().context("get peer address")?;
    tracing::Span::current().record("peer_addr", field::display(peer_addr));

    // Set up a read timeout of 10 minutes. The timeout is rather arbitrary; the requirements are:
    // - long enough for most valid compute connections
    // - less than infinite, to stop us from "leaking" connections to long-gone computes
    //
    // No write timeout is used, because the kernel is assumed to error writes after some time.
    let mut socket = tokio_io_timeout::TimeoutReader::new(socket);

    let default_timeout_ms = 10 * 60 * 1000; // 10 minutes by default
    let socket_timeout_ms = (|| {
        fail::fail_point!("simulated-bad-compute-connection", |avg_timeout_ms| {
            // Exponential distribution for simulating poor network conditions;
            // expect avg_timeout_ms to be around 15 in tests.
            if let Some(avg_timeout_ms) = avg_timeout_ms {
                let avg = avg_timeout_ms.parse::<i64>().unwrap() as f32;
                let u = rand::random::<f32>();
                ((1.0 - u).ln() / (-avg)) as u64
            } else {
                default_timeout_ms
            }
        });
        default_timeout_ms
    })();
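
    // For reference: the failpoint above draws the timeout via inverse transform
    // sampling. If U ~ Uniform(0, 1), then ln(1 - U) / (-lambda) is an
    // Exponential(lambda) sample; here the parsed avg_timeout_ms plays the role
    // of lambda (a hedged reading of the code, matching the comment above).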

    // A timeout here does not mean the client died; it can happen if it's just idle for
    // a while: we will tear down this PageServerHandler and instantiate a new one if/when
    // they reconnect.
    socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms)));
    let socket = Box::pin(socket);

    fail::fail_point!("ps::connection-start::pre-login");

    // XXX: pgbackend.run() should take the connection_ctx,
    // and create a child per-query context when it invokes process_query.
    // But it's in a shared crate, so we store connection_ctx inside PageServerHandler
    // and create the per-query context in process_query ourselves.
    let mut conn_handler = PageServerHandler::new(
        conf,
        tenant_manager,
        auth,
        pipelining_config,
        connection_ctx,
        cancel.clone(),
        gate_guard,
    );
    let pgbackend = PostgresBackend::new_from_io(socket_fd, socket, peer_addr, auth_type, None)?;

    match pgbackend.run(&mut conn_handler, &cancel).await {
        Ok(()) => {
            // we've been requested to shut down
            Ok(())
        }
        Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
            if is_expected_io_error(&io_error) {
                info!("Postgres client disconnected ({io_error})");
                Ok(())
            } else {
                let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
                Err(io_error).context(format!(
                    "Postgres connection error for tenant_id={:?} client at peer_addr={}",
                    tenant_id, peer_addr
                ))
            }
        }
        other => {
            let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
            other.context(format!(
                "Postgres query error for tenant_id={:?} client peer_addr={}",
                tenant_id, peer_addr
            ))
        }
    }
}

struct PageServerHandler {
    conf: &'static PageServerConf,
    auth: Option<Arc<SwappableJwtAuth>>,
    claims: Option<Claims>,

    /// The context created for the lifetime of the connection
    /// serviced by this PageServerHandler.
    /// For each query received over the connection,
    /// `process_query` creates a child context from this one.
    connection_ctx: RequestContext,

    cancel: CancellationToken,

    /// None only while the pagestream protocol is being processed.
    timeline_handles: Option<TimelineHandles>,

    pipelining_config: PageServicePipeliningConfig,

    gate_guard: GateGuard,
}

struct TimelineHandles {
    wrapper: TenantManagerWrapper,
    /// Note on size: the typical size of this map is 1.  The largest size we expect
    /// to see is the number of shards divided by the number of pageservers (typically < 2),
    /// or the ratio used when splitting shards (i.e. how many children are created from
    /// one parent shard), where a "large" number might be ~8.
    handles: timeline::handle::Cache<TenantManagerTypes>,
}

impl TimelineHandles {
    fn new(tenant_manager: Arc<TenantManager>) -> Self {
        Self {
            wrapper: TenantManagerWrapper {
                tenant_manager,
                tenant_id: OnceCell::new(),
            },
            handles: Default::default(),
        }
    }
    async fn get(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        shard_selector: ShardSelector,
    ) -> Result<timeline::handle::Handle<TenantManagerTypes>, GetActiveTimelineError> {
        if *self.wrapper.tenant_id.get_or_init(|| tenant_id) != tenant_id {
            return Err(GetActiveTimelineError::Tenant(
                GetActiveTenantError::SwitchedTenant,
            ));
        }
        self.handles
            .get(timeline_id, shard_selector, &self.wrapper)
            .await
            .map_err(|e| match e {
                timeline::handle::GetError::TenantManager(e) => e,
                timeline::handle::GetError::PerTimelineStateShutDown => {
                    trace!("per-timeline state shut down");
                    GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
                }
            })
    }

    fn tenant_id(&self) -> Option<TenantId> {
        self.wrapper.tenant_id.get().copied()
    }
}
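
// A hedged usage sketch of the per-connection handle cache above (illustrative
// only; `key` is hypothetical): the first get() pins the connection to a tenant
// via the OnceCell, and a later call with a different tenant_id fails with
// GetActiveTenantError::SwitchedTenant:
//
//     let handle = handles.get(tenant_id, timeline_id, ShardSelector::Page(key)).await?;
//     // repeated gets for the same shard are served from the `handles` cache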

pub(crate) struct TenantManagerWrapper {
    tenant_manager: Arc<TenantManager>,
    // We do not support switching tenant_id on a connection at this point.
    // We can add support for this later if needed without changing
    // the protocol.
    tenant_id: once_cell::sync::OnceCell<TenantId>,
}

#[derive(Debug)]
pub(crate) struct TenantManagerTypes;

impl timeline::handle::Types for TenantManagerTypes {
    type TenantManagerError = GetActiveTimelineError;
    type TenantManager = TenantManagerWrapper;
    type Timeline = TenantManagerCacheItem;
}

pub(crate) struct TenantManagerCacheItem {
    pub(crate) timeline: Arc<Timeline>,
    // allow() for cheap propagation through RequestContext inside a task
    #[allow(clippy::redundant_allocation)]
    pub(crate) metrics: Arc<Arc<TimelineMetrics>>,
    #[allow(dead_code)] // we store it to keep the gate open
    pub(crate) gate_guard: GateGuard,
}

impl std::ops::Deref for TenantManagerCacheItem {
    type Target = Arc<Timeline>;
    fn deref(&self) -> &Self::Target {
        &self.timeline
    }
}

impl timeline::handle::Timeline<TenantManagerTypes> for TenantManagerCacheItem {
    fn shard_timeline_id(&self) -> timeline::handle::ShardTimelineId {
        Timeline::shard_timeline_id(&self.timeline)
    }

    fn per_timeline_state(&self) -> &timeline::handle::PerTimelineState<TenantManagerTypes> {
        &self.timeline.handles
    }

    fn get_shard_identity(&self) -> &pageserver_api::shard::ShardIdentity {
        Timeline::get_shard_identity(&self.timeline)
    }
}

impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrapper {
    async fn resolve(
        &self,
        timeline_id: TimelineId,
        shard_selector: ShardSelector,
    ) -> Result<TenantManagerCacheItem, GetActiveTimelineError> {
        let tenant_id = self.tenant_id.get().expect("we set this in get()");
        let timeout = ACTIVE_TENANT_TIMEOUT;
        let wait_start = Instant::now();
        let deadline = wait_start + timeout;
        let tenant_shard = loop {
            let resolved = self
                .tenant_manager
                .resolve_attached_shard(tenant_id, shard_selector);
            match resolved {
                ShardResolveResult::Found(tenant_shard) => break tenant_shard,
                ShardResolveResult::NotFound => {
                    return Err(GetActiveTimelineError::Tenant(
                        GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
                    ));
                }
                ShardResolveResult::InProgress(barrier) => {
                    // We can't authoritatively answer right now: wait for the InProgress state
                    // to end, then try again.
                    tokio::select! {
                        _  = barrier.wait() => {
                            // The barrier completed: proceed around the loop to try looking up again
                        },
                        _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
                            return Err(GetActiveTimelineError::Tenant(GetActiveTenantError::WaitForActiveTimeout {
                                latest_state: None,
                                wait_time: timeout,
                            }));
                        }
                    }
                }
            };
        };

        tracing::debug!("Waiting for tenant to enter active state...");
        tenant_shard
            .wait_to_become_active(deadline.duration_since(Instant::now()))
            .await
            .map_err(GetActiveTimelineError::Tenant)?;

        let timeline = tenant_shard
            .get_timeline(timeline_id, true)
            .map_err(GetActiveTimelineError::Timeline)?;

        let gate_guard = match timeline.gate.enter() {
            Ok(guard) => guard,
            Err(_) => {
                return Err(GetActiveTimelineError::Timeline(
                    GetTimelineError::ShuttingDown,
                ));
            }
        };

        let metrics = Arc::new(Arc::clone(&timeline.metrics));

        Ok(TenantManagerCacheItem {
            timeline,
            metrics,
            gate_guard,
        })
    }
}
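
// The resolve() loop above is a retry-until-deadline pattern worth reading in
// isolation. A hedged, simplified sketch (Resolve and its variants are
// stand-ins for resolve_attached_shard and ShardResolveResult):
//
//     let deadline = Instant::now() + timeout;
//     let found = loop {
//         match resolve() {
//             Resolve::Found(v) => break v,
//             Resolve::NotFound => return Err(NotFound),
//             Resolve::InProgress(barrier) => tokio::select! {
//                 _ = barrier.wait() => continue, // slot settled; retry the lookup
//                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
//                     return Err(Timeout) // one overall budget, not per attempt
//                 }
//             },
//         }
//     };
//
// Note that the same deadline also bounds wait_to_become_active(), so shard
// resolution and tenant activation share a single ACTIVE_TENANT_TIMEOUT budget.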

#[derive(thiserror::Error, Debug)]
enum PageStreamError {
    /// We encountered an error that should prompt the client to reconnect:
    /// in practice this means we drop the connection without sending a response.
    #[error("Reconnect required: {0}")]
    Reconnect(Cow<'static, str>),

    /// We were instructed to shut down while processing the query
    #[error("Shutting down")]
    Shutdown,

    /// Something went wrong reading a page: this likely indicates a pageserver bug
    #[error("Read error")]
    Read(#[source] PageReconstructError),

    /// Ran out of time waiting for an LSN
    #[error("LSN timeout: {0}")]
    LsnTimeout(WaitLsnError),

    /// The entity required to serve the request (tenant or timeline) is not found,
    /// or is not in a suitable state to serve the request.
    #[error("Not found: {0}")]
    NotFound(Cow<'static, str>),

    /// Request asked for something that doesn't make sense, like an invalid LSN
    #[error("Bad request: {0}")]
    BadRequest(Cow<'static, str>),
}

impl From<PageReconstructError> for PageStreamError {
    fn from(value: PageReconstructError) -> Self {
        match value {
            PageReconstructError::Cancelled => Self::Shutdown,
            e => Self::Read(e),
        }
    }
}

impl From<GetActiveTimelineError> for PageStreamError {
    fn from(value: GetActiveTimelineError) -> Self {
        match value {
            GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled)
            | GetActiveTimelineError::Tenant(GetActiveTenantError::WillNotBecomeActive(
                TenantState::Stopping { .. },
            ))
            | GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown) => Self::Shutdown,
            GetActiveTimelineError::Tenant(e) => Self::NotFound(format!("{e}").into()),
            GetActiveTimelineError::Timeline(e) => Self::NotFound(format!("{e}").into()),
        }
    }
}

impl From<WaitLsnError> for PageStreamError {
    fn from(value: WaitLsnError) -> Self {
        match value {
            e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e),
            WaitLsnError::Shutdown => Self::Shutdown,
            e @ WaitLsnError::BadState { .. } => Self::Reconnect(format!("{e}").into()),
        }
    }
}

impl From<WaitLsnError> for QueryError {
    fn from(value: WaitLsnError) -> Self {
        match value {
            e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)),
            WaitLsnError::Shutdown => Self::Shutdown,
            WaitLsnError::BadState { .. } => Self::Reconnect,
        }
    }
}
     595              : #[derive(thiserror::Error, Debug)]
     596              : struct BatchedPageStreamError {
     597              :     req: PagestreamRequest,
     598              :     err: PageStreamError,
     599              : }
     600              : 
     601              : impl std::fmt::Display for BatchedPageStreamError {
     602            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     603            0 :         self.err.fmt(f)
     604            0 :     }
     605              : }
     606              : 
     607              : struct BatchedGetPageRequest {
     608              :     req: PagestreamGetPageRequest,
     609              :     timer: SmgrOpTimer,
     610              : }
     611              : 
     612              : #[cfg(feature = "testing")]
     613              : struct BatchedTestRequest {
     614              :     req: models::PagestreamTestRequest,
     615              :     timer: SmgrOpTimer,
     616              : }
     617              : 
     618              : /// NB: we only hold [`timeline::handle::WeakHandle`] inside this enum,
     619              : /// so that we don't keep the [`Timeline::gate`] open while the batch
     620              : /// is being built up inside the [`spsc_fold`] (pagestream pipelining).
     621              : #[derive(IntoStaticStr)]
     622              : enum BatchedFeMessage {
     623              :     Exists {
     624              :         span: Span,
     625              :         timer: SmgrOpTimer,
     626              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     627              :         req: models::PagestreamExistsRequest,
     628              :     },
     629              :     Nblocks {
     630              :         span: Span,
     631              :         timer: SmgrOpTimer,
     632              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     633              :         req: models::PagestreamNblocksRequest,
     634              :     },
     635              :     GetPage {
     636              :         span: Span,
     637              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     638              :         effective_request_lsn: Lsn,
     639              :         pages: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
     640              :     },
     641              :     DbSize {
     642              :         span: Span,
     643              :         timer: SmgrOpTimer,
     644              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     645              :         req: models::PagestreamDbSizeRequest,
     646              :     },
     647              :     GetSlruSegment {
     648              :         span: Span,
     649              :         timer: SmgrOpTimer,
     650              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     651              :         req: models::PagestreamGetSlruSegmentRequest,
     652              :     },
     653              :     #[cfg(feature = "testing")]
     654              :     Test {
     655              :         span: Span,
     656              :         shard: timeline::handle::WeakHandle<TenantManagerTypes>,
     657              :         requests: Vec<BatchedTestRequest>,
     658              :     },
     659              :     RespondError {
     660              :         span: Span,
     661              :         error: BatchedPageStreamError,
     662              :     },
     663              : }
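
// A hedged sketch of the weak-handle discipline described in the doc comment
// on BatchedFeMessage (the exact WeakHandle API is assumed here): batching
// stores only weak handles, and the execution stage re-acquires a strong
// Handle, and with it the timeline gate, just in time:
//
//     let shard = weak_shard.upgrade()?; // fails if the timeline shut down meanwhile
//     // ... serve the batched request through `shard`, then drop it promptly ...
//
// This keeps a batch parked in the spsc_fold queue from blocking timeline shutdown.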

impl BatchedFeMessage {
    fn as_static_str(&self) -> &'static str {
        self.into()
    }

    fn observe_execution_start(&mut self, at: Instant) {
        match self {
            BatchedFeMessage::Exists { timer, .. }
            | BatchedFeMessage::Nblocks { timer, .. }
            | BatchedFeMessage::DbSize { timer, .. }
            | BatchedFeMessage::GetSlruSegment { timer, .. } => {
                timer.observe_execution_start(at);
            }
            BatchedFeMessage::GetPage { pages, .. } => {
                for page in pages {
                    page.timer.observe_execution_start(at);
                }
            }
            #[cfg(feature = "testing")]
            BatchedFeMessage::Test { requests, .. } => {
                for req in requests {
                    req.timer.observe_execution_start(at);
                }
            }
            BatchedFeMessage::RespondError { .. } => {}
        }
    }
}

impl PageServerHandler {
    pub fn new(
        conf: &'static PageServerConf,
        tenant_manager: Arc<TenantManager>,
        auth: Option<Arc<SwappableJwtAuth>>,
        pipelining_config: PageServicePipeliningConfig,
        connection_ctx: RequestContext,
        cancel: CancellationToken,
        gate_guard: GateGuard,
    ) -> Self {
        PageServerHandler {
            conf,
            auth,
            claims: None,
            connection_ctx,
            timeline_handles: Some(TimelineHandles::new(tenant_manager)),
            cancel,
            pipelining_config,
            gate_guard,
        }
    }

    /// This function always respects cancellation of any timeline in [`Self::shard_timelines`].  Pass in
    /// a cancellation token at the next scope up (such as a tenant cancellation token) to ensure we respect
    /// cancellation if there aren't any timelines in the cache.
    ///
    /// If calling from a function that doesn't use the [`Self::shard_timelines`] cache, then pass in the
    /// timeline cancellation token.
    async fn flush_cancellable<IO>(
        &self,
        pgb: &mut PostgresBackend<IO>,
        cancel: &CancellationToken,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        tokio::select!(
            flush_r = pgb.flush() => {
                Ok(flush_r?)
            },
            _ = cancel.cancelled() => {
                Err(QueryError::Shutdown)
            }
        )
    }

    #[allow(clippy::too_many_arguments)]
    async fn pagestream_read_message<IO>(
        pgb: &mut PostgresBackendReader<IO>,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        timeline_handles: &mut TimelineHandles,
        cancel: &CancellationToken,
        ctx: &RequestContext,
        protocol_version: PagestreamProtocolVersion,
        parent_span: Span,
    ) -> Result<Option<BatchedFeMessage>, QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    {
        let msg = tokio::select! {
            biased;
            _ = cancel.cancelled() => {
                return Err(QueryError::Shutdown)
            }
            msg = pgb.read_message() => { msg }
        };

        let received_at = Instant::now();

        let copy_data_bytes = match msg? {
            Some(FeMessage::CopyData(bytes)) => bytes,
            Some(FeMessage::Terminate) => {
                return Ok(None);
            }
            Some(m) => {
                return Err(QueryError::Other(anyhow::anyhow!(
                    "unexpected message: {m:?} during COPY"
                )));
            }
            None => {
                return Ok(None);
            } // client disconnected
        };
        trace!("query: {copy_data_bytes:?}");

        fail::fail_point!("ps::handle-pagerequest-message");

        // parse request
        let neon_fe_msg =
            PagestreamFeMessage::parse(&mut copy_data_bytes.reader(), protocol_version)?;

        // TODO: turn into an async closure once available, to avoid repeating received_at
        async fn record_op_start_and_throttle(
            shard: &timeline::handle::Handle<TenantManagerTypes>,
            op: metrics::SmgrQueryType,
            received_at: Instant,
        ) -> Result<SmgrOpTimer, QueryError> {
            // It's important to start the smgr op metric recorder as early as possible
            // so that the _started counters are incremented before we do
            // any serious waiting, e.g., for throttle, batching, or actual request handling.
            let mut timer = shard.query_metrics.start_smgr_op(op, received_at);
            let now = Instant::now();
            timer.observe_throttle_start(now);
            let throttled = tokio::select! {
                res = shard.pagestream_throttle.throttle(1, now) => res,
                _ = shard.cancel.cancelled() => return Err(QueryError::Shutdown),
            };
            timer.observe_throttle_done(throttled);
            Ok(timer)
        }

        let batched_msg = match neon_fe_msg {
            PagestreamFeMessage::Exists(req) => {
                let shard = timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Zero)
                    .await?;
                debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
                let span = tracing::info_span!(parent: &parent_span, "handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
                let timer = record_op_start_and_throttle(
                    &shard,
                    metrics::SmgrQueryType::GetRelExists,
                    received_at,
                )
                .await?;
                BatchedFeMessage::Exists {
                    span,
                    timer,
                    shard: shard.downgrade(),
                    req,
                }
            }
            PagestreamFeMessage::Nblocks(req) => {
                let shard = timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Zero)
                    .await?;
                let span = tracing::info_span!(parent: &parent_span, "handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
                let timer = record_op_start_and_throttle(
                    &shard,
                    metrics::SmgrQueryType::GetRelSize,
                    received_at,
                )
                .await?;
                BatchedFeMessage::Nblocks {
                    span,
                    timer,
                    shard: shard.downgrade(),
                    req,
                }
            }
            PagestreamFeMessage::DbSize(req) => {
                let shard = timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Zero)
                    .await?;
                let span = tracing::info_span!(parent: &parent_span, "handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
                let timer = record_op_start_and_throttle(
                    &shard,
                    metrics::SmgrQueryType::GetDbSize,
                    received_at,
                )
                .await?;
                BatchedFeMessage::DbSize {
                    span,
                    timer,
                    shard: shard.downgrade(),
                    req,
                }
            }
            PagestreamFeMessage::GetSlruSegment(req) => {
                let shard = timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Zero)
                    .await?;
                let span = tracing::info_span!(parent: &parent_span, "handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
                let timer = record_op_start_and_throttle(
                    &shard,
                    metrics::SmgrQueryType::GetSlruSegment,
                    received_at,
                )
                .await?;
                BatchedFeMessage::GetSlruSegment {
                    span,
                    timer,
                    shard: shard.downgrade(),
                    req,
                }
            }
            PagestreamFeMessage::GetPage(req) => {
                // avoid a somewhat costly Span::record() by constructing the entire span in one go.
                macro_rules! mkspan {
                    (before shard routing) => {{
                        tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn)
                    }};
                    ($shard_id:expr) => {{
                        tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn, shard_id = %$shard_id)
                    }};
                }

                macro_rules! respond_error {
                    ($span:expr, $error:expr) => {{
                        let error = BatchedFeMessage::RespondError {
                            span: $span,
                            error: BatchedPageStreamError {
                                req: req.hdr,
                                err: $error,
                            },
                        };
                        Ok(Some(error))
                    }};
                }

                let key = rel_block_to_key(req.rel, req.blkno);
                let shard = match timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Page(key))
                    .await
                {
                    Ok(tl) => tl,
                    Err(e) => {
                        let span = mkspan!(before shard routing);
                        match e {
                            GetActiveTimelineError::Tenant(GetActiveTenantError::NotFound(_)) => {
                                // We already know this tenant exists in general, because we resolved it at
                                // the start of the connection.  Getting a NotFound here indicates that the shard
                                // containing the requested page is not present on this node: the client's knowledge
                                // of the shard->pageserver mapping is out of date.
                                //
                                // Closing the connection by returning `PageStreamError::Reconnect` has the side effect
                                // of rate-limiting the above message, via the client's reconnect backoff, as well as
                                // hopefully prompting the client to load its updated configuration and talk to a
                                // different pageserver.
                                return respond_error!(
                                    span,
                                    PageStreamError::Reconnect(
                                        "getpage@lsn request routed to wrong shard".into()
                                    )
                                );
                            }
                            e => {
                                return respond_error!(span, e.into());
                            }
                        }
                    }
                };
                let span = mkspan!(shard.tenant_shard_id.shard_slug());

                let timer = record_op_start_and_throttle(
                    &shard,
                    metrics::SmgrQueryType::GetPageAtLsn,
                    received_at,
                )
                .await?;

                // We're holding the Handle
                let effective_request_lsn = match Self::wait_or_get_last_lsn(
                    &shard,
                    req.hdr.request_lsn,
                    req.hdr.not_modified_since,
                    &shard.get_applied_gc_cutoff_lsn(),
                    ctx,
                )
                // TODO: if we actually need to wait for the LSN here, it delays the entire batch, which doesn't need to wait
                .await
                {
                    Ok(lsn) => lsn,
                    Err(e) => {
                        return respond_error!(span, e);
                    }
                };
                BatchedFeMessage::GetPage {
                    span,
                    shard: shard.downgrade(),
                    effective_request_lsn,
                    pages: smallvec::smallvec![BatchedGetPageRequest { req, timer }],
                }
            }
            #[cfg(feature = "testing")]
            PagestreamFeMessage::Test(req) => {
                let shard = timeline_handles
                    .get(tenant_id, timeline_id, ShardSelector::Zero)
                    .await?;
                let span = tracing::info_span!(parent: &parent_span, "handle_test_request", shard_id = %shard.tenant_shard_id.shard_slug());
                let timer =
                    record_op_start_and_throttle(&shard, metrics::SmgrQueryType::Test, received_at)
                        .await?;
                BatchedFeMessage::Test {
                    span,
     978            0 :                     shard: shard.downgrade(),
     979            0 :                     requests: vec![BatchedTestRequest { req, timer }],
     980            0 :                 }
     981              :             }
     982              :         };
     983            0 :         Ok(Some(batched_msg))
     984            0 :     }
     985              : 
     986              :     /// Post-condition: `batch` is Some()
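                       :     ///
                       :     /// The `Err` contract mirrors the `spsc_fold` send callback: `Ok(())` means
                       :     /// `this_msg` was folded into `batch`, while `Err(this_msg)` means the two
                       :     /// are unbatchable and the caller must ship the current `batch` before
                       :     /// starting a new one. A hedged sketch of how a caller could drive this
                       :     /// fold (`next_msg` and `execute` are illustrative stand-ins, not real API):
                       :     ///
                       :     /// ```ignore
                       :     /// let mut batch = Ok(first_msg);
                       :     /// while let Some(msg) = next_msg().await {
                       :     ///     match Self::pagestream_do_batch(max_batch_size, &mut batch, Ok(msg)) {
                       :     ///         Ok(()) => {} // folded into the current batch
                       :     ///         Err(unbatchable) => {
                       :     ///             // ship the finished batch, then start a new one
                       :     ///             execute(std::mem::replace(&mut batch, unbatchable)).await;
                       :     ///         }
                       :     ///     }
                       :     /// }
                       :     /// ```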
     987              :     #[instrument(skip_all, level = tracing::Level::TRACE)]
     988              :     #[allow(clippy::boxed_local)]
     989              :     fn pagestream_do_batch(
     990              :         max_batch_size: NonZeroUsize,
     991              :         batch: &mut Result<BatchedFeMessage, QueryError>,
     992              :         this_msg: Result<BatchedFeMessage, QueryError>,
     993              :     ) -> Result<(), Result<BatchedFeMessage, QueryError>> {
     994              :         debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
     995              : 
     996              :         let this_msg = match this_msg {
     997              :             Ok(this_msg) => this_msg,
     998              :             Err(e) => return Err(Err(e)),
     999              :         };
    1000              : 
    1001              :         match (&mut *batch, this_msg) {
    1002              :             // something batched already, let's see if we can add this message to the batch
    1003              :             (
    1004              :                 Ok(BatchedFeMessage::GetPage {
    1005              :                     span: _,
    1006              :                     shard: accum_shard,
    1007              :                     pages: accum_pages,
    1008              :                     effective_request_lsn: accum_lsn,
    1009              :                 }),
    1010              :                 BatchedFeMessage::GetPage {
    1011              :                     span: _,
    1012              :                     shard: this_shard,
    1013              :                     pages: this_pages,
    1014              :                     effective_request_lsn: this_lsn,
    1015              :                 },
    1016            0 :             ) if (|| {
    1017            0 :                 assert_eq!(this_pages.len(), 1);
    1018            0 :                 if accum_pages.len() >= max_batch_size.get() {
    1019            0 :                     trace!(%accum_lsn, %this_lsn, %max_batch_size, "stopping batching because of batch size");
    1020            0 :                     assert_eq!(accum_pages.len(), max_batch_size.get());
    1021            0 :                     return false;
    1022            0 :                 }
    1023            0 :                 if !accum_shard.is_same_handle_as(&this_shard) {
    1024            0 :                     trace!(%accum_lsn, %this_lsn, "stopping batching because timeline object mismatch");
     1025              :                     // TODO: we _could_ batch & execute each shard separately (and in parallel).
    1026              :                     // But the current logic for keeping responses in order does not support that.
    1027            0 :                     return false;
    1028            0 :                 }
     1029            0 :                 // the vectored get currently only supports a single LSN, so stop batching as soon
     1030            0 :                 // as the effective request_lsn changes
    1031            0 :                 if *accum_lsn != this_lsn {
    1032            0 :                     trace!(%accum_lsn, %this_lsn, "stopping batching because LSN changed");
    1033            0 :                     return false;
    1034            0 :                 }
    1035            0 :                 true
    1036              :             })() =>
    1037              :             {
    1038              :                 // ok to batch
    1039              :                 accum_pages.extend(this_pages);
    1040              :                 Ok(())
    1041              :             }
    1042              :             #[cfg(feature = "testing")]
    1043              :             (
    1044              :                 Ok(BatchedFeMessage::Test {
    1045              :                     shard: accum_shard,
    1046              :                     requests: accum_requests,
    1047              :                     ..
    1048              :                 }),
    1049              :                 BatchedFeMessage::Test {
    1050              :                     shard: this_shard,
    1051              :                     requests: this_requests,
    1052              :                     ..
    1053              :                 },
    1054            0 :             ) if (|| {
     1055            0 :                 assert_eq!(this_requests.len(), 1);
    1056            0 :                 if accum_requests.len() >= max_batch_size.get() {
    1057            0 :                     trace!(%max_batch_size, "stopping batching because of batch size");
    1058            0 :                     assert_eq!(accum_requests.len(), max_batch_size.get());
    1059            0 :                     return false;
    1060            0 :                 }
    1061            0 :                 if !accum_shard.is_same_handle_as(&this_shard) {
    1062            0 :                     trace!("stopping batching because timeline object mismatch");
     1063              :                     // TODO: we _could_ batch & execute each shard separately (and in parallel).
    1064              :                     // But the current logic for keeping responses in order does not support that.
    1065            0 :                     return false;
    1066            0 :                 }
    1067            0 :                 let this_batch_key = this_requests[0].req.batch_key;
    1068            0 :                 let accum_batch_key = accum_requests[0].req.batch_key;
     1069            0 :                 if this_batch_key != accum_batch_key {
    1070            0 :                     trace!(%accum_batch_key, %this_batch_key, "stopping batching because batch key changed");
    1071            0 :                     return false;
    1072            0 :                 }
    1073            0 :                 true
    1074              :             })() =>
    1075              :             {
    1076              :                 // ok to batch
    1077              :                 accum_requests.extend(this_requests);
    1078              :                 Ok(())
    1079              :             }
    1080              :             // something batched already but this message is unbatchable
    1081              :             (_, this_msg) => {
    1082              :                 // by default, don't continue batching
    1083              :                 Err(Ok(this_msg))
    1084              :             }
    1085              :         }
    1086              :     }
    1087              : 
    1088            0 :     #[instrument(level = tracing::Level::DEBUG, skip_all)]
     1089              :     async fn pagestream_handle_batched_message<IO>(
    1090              :         &mut self,
    1091              :         pgb_writer: &mut PostgresBackend<IO>,
    1092              :         batch: BatchedFeMessage,
    1093              :         io_concurrency: IoConcurrency,
    1094              :         cancel: &CancellationToken,
    1095              :         protocol_version: PagestreamProtocolVersion,
    1096              :         ctx: &RequestContext,
    1097              :     ) -> Result<(), QueryError>
    1098              :     where
    1099              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    1100              :     {
    1101              :         let started_at = Instant::now();
    1102              :         let batch = {
    1103              :             let mut batch = batch;
    1104              :             batch.observe_execution_start(started_at);
    1105              :             batch
    1106              :         };
    1107              : 
    1108              :         // Dispatch the batch to the appropriate request handler.
    1109              :         let (mut handler_results, span) = log_slow(
    1110              :             batch.as_static_str(),
    1111              :             LOG_SLOW_GETPAGE_THRESHOLD,
    1112              :             self.pagestream_dispatch_batched_message(batch, io_concurrency, ctx),
    1113              :         )
    1114              :         .await?;
    1115              : 
    1116              :         // We purposefully don't count flush time into the smgr operation timer.
    1117              :         //
     1118              :         // The reason is that the current compute client will not perform protocol processing
    1119              :         // if the postgres backend process is doing things other than `->smgr_read()`.
    1120              :         // This is especially the case for prefetch.
    1121              :         //
    1122              :         // If the compute doesn't read from the connection, eventually TCP will backpressure
    1123              :         // all the way into our flush call below.
    1124              :         //
    1125              :         // The timer's underlying metric is used for a storage-internal latency SLO and
    1126              :         // we don't want to include latency in it that we can't control.
    1127              :         // And as pointed out above, in this case, we don't control the time that flush will take.
    1128              :         //
    1129              :         // We put each response in the batch onto the wire in a separate pgb_writer.flush()
    1130              :         // call, which (all unmeasured) adds syscall overhead but reduces time to first byte
    1131              :         // and avoids building up a "giant" contiguous userspace buffer to hold the entire response.
    1132              :         // TODO: vectored socket IO would be great, but pgb_writer doesn't support that.
    1133              :         let flush_timers = {
    1134              :             let flushing_start_time = Instant::now();
    1135              :             let mut flush_timers = Vec::with_capacity(handler_results.len());
    1136              :             for handler_result in &mut handler_results {
    1137              :                 let flush_timer = match handler_result {
    1138              :                     Ok((_, timer)) => Some(
    1139              :                         timer
    1140              :                             .observe_execution_end(flushing_start_time)
    1141              :                             .expect("we are the first caller"),
    1142              :                     ),
    1143              :                     Err(_) => {
    1144              :                         // TODO: measure errors
    1145              :                         None
    1146              :                     }
    1147              :                 };
    1148              :                 flush_timers.push(flush_timer);
    1149              :             }
    1150              :             assert_eq!(flush_timers.len(), handler_results.len());
    1151              :             flush_timers
    1152              :         };
    1153              : 
    1154              :         // Map handler result to protocol behavior.
    1155              :         // Some handler errors cause exit from pagestream protocol.
    1156              :         // Other handler errors are sent back as an error message and we stay in pagestream protocol.
    1157              :         for (handler_result, flushing_timer) in handler_results.into_iter().zip(flush_timers) {
    1158              :             let response_msg = match handler_result {
    1159              :                 Err(e) => match &e.err {
    1160              :                     PageStreamError::Shutdown => {
    1161              :                         // If we fail to fulfil a request during shutdown, which may be _because_ of
    1162              :                         // shutdown, then do not send the error to the client.  Instead just drop the
    1163              :                         // connection.
    1164            0 :                         span.in_scope(|| info!("dropping connection due to shutdown"));
    1165              :                         return Err(QueryError::Shutdown);
    1166              :                     }
    1167              :                     PageStreamError::Reconnect(reason) => {
    1168            0 :                         span.in_scope(|| info!("handler requested reconnect: {reason}"));
    1169              :                         return Err(QueryError::Reconnect);
    1170              :                     }
    1171              :                     PageStreamError::Read(_)
    1172              :                     | PageStreamError::LsnTimeout(_)
    1173              :                     | PageStreamError::NotFound(_)
    1174              :                     | PageStreamError::BadRequest(_) => {
     1175              :                         // print all the details to the log with {:#}, but for the client the
    1176              :                         // error message is enough.  Do not log if shutting down, as the anyhow::Error
    1177              :                         // here includes cancellation which is not an error.
    1178              :                         let full = utils::error::report_compact_sources(&e.err);
    1179            0 :                         span.in_scope(|| {
    1180            0 :                             error!("error reading relation or page version: {full:#}")
    1181            0 :                         });
    1182              : 
    1183              :                         PagestreamBeMessage::Error(PagestreamErrorResponse {
    1184              :                             req: e.req,
    1185              :                             message: e.err.to_string(),
    1186              :                         })
    1187              :                     }
    1188              :                 },
    1189              :                 Ok((response_msg, _op_timer_already_observed)) => response_msg,
    1190              :             };
    1191              : 
    1192              :             //
    1193              :             // marshal & transmit response message
    1194              :             //
    1195              : 
    1196              :             pgb_writer.write_message_noflush(&BeMessage::CopyData(
    1197              :                 &response_msg.serialize(protocol_version),
    1198              :             ))?;
    1199              : 
    1200              :             failpoint_support::sleep_millis_async!("before-pagestream-msg-flush", cancel);
    1201              : 
    1202              :             // what we want to do
    1203              :             let socket_fd = pgb_writer.socket_fd;
    1204              :             let flush_fut = pgb_writer.flush();
    1205              :             // metric for how long flushing takes
    1206              :             let flush_fut = match flushing_timer {
    1207              :                 Some(flushing_timer) => futures::future::Either::Left(flushing_timer.measure(
    1208              :                     Instant::now(),
    1209              :                     flush_fut,
    1210              :                     socket_fd,
    1211              :                 )),
    1212              :                 None => futures::future::Either::Right(flush_fut),
    1213              :             };
    1214              :             // do it while respecting cancellation
    1215            0 :             let _: () = async move {
    1216            0 :                 tokio::select! {
    1217              :                     biased;
    1218            0 :                     _ = cancel.cancelled() => {
    1219              :                         // We were requested to shut down.
    1220            0 :                         info!("shutdown request received in page handler");
    1221            0 :                         return Err(QueryError::Shutdown)
    1222              :                     }
    1223            0 :                     res = flush_fut => {
    1224            0 :                         res?;
    1225              :                     }
    1226              :                 }
    1227            0 :                 Ok(())
    1228            0 :             }
    1229              :             .await?;
    1230              :         }
    1231              :         Ok(())
    1232              :     }
    1233              : 
    1234              :     /// Helper which dispatches a batched message to the appropriate handler.
    1235              :     /// Returns a vec of results, along with the extracted trace span.
    1236            0 :     async fn pagestream_dispatch_batched_message(
    1237            0 :         &mut self,
    1238            0 :         batch: BatchedFeMessage,
    1239            0 :         io_concurrency: IoConcurrency,
    1240            0 :         ctx: &RequestContext,
    1241            0 :     ) -> Result<
    1242            0 :         (
    1243            0 :             Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>>,
    1244            0 :             Span,
    1245            0 :         ),
    1246            0 :         QueryError,
    1247            0 :     > {
    1248              :         macro_rules! upgrade_handle_and_set_context {
    1249              :             ($shard:ident) => {{
    1250              :                 let weak_handle = &$shard;
    1251              :                 let handle = weak_handle.upgrade()?;
    1252              :                 let ctx = ctx.with_scope_page_service_pagestream(&handle);
    1253              :                 (handle, ctx)
    1254              :             }};
    1255              :         }
    1256            0 :         Ok(match batch {
    1257              :             BatchedFeMessage::Exists {
    1258            0 :                 span,
    1259            0 :                 timer,
    1260            0 :                 shard,
    1261            0 :                 req,
    1262            0 :             } => {
    1263            0 :                 fail::fail_point!("ps::handle-pagerequest-message::exists");
    1264            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1265              :                 (
    1266            0 :                     vec![
    1267            0 :                         self.handle_get_rel_exists_request(&shard, &req, &ctx)
    1268            0 :                             .instrument(span.clone())
    1269            0 :                             .await
    1270            0 :                             .map(|msg| (msg, timer))
    1271            0 :                             .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
    1272            0 :                     ],
    1273            0 :                     span,
    1274              :                 )
    1275              :             }
    1276              :             BatchedFeMessage::Nblocks {
    1277            0 :                 span,
    1278            0 :                 timer,
    1279            0 :                 shard,
    1280            0 :                 req,
    1281            0 :             } => {
    1282            0 :                 fail::fail_point!("ps::handle-pagerequest-message::nblocks");
    1283            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1284              :                 (
    1285            0 :                     vec![
    1286            0 :                         self.handle_get_nblocks_request(&shard, &req, &ctx)
    1287            0 :                             .instrument(span.clone())
    1288            0 :                             .await
    1289            0 :                             .map(|msg| (msg, timer))
    1290            0 :                             .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
    1291            0 :                     ],
    1292            0 :                     span,
    1293              :                 )
    1294              :             }
    1295              :             BatchedFeMessage::GetPage {
    1296            0 :                 span,
    1297            0 :                 shard,
    1298            0 :                 effective_request_lsn,
    1299            0 :                 pages,
    1300            0 :             } => {
    1301            0 :                 fail::fail_point!("ps::handle-pagerequest-message::getpage");
    1302            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1303              :                 (
    1304              :                     {
    1305            0 :                         let npages = pages.len();
    1306            0 :                         trace!(npages, "handling getpage request");
    1307            0 :                         let res = self
    1308            0 :                             .handle_get_page_at_lsn_request_batched(
    1309            0 :                                 &shard,
    1310            0 :                                 effective_request_lsn,
    1311            0 :                                 pages,
    1312            0 :                                 io_concurrency,
    1313            0 :                                 &ctx,
    1314            0 :                             )
    1315            0 :                             .instrument(span.clone())
    1316            0 :                             .await;
    1317            0 :                         assert_eq!(res.len(), npages);
    1318            0 :                         res
    1319            0 :                     },
    1320            0 :                     span,
    1321              :                 )
    1322              :             }
    1323              :             BatchedFeMessage::DbSize {
    1324            0 :                 span,
    1325            0 :                 timer,
    1326            0 :                 shard,
    1327            0 :                 req,
    1328            0 :             } => {
    1329            0 :                 fail::fail_point!("ps::handle-pagerequest-message::dbsize");
    1330            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1331              :                 (
    1332            0 :                     vec![
    1333            0 :                         self.handle_db_size_request(&shard, &req, &ctx)
    1334            0 :                             .instrument(span.clone())
    1335            0 :                             .await
    1336            0 :                             .map(|msg| (msg, timer))
    1337            0 :                             .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
    1338            0 :                     ],
    1339            0 :                     span,
    1340              :                 )
    1341              :             }
    1342              :             BatchedFeMessage::GetSlruSegment {
    1343            0 :                 span,
    1344            0 :                 timer,
    1345            0 :                 shard,
    1346            0 :                 req,
    1347            0 :             } => {
    1348            0 :                 fail::fail_point!("ps::handle-pagerequest-message::slrusegment");
    1349            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1350              :                 (
    1351            0 :                     vec![
    1352            0 :                         self.handle_get_slru_segment_request(&shard, &req, &ctx)
    1353            0 :                             .instrument(span.clone())
    1354            0 :                             .await
    1355            0 :                             .map(|msg| (msg, timer))
    1356            0 :                             .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
    1357            0 :                     ],
    1358            0 :                     span,
    1359              :                 )
    1360              :             }
    1361              :             #[cfg(feature = "testing")]
    1362              :             BatchedFeMessage::Test {
    1363            0 :                 span,
    1364            0 :                 shard,
    1365            0 :                 requests,
    1366            0 :             } => {
    1367            0 :                 fail::fail_point!("ps::handle-pagerequest-message::test");
    1368            0 :                 let (shard, ctx) = upgrade_handle_and_set_context!(shard);
    1369              :                 (
    1370              :                     {
    1371            0 :                         let npages = requests.len();
     1372            0 :                         trace!(npages, "handling test request batch");
    1373            0 :                         let res = self
    1374            0 :                             .handle_test_request_batch(&shard, requests, &ctx)
    1375            0 :                             .instrument(span.clone())
    1376            0 :                             .await;
    1377            0 :                         assert_eq!(res.len(), npages);
    1378            0 :                         res
    1379            0 :                     },
    1380            0 :                     span,
    1381              :                 )
    1382              :             }
    1383            0 :             BatchedFeMessage::RespondError { span, error } => {
    1384            0 :                 // We've already decided to respond with an error, so we don't need to
    1385            0 :                 // call the handler.
    1386            0 :                 (vec![Err(error)], span)
    1387              :             }
    1388              :         })
    1389            0 :     }
    1390              : 
    1391              :     /// Pagestream sub-protocol handler.
    1392              :     ///
    1393              :     /// It is a simple request-response protocol inside a COPYBOTH session.
    1394              :     ///
    1395              :     /// # Coding Discipline
    1396              :     ///
    1397              :     /// Coding discipline within this function: all interaction with the `pgb` connection
    1398              :     /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
     1399              :     /// This is so that we can shut down page_service quickly.
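                       :     ///
                       :     /// A minimal sketch of the pattern this implies (`fut` stands in for any
                       :     /// `pgb` I/O future; the real arms appear inline below):
                       :     ///
                       :     /// ```ignore
                       :     /// tokio::select! {
                       :     ///     biased;
                       :     ///     _ = self.cancel.cancelled() => return Err(QueryError::Shutdown),
                       :     ///     res = fut => res?,
                       :     /// }
                       :     /// ```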
    1400              :     #[instrument(skip_all)]
    1401              :     async fn handle_pagerequests<IO>(
    1402              :         &mut self,
    1403              :         pgb: &mut PostgresBackend<IO>,
    1404              :         tenant_id: TenantId,
    1405              :         timeline_id: TimelineId,
    1406              :         protocol_version: PagestreamProtocolVersion,
    1407              :         ctx: RequestContext,
    1408              :     ) -> Result<(), QueryError>
    1409              :     where
    1410              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1411              :     {
    1412              :         debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
    1413              : 
    1414              :         // switch client to COPYBOTH
    1415              :         pgb.write_message_noflush(&BeMessage::CopyBothResponse)?;
    1416              :         tokio::select! {
    1417              :             biased;
    1418              :             _ = self.cancel.cancelled() => {
    1419              :                 return Err(QueryError::Shutdown)
    1420              :             }
    1421              :             res = pgb.flush() => {
    1422              :                 res?;
    1423              :             }
    1424              :         }
    1425              : 
    1426              :         let io_concurrency = IoConcurrency::spawn_from_conf(
    1427              :             self.conf,
    1428              :             match self.gate_guard.try_clone() {
    1429              :                 Ok(guard) => guard,
    1430              :                 Err(_) => {
    1431              :                     info!("shutdown request received in page handler");
    1432              :                     return Err(QueryError::Shutdown);
    1433              :                 }
    1434              :             },
    1435              :         );
    1436              : 
    1437              :         let pgb_reader = pgb
    1438              :             .split()
    1439              :             .context("implementation error: split pgb into reader and writer")?;
    1440              : 
    1441              :         let timeline_handles = self
    1442              :             .timeline_handles
    1443              :             .take()
    1444              :             .expect("implementation error: timeline_handles should not be locked");
    1445              : 
    1446              :         let request_span = info_span!("request");
    1447              :         let ((pgb_reader, timeline_handles), result) = match self.pipelining_config.clone() {
    1448              :             PageServicePipeliningConfig::Pipelined(pipelining_config) => {
    1449              :                 self.handle_pagerequests_pipelined(
    1450              :                     pgb,
    1451              :                     pgb_reader,
    1452              :                     tenant_id,
    1453              :                     timeline_id,
    1454              :                     timeline_handles,
    1455              :                     request_span,
    1456              :                     pipelining_config,
    1457              :                     protocol_version,
    1458              :                     io_concurrency,
    1459              :                     &ctx,
    1460              :                 )
    1461              :                 .await
    1462              :             }
    1463              :             PageServicePipeliningConfig::Serial => {
    1464              :                 self.handle_pagerequests_serial(
    1465              :                     pgb,
    1466              :                     pgb_reader,
    1467              :                     tenant_id,
    1468              :                     timeline_id,
    1469              :                     timeline_handles,
    1470              :                     request_span,
    1471              :                     protocol_version,
    1472              :                     io_concurrency,
    1473              :                     &ctx,
    1474              :                 )
    1475              :                 .await
    1476              :             }
    1477              :         };
    1478              : 
    1479              :         debug!("pagestream subprotocol shut down cleanly");
    1480              : 
    1481              :         pgb.unsplit(pgb_reader)
    1482              :             .context("implementation error: unsplit pgb")?;
    1483              : 
    1484              :         let replaced = self.timeline_handles.replace(timeline_handles);
    1485              :         assert!(replaced.is_none());
    1486              : 
    1487              :         result
    1488              :     }
    1489              : 
    1490              :     #[allow(clippy::too_many_arguments)]
    1491            0 :     async fn handle_pagerequests_serial<IO>(
    1492            0 :         &mut self,
    1493            0 :         pgb_writer: &mut PostgresBackend<IO>,
    1494            0 :         mut pgb_reader: PostgresBackendReader<IO>,
    1495            0 :         tenant_id: TenantId,
    1496            0 :         timeline_id: TimelineId,
    1497            0 :         mut timeline_handles: TimelineHandles,
    1498            0 :         request_span: Span,
    1499            0 :         protocol_version: PagestreamProtocolVersion,
    1500            0 :         io_concurrency: IoConcurrency,
    1501            0 :         ctx: &RequestContext,
    1502            0 :     ) -> (
    1503            0 :         (PostgresBackendReader<IO>, TimelineHandles),
    1504            0 :         Result<(), QueryError>,
    1505            0 :     )
    1506            0 :     where
    1507            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1508            0 :     {
    1509            0 :         let cancel = self.cancel.clone();
    1510            0 :         let err = loop {
    1511            0 :             let msg = Self::pagestream_read_message(
    1512            0 :                 &mut pgb_reader,
    1513            0 :                 tenant_id,
    1514            0 :                 timeline_id,
    1515            0 :                 &mut timeline_handles,
    1516            0 :                 &cancel,
    1517            0 :                 ctx,
    1518            0 :                 protocol_version,
    1519            0 :                 request_span.clone(),
    1520            0 :             )
    1521            0 :             .await;
    1522            0 :             let msg = match msg {
    1523            0 :                 Ok(msg) => msg,
    1524            0 :                 Err(e) => break e,
    1525              :             };
    1526            0 :             let msg = match msg {
    1527            0 :                 Some(msg) => msg,
    1528              :                 None => {
    1529            0 :                     debug!("pagestream subprotocol end observed");
    1530            0 :                     return ((pgb_reader, timeline_handles), Ok(()));
    1531              :                 }
    1532              :             };
    1533              : 
    1534            0 :             let result = self
     1535            0 :                 .pagestream_handle_batched_message(
    1536            0 :                     pgb_writer,
    1537            0 :                     msg,
    1538            0 :                     io_concurrency.clone(),
    1539            0 :                     &cancel,
    1540            0 :                     protocol_version,
    1541            0 :                     ctx,
    1542            0 :                 )
    1543            0 :                 .await;
    1544            0 :             match result {
    1545            0 :                 Ok(()) => {}
    1546            0 :                 Err(e) => break e,
    1547              :             }
    1548              :         };
    1549            0 :         ((pgb_reader, timeline_handles), Err(err))
    1550            0 :     }
    1551              : 
    1552              :     /// # Cancel-Safety
    1553              :     ///
    1554              :     /// May leak tokio tasks if not polled to completion.
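                       :     ///
                       :     /// A hedged sketch of the two-stage shape built below (`read_message`,
                       :     /// `fold`, and `execute` are illustrative stand-ins):
                       :     ///
                       :     /// ```ignore
                       :     /// let (mut tx, mut rx) = spsc_fold::channel();
                       :     /// let batcher = async move {
                       :     ///     while let Some(msg) = read_message().await {
                       :     ///         // fold the new message into the pending batch where possible
                       :     ///         if tx.send(msg, |batch, msg| fold(batch, msg)).await.is_err() {
                       :     ///             break; // executor gone
                       :     ///         }
                       :     ///     }
                       :     ///     // dropping `tx` signals clean shutdown to the executor
                       :     /// };
                       :     /// let executor = async move {
                       :     ///     while let Ok(batch) = rx.recv().await {
                       :     ///         execute(batch).await;
                       :     ///     }
                       :     /// };
                       :     /// tokio::join!(batcher, executor);
                       :     /// ```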
    1555              :     #[allow(clippy::too_many_arguments)]
    1556            0 :     async fn handle_pagerequests_pipelined<IO>(
    1557            0 :         &mut self,
    1558            0 :         pgb_writer: &mut PostgresBackend<IO>,
    1559            0 :         pgb_reader: PostgresBackendReader<IO>,
    1560            0 :         tenant_id: TenantId,
    1561            0 :         timeline_id: TimelineId,
    1562            0 :         mut timeline_handles: TimelineHandles,
    1563            0 :         request_span: Span,
    1564            0 :         pipelining_config: PageServicePipeliningConfigPipelined,
    1565            0 :         protocol_version: PagestreamProtocolVersion,
    1566            0 :         io_concurrency: IoConcurrency,
    1567            0 :         ctx: &RequestContext,
    1568            0 :     ) -> (
    1569            0 :         (PostgresBackendReader<IO>, TimelineHandles),
    1570            0 :         Result<(), QueryError>,
    1571            0 :     )
    1572            0 :     where
    1573            0 :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    1574            0 :     {
    1575            0 :         //
    1576            0 :         // Pipelined pagestream handling consists of
     1577            0 :         // - a Batcher that reads requests off the wire
     1578            0 :         //   and batches them if possible,
    1579            0 :         // - an Executor that processes the batched requests.
    1580            0 :         //
    1581            0 :         // The batch is built up inside an `spsc_fold` channel,
     1582            0 :         // shared between Batcher (Sender) and Executor (Receiver).
    1583            0 :         //
     1584            0 :         // The Batcher continuously folds client requests into the batch,
    1585            0 :         // while the Executor can at any time take out what's in the batch
    1586            0 :         // in order to process it.
    1587            0 :         // This means the next batch builds up while the Executor
    1588            0 :         // executes the last batch.
    1589            0 :         //
    1590            0 :         // CANCELLATION
    1591            0 :         //
    1592            0 :         // We run both Batcher and Executor futures to completion before
    1593            0 :         // returning from this function.
    1594            0 :         //
    1595            0 :         // If Executor exits first, it signals cancellation to the Batcher
    1596            0 :         // via a CancellationToken that is child of `self.cancel`.
    1597            0 :         // If Batcher exits first, it signals cancellation to the Executor
    1598            0 :         // by dropping the spsc_fold channel Sender.
    1599            0 :         //
    1600            0 :         // CLEAN SHUTDOWN
    1601            0 :         //
    1602            0 :         // Clean shutdown means that the client ends the COPYBOTH session.
    1603            0 :         // In response to such a client message, the Batcher exits.
    1604            0 :         // The Executor continues to run, draining the spsc_fold channel.
    1605            0 :         // Once drained, the spsc_fold recv will fail with a distinct error
    1606            0 :         // indicating that the sender disconnected.
    1607            0 :         // The Executor exits with Ok(()) in response to that error.
    1608            0 :         //
    1609            0 :         // Server initiated shutdown is not clean shutdown, but instead
    1610            0 :         // is an error Err(QueryError::Shutdown) that is propagated through
    1611            0 :         // error propagation.
    1612            0 :         //
    1613            0 :         // ERROR PROPAGATION
    1614            0 :         //
     1615            0 :         // When the Batcher encounters an error, it sends it as a value
    1616            0 :         // through the spsc_fold channel and exits afterwards.
    1617            0 :         // When the Executor observes such an error in the channel,
    1618            0 :         // it exits returning that error value.
    1619            0 :         //
    1620            0 :         // This design ensures that the Executor stage will still process
    1621            0 :         // the batch that was in flight when the Batcher encountered an error,
     1622            0 :         // thereby behaving identically to a serial implementation.
    1623            0 : 
    1624            0 :         let PageServicePipeliningConfigPipelined {
    1625            0 :             max_batch_size,
    1626            0 :             execution,
    1627            0 :         } = pipelining_config;
    1628              : 
    1629              :         // Macro to _define_ a pipeline stage.
    1630              :         macro_rules! pipeline_stage {
    1631              :             ($name:literal, $cancel:expr, $make_fut:expr) => {{
    1632              :                 let cancel: CancellationToken = $cancel;
    1633              :                 let stage_fut = $make_fut(cancel.clone());
    1634            0 :                 async move {
    1635            0 :                     scopeguard::defer! {
    1636            0 :                         debug!("exiting");
    1637            0 :                     }
    1638            0 :                     timed_after_cancellation(stage_fut, $name, Duration::from_millis(100), &cancel)
    1639            0 :                         .await
    1640            0 :                 }
    1641              :                 .instrument(tracing::info_span!($name))
    1642              :             }};
    1643              :         }
    1644              : 
    1645              :         //
    1646              :         // Batcher
    1647              :         //
    1648              : 
    1649            0 :         let cancel_batcher = self.cancel.child_token();
    1650            0 :         let (mut batch_tx, mut batch_rx) = spsc_fold::channel();
    1651            0 :         let batcher = pipeline_stage!("batcher", cancel_batcher.clone(), move |cancel_batcher| {
    1652            0 :             let ctx = ctx.attached_child();
    1653            0 :             async move {
    1654            0 :                 let mut pgb_reader = pgb_reader;
    1655            0 :                 let mut exit = false;
    1656            0 :                 while !exit {
    1657            0 :                     let read_res = Self::pagestream_read_message(
    1658            0 :                         &mut pgb_reader,
    1659            0 :                         tenant_id,
    1660            0 :                         timeline_id,
    1661            0 :                         &mut timeline_handles,
    1662            0 :                         &cancel_batcher,
    1663            0 :                         &ctx,
    1664            0 :                         protocol_version,
    1665            0 :                         request_span.clone(),
    1666            0 :                     )
    1667            0 :                     .await;
    1668            0 :                     let Some(read_res) = read_res.transpose() else {
    1669            0 :                         debug!("client-initiated shutdown");
    1670            0 :                         break;
    1671              :                     };
    1672            0 :                     exit |= read_res.is_err();
    1673            0 :                     let could_send = batch_tx
    1674            0 :                         .send(read_res, |batch, res| {
    1675            0 :                             Self::pagestream_do_batch(max_batch_size, batch, res)
    1676            0 :                         })
    1677            0 :                         .await;
    1678            0 :                     exit |= could_send.is_err();
    1679              :                 }
    1680            0 :                 (pgb_reader, timeline_handles)
    1681            0 :             }
    1682            0 :         });
    1683              : 
    1684              :         //
    1685              :         // Executor
    1686              :         //
    1687              : 
    1688            0 :         let executor = pipeline_stage!("executor", self.cancel.clone(), move |cancel| {
    1689            0 :             let ctx = ctx.attached_child();
    1690            0 :             async move {
    1691            0 :                 let _cancel_batcher = cancel_batcher.drop_guard();
    1692              :                 loop {
    1693            0 :                     let maybe_batch = batch_rx.recv().await;
    1694            0 :                     let batch = match maybe_batch {
    1695            0 :                         Ok(batch) => batch,
    1696              :                         Err(spsc_fold::RecvError::SenderGone) => {
    1697            0 :                             debug!("upstream gone");
    1698            0 :                             return Ok(());
    1699              :                         }
    1700              :                     };
    1701            0 :                     let batch = match batch {
    1702            0 :                         Ok(batch) => batch,
    1703            0 :                         Err(e) => {
    1704            0 :                             return Err(e);
    1705              :                         }
    1706              :                     };
    1707            0 :                     self.pagesteam_handle_batched_message(
    1708            0 :                         pgb_writer,
    1709            0 :                         batch,
    1710            0 :                         io_concurrency.clone(),
    1711            0 :                         &cancel,
    1712            0 :                         protocol_version,
    1713            0 :                         &ctx,
    1714            0 :                     )
    1715            0 :                     .await?;
    1716              :                 }
    1717            0 :             }
    1718            0 :         });
    1719              : 
    1720              :         //
    1721              :         // Execute the stages.
    1722              :         //
    1723              : 
    1724            0 :         match execution {
    1725              :             PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
    1726            0 :                 tokio::join!(batcher, executor)
    1727              :             }
    1728              :             PageServiceProtocolPipelinedExecutionStrategy::Tasks => {
    1729              :                 // These tasks are not tracked anywhere.
    1730            0 :                 let read_messages_task = tokio::spawn(batcher);
    1731            0 :                 let (read_messages_task_res, executor_res_) =
    1732            0 :                     tokio::join!(read_messages_task, executor,);
    1733            0 :                 (
    1734            0 :                     read_messages_task_res.expect("propagated panic from read_messages"),
    1735            0 :                     executor_res_,
    1736            0 :                 )
    1737              :             }
    1738              :         }
    1739            0 :     }
    1740              : 
    1741              :     /// Helper function to handle the LSN from client request.
    1742              :     ///
    1743              :     /// Each GetPage (and Exists and Nblocks) request includes information about
    1744              :     /// which version of the page is being requested. The primary compute node
    1745              :     /// will always request the latest page version, by setting 'request_lsn' to
    1746              :     /// the last inserted or flushed WAL position, while a standby will request
    1747              :     /// a version at the LSN that it's currently caught up to.
    1748              :     ///
    1749              :     /// In either case, if the page server hasn't received the WAL up to the
    1750              :     /// requested LSN yet, we will wait for it to arrive. The return value is
    1751              :     /// the LSN that should be used to look up the page versions.
    1752              :     ///
    1753              :     /// In addition to the request LSN, each request carries another LSN,
    1754              :     /// 'not_modified_since', which is a hint to the pageserver that the client
    1755              :     /// knows that the page has not been modified between 'not_modified_since'
    1756              :     /// and the request LSN. This allows skipping the wait, as long as the WAL
    1757              :     /// up to 'not_modified_since' has arrived. If the client doesn't have any
    1758              :     /// information about when the page was modified, it will use
    1759              :     /// not_modified_since == lsn. If the client lies and sends a too low
     1760              :     /// not_modified_since == request_lsn. If the client lies and sends too low a
     1761              :     /// not_modified_since such that there are in fact later page versions, the
    1762              :     /// or an error.
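                       :     ///
                       :     /// A hedged sketch of the core decision (simplified; the real code below
                       :     /// also validates the LSNs against each other and the GC cutoff):
                       :     ///
                       :     /// ```ignore
                       :     /// if not_modified_since > last_record_lsn {
                       :     ///     // the page may have changed past what we have: wait for the WAL
                       :     ///     timeline.wait_lsn(not_modified_since, /* ... */).await?;
                       :     ///     Ok(not_modified_since)
                       :     /// } else {
                       :     ///     // all relevant WAL has arrived: read at a recent, valid LSN
                       :     ///     Ok(std::cmp::min(last_record_lsn, request_lsn))
                       :     /// }
                       :     /// ```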
    1763            0 :     async fn wait_or_get_last_lsn(
    1764            0 :         timeline: &Timeline,
    1765            0 :         request_lsn: Lsn,
    1766            0 :         not_modified_since: Lsn,
    1767            0 :         latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
    1768            0 :         ctx: &RequestContext,
    1769            0 :     ) -> Result<Lsn, PageStreamError> {
    1770            0 :         let last_record_lsn = timeline.get_last_record_lsn();
    1771            0 : 
    1772            0 :         // Sanity check the request
    1773            0 :         if request_lsn < not_modified_since {
    1774            0 :             return Err(PageStreamError::BadRequest(
    1775            0 :                 format!(
    1776            0 :                     "invalid request with request LSN {} and not_modified_since {}",
    1777            0 :                     request_lsn, not_modified_since,
    1778            0 :                 )
    1779            0 :                 .into(),
    1780            0 :             ));
    1781            0 :         }
    1782            0 : 
    1783            0 :         // Check explicitly for INVALID just to get a less scary error message if the request is obviously bogus
    1784            0 :         if request_lsn == Lsn::INVALID {
    1785            0 :             return Err(PageStreamError::BadRequest(
    1786            0 :                 "invalid LSN(0) in request".into(),
    1787            0 :             ));
    1788            0 :         }
    1789            0 : 
    1790            0 :         // Clients should only read from recent LSNs on their timeline, or from locations holding an LSN lease.
    1791            0 :         //
    1792            0 :         // We may have older data available, but we make a best effort to detect this case and return an error,
    1793            0 :         // to distinguish a misbehaving client (asking for old LSN) from a storage issue (data missing at a legitimate LSN).
    1794            0 :         if request_lsn < **latest_gc_cutoff_lsn && !timeline.is_gc_blocked_by_lsn_lease_deadline() {
    1795            0 :             let gc_info = &timeline.gc_info.read().unwrap();
    1796            0 :             if !gc_info.lsn_covered_by_lease(request_lsn) {
    1797            0 :                 return Err(
    1798            0 :                     PageStreamError::BadRequest(format!(
    1799            0 :                         "tried to request a page version that was garbage collected. requested at {} gc cutoff {}",
    1800            0 :                         request_lsn, **latest_gc_cutoff_lsn
    1801            0 :                     ).into())
    1802            0 :                 );
    1803            0 :             }
    1804            0 :         }
    1805              : 
    1806              :         // Wait for WAL up to 'not_modified_since' to arrive, if necessary
    1807            0 :         if not_modified_since > last_record_lsn {
    1808            0 :             timeline
    1809            0 :                 .wait_lsn(
    1810            0 :                     not_modified_since,
    1811            0 :                     crate::tenant::timeline::WaitLsnWaiter::PageService,
    1812            0 :                     timeline::WaitLsnTimeout::Default,
    1813            0 :                     ctx,
    1814            0 :                 )
    1815            0 :                 .await?;
    1816              :             // Since we waited for 'not_modified_since' to arrive, that is now the last
    1817              :             // record LSN. (Or close enough for our purposes; the last-record LSN can
    1818              :             // advance immediately after we return anyway)
    1819            0 :             Ok(not_modified_since)
    1820              :         } else {
    1821              :             // It might be better to use max(not_modified_since, latest_gc_cutoff_lsn)
    1822              :             // here instead. That would give the same result, since we know that there
    1823              :             // haven't been any modifications since 'not_modified_since'. Using an older
    1824              :             // LSN might be faster, because that could allow skipping recent layers when
    1825              :             // finding the page. However, we have historically used 'last_record_lsn', so
    1826              :             // stick to that for now.
    1827            0 :             Ok(std::cmp::min(last_record_lsn, request_lsn))
    1828              :         }
    1829            0 :     }
    1830              : 
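                      :     // --- Editor's sketch (not part of the original source): the core LSN
                      :     // selection of wait_or_get_last_lsn above, as a standalone function with
                      :     // u64 standing in for Lsn and the WAL wait elided.
                      :     fn effective_read_lsn(
                      :         last_record_lsn: u64,
                      :         request_lsn: u64,
                      :         not_modified_since: u64,
                      :     ) -> Result<u64, &'static str> {
                      :         if request_lsn < not_modified_since {
                      :             return Err("request_lsn must be >= not_modified_since");
                      :         }
                      :         if not_modified_since > last_record_lsn {
                      :             // The real code first waits for WAL up to not_modified_since.
                      :             Ok(not_modified_since)
                      :         } else {
                      :             Ok(last_record_lsn.min(request_lsn))
                      :         }
                      :     }
                      : 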
    1831              :     /// Handles the lsn lease request.
    1832              :     /// If a lease cannot be obtained, the client will receive NULL.
    1833              :     #[instrument(skip_all, fields(shard_id, %lsn))]
    1834              :     async fn handle_make_lsn_lease<IO>(
    1835              :         &mut self,
    1836              :         pgb: &mut PostgresBackend<IO>,
    1837              :         tenant_shard_id: TenantShardId,
    1838              :         timeline_id: TimelineId,
    1839              :         lsn: Lsn,
    1840              :         ctx: &RequestContext,
    1841              :     ) -> Result<(), QueryError>
    1842              :     where
    1843              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    1844              :     {
    1845              :         let timeline = self
    1846              :             .timeline_handles
    1847              :             .as_mut()
    1848              :             .unwrap()
    1849              :             .get(
    1850              :                 tenant_shard_id.tenant_id,
    1851              :                 timeline_id,
    1852              :                 ShardSelector::Known(tenant_shard_id.to_index()),
    1853              :             )
    1854              :             .await?;
    1855              :         set_tracing_field_shard_id(&timeline);
    1856              : 
    1857              :         let lease = timeline
    1858              :             .renew_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)
    1859            0 :             .inspect_err(|e| {
    1860            0 :                 warn!("{e}");
    1861            0 :             })
    1862              :             .ok();
    1863            0 :         let valid_until_str = lease.map(|l| {
    1864            0 :             l.valid_until
    1865            0 :                 .duration_since(SystemTime::UNIX_EPOCH)
    1866            0 :                 .expect("valid_until is earlier than UNIX_EPOCH")
    1867            0 :                 .as_millis()
    1868            0 :                 .to_string()
    1869            0 :         });
    1870              : 
    1871              :         info!(
    1872              :             "acquired lease for {} until {}",
    1873              :             lsn,
    1874              :             valid_until_str.as_deref().unwrap_or("<unknown>")
    1875              :         );
    1876              : 
    1877            0 :         let bytes = valid_until_str.as_ref().map(|x| x.as_bytes());
    1878              : 
    1879              :         pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor::text_col(
    1880              :             b"valid_until",
    1881              :         )]))?
    1882              :         .write_message_noflush(&BeMessage::DataRow(&[bytes]))?;
    1883              : 
    1884              :         Ok(())
    1885              :     }
    1886              : 
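                      :     // --- Editor's sketch (not part of the original source): the expiry-to-
                      :     // milliseconds conversion used above, assuming valid_until is not earlier
                      :     // than UNIX_EPOCH.
                      :     fn lease_valid_until_millis(valid_until: std::time::SystemTime) -> String {
                      :         valid_until
                      :             .duration_since(std::time::SystemTime::UNIX_EPOCH)
                      :             .expect("valid_until is earlier than UNIX_EPOCH")
                      :             .as_millis()
                      :             .to_string()
                      :     }
                      : 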
    1887              :     #[instrument(skip_all, fields(shard_id))]
    1888              :     async fn handle_get_rel_exists_request(
    1889              :         &mut self,
    1890              :         timeline: &Timeline,
    1891              :         req: &PagestreamExistsRequest,
    1892              :         ctx: &RequestContext,
    1893              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1894              :         let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
    1895              :         let lsn = Self::wait_or_get_last_lsn(
    1896              :             timeline,
    1897              :             req.hdr.request_lsn,
    1898              :             req.hdr.not_modified_since,
    1899              :             &latest_gc_cutoff_lsn,
    1900              :             ctx,
    1901              :         )
    1902              :         .await?;
    1903              : 
    1904              :         let exists = timeline
    1905              :             .get_rel_exists(req.rel, Version::Lsn(lsn), ctx)
    1906              :             .await?;
    1907              : 
    1908              :         Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
    1909              :             req: *req,
    1910              :             exists,
    1911              :         }))
    1912              :     }
    1913              : 
    1914              :     #[instrument(skip_all, fields(shard_id))]
    1915              :     async fn handle_get_nblocks_request(
    1916              :         &mut self,
    1917              :         timeline: &Timeline,
    1918              :         req: &PagestreamNblocksRequest,
    1919              :         ctx: &RequestContext,
    1920              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1921              :         let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
    1922              :         let lsn = Self::wait_or_get_last_lsn(
    1923              :             timeline,
    1924              :             req.hdr.request_lsn,
    1925              :             req.hdr.not_modified_since,
    1926              :             &latest_gc_cutoff_lsn,
    1927              :             ctx,
    1928              :         )
    1929              :         .await?;
    1930              : 
    1931              :         let n_blocks = timeline
    1932              :             .get_rel_size(req.rel, Version::Lsn(lsn), ctx)
    1933              :             .await?;
    1934              : 
    1935              :         Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
    1936              :             req: *req,
    1937              :             n_blocks,
    1938              :         }))
    1939              :     }
    1940              : 
    1941              :     #[instrument(skip_all, fields(shard_id))]
    1942              :     async fn handle_db_size_request(
    1943              :         &mut self,
    1944              :         timeline: &Timeline,
    1945              :         req: &PagestreamDbSizeRequest,
    1946              :         ctx: &RequestContext,
    1947              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    1948              :         let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
    1949              :         let lsn = Self::wait_or_get_last_lsn(
    1950              :             timeline,
    1951              :             req.hdr.request_lsn,
    1952              :             req.hdr.not_modified_since,
    1953              :             &latest_gc_cutoff_lsn,
    1954              :             ctx,
    1955              :         )
    1956              :         .await?;
    1957              : 
    1958              :         let total_blocks = timeline
    1959              :             .get_db_size(DEFAULTTABLESPACE_OID, req.dbnode, Version::Lsn(lsn), ctx)
    1960              :             .await?;
    1961              :         let db_size = total_blocks as i64 * BLCKSZ as i64;
    1962              : 
    1963              :         Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
    1964              :             req: *req,
    1965              :             db_size,
    1966              :         }))
    1967              :     }
    1968              : 
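                      :     // Editor's note: db_size is reported in bytes; with Postgres' default
                      :     // 8 KiB BLCKSZ, e.g. 1_000 blocks -> 8_192_000 bytes.
                      : 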
    1969              :     #[instrument(skip_all)]
    1970              :     async fn handle_get_page_at_lsn_request_batched(
    1971              :         &mut self,
    1972              :         timeline: &Timeline,
    1973              :         effective_lsn: Lsn,
    1974              :         requests: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
    1975              :         io_concurrency: IoConcurrency,
    1976              :         ctx: &RequestContext,
    1977              :     ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
    1978              :         debug_assert_current_span_has_tenant_and_timeline_id();
    1979              : 
    1980              :         timeline
    1981              :             .query_metrics
    1982              :             .observe_getpage_batch_start(requests.len());
    1983              : 
    1984              :         // If a page trace is running, submit an event for this request.
    1985              :         if let Some(page_trace) = timeline.page_trace.load().as_ref() {
    1986              :             let time = SystemTime::now();
    1987              :             for batch in &requests {
    1988              :                 let key = rel_block_to_key(batch.req.rel, batch.req.blkno).to_compact();
    1989              :                 // Ignore error (trace buffer may be full or tracer may have disconnected).
    1990              :                 _ = page_trace.try_send(PageTraceEvent {
    1991              :                     key,
    1992              :                     effective_lsn,
    1993              :                     time,
    1994              :                 });
    1995              :             }
    1996              :         }
    1997              : 
    1998              :         let results = timeline
    1999              :             .get_rel_page_at_lsn_batched(
    2000            0 :                 requests.iter().map(|p| (&p.req.rel, &p.req.blkno)),
    2001              :                 effective_lsn,
    2002              :                 io_concurrency,
    2003              :                 ctx,
    2004              :             )
    2005              :             .await;
    2006              :         assert_eq!(results.len(), requests.len());
    2007              : 
    2008              :         // TODO: avoid creating the new Vec here
    2009              :         Vec::from_iter(
    2010              :             requests
    2011              :                 .into_iter()
    2012              :                 .zip(results.into_iter())
    2013            0 :                 .map(|(req, res)| {
    2014            0 :                     res.map(|page| {
    2015            0 :                         (
    2016            0 :                             PagestreamBeMessage::GetPage(models::PagestreamGetPageResponse {
    2017            0 :                                 req: req.req,
    2018            0 :                                 page,
    2019            0 :                             }),
    2020            0 :                             req.timer,
    2021            0 :                         )
    2022            0 :                     })
    2023            0 :                     .map_err(|e| BatchedPageStreamError {
    2024            0 :                         err: PageStreamError::from(e),
    2025            0 :                         req: req.req.hdr,
    2026            0 :                     })
    2027            0 :                 }),
    2028              :         )
    2029              :     }
    2030              : 
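                      :     // Editor's note: get_rel_page_at_lsn_batched returns exactly one result
                      :     // per request (asserted above), so zipping requests with results cannot
                      :     // drop or misattribute entries.
                      : 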
    2031              :     #[instrument(skip_all, fields(shard_id))]
    2032              :     async fn handle_get_slru_segment_request(
    2033              :         &mut self,
    2034              :         timeline: &Timeline,
    2035              :         req: &PagestreamGetSlruSegmentRequest,
    2036              :         ctx: &RequestContext,
    2037              :     ) -> Result<PagestreamBeMessage, PageStreamError> {
    2038              :         let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
    2039              :         let lsn = Self::wait_or_get_last_lsn(
    2040              :             timeline,
    2041              :             req.hdr.request_lsn,
    2042              :             req.hdr.not_modified_since,
    2043              :             &latest_gc_cutoff_lsn,
    2044              :             ctx,
    2045              :         )
    2046              :         .await?;
    2047              : 
    2048              :         let kind = SlruKind::from_repr(req.kind)
    2049              :             .ok_or(PageStreamError::BadRequest("invalid SLRU kind".into()))?;
    2050              :         let segment = timeline.get_slru_segment(kind, req.segno, lsn, ctx).await?;
    2051              : 
    2052              :         Ok(PagestreamBeMessage::GetSlruSegment(
    2053              :             PagestreamGetSlruSegmentResponse { req: *req, segment },
    2054              :         ))
    2055              :     }
    2056              : 
    2057              :     // NB: this impl mimics what we do for batched getpage requests.
    2058              :     #[cfg(feature = "testing")]
    2059              :     #[instrument(skip_all, fields(shard_id))]
    2060              :     async fn handle_test_request_batch(
    2061              :         &mut self,
    2062              :         timeline: &Timeline,
    2063              :         requests: Vec<BatchedTestRequest>,
    2064              :         _ctx: &RequestContext,
    2065              :     ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
    2066              :         // real requests would do something with the timeline
    2067              :         let mut results = Vec::with_capacity(requests.len());
    2068              :         for _req in requests.iter() {
    2069              :             tokio::task::yield_now().await;
    2070              : 
    2071              :             results.push({
    2072              :                 if timeline.cancel.is_cancelled() {
    2073              :                     Err(PageReconstructError::Cancelled)
    2074              :                 } else {
    2075              :                     Ok(())
    2076              :                 }
    2077              :             });
    2078              :         }
    2079              : 
    2080              :         // TODO: avoid creating the new Vec here
    2081              :         Vec::from_iter(
    2082              :             requests
    2083              :                 .into_iter()
    2084              :                 .zip(results.into_iter())
    2085            0 :                 .map(|(req, res)| {
    2086            0 :                     res.map(|()| {
    2087            0 :                         (
    2088            0 :                             PagestreamBeMessage::Test(models::PagestreamTestResponse {
    2089            0 :                                 req: req.req.clone(),
    2090            0 :                             }),
    2091            0 :                             req.timer,
    2092            0 :                         )
    2093            0 :                     })
    2094            0 :                     .map_err(|e| BatchedPageStreamError {
    2095            0 :                         err: PageStreamError::from(e),
    2096            0 :                         req: req.req.hdr,
    2097            0 :                     })
    2098            0 :                 }),
    2099              :         )
    2100              :     }
    2101              : 
    2102              :     /// Note on "fullbackup":
    2103              :     /// Full basebackups should only be used for debugging purposes.
    2104              :     /// Originally, fullbackup was introduced to enable breaking storage
    2105              :     /// format changes, but that is no longer applicable.
    2106              :     ///
    2107              :     /// # Coding Discipline
    2108              :     ///
    2109              :     /// Coding discipline within this function: all interaction with the `pgb` connection
    2110              :     /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
    2111              :     /// This is so that we can shutdown page_service quickly.
    2112              :     ///
    2113              :     /// TODO: wrap the pgb that we pass to the basebackup handler so that it's sensitive
    2114              :     /// to connection cancellation.
    2115              :     #[allow(clippy::too_many_arguments)]
    2116              :     #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))]
    2117              :     async fn handle_basebackup_request<IO>(
    2118              :         &mut self,
    2119              :         pgb: &mut PostgresBackend<IO>,
    2120              :         tenant_id: TenantId,
    2121              :         timeline_id: TimelineId,
    2122              :         lsn: Option<Lsn>,
    2123              :         prev_lsn: Option<Lsn>,
    2124              :         full_backup: bool,
    2125              :         gzip: bool,
    2126              :         replica: bool,
    2127              :         ctx: &RequestContext,
    2128              :     ) -> Result<(), QueryError>
    2129              :     where
    2130              :         IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    2131              :     {
    2132            0 :         fn map_basebackup_error(err: BasebackupError) -> QueryError {
    2133            0 :             match err {
    2134              :                 // TODO: passthrough the error site to the final error message?
    2135            0 :                 BasebackupError::Client(e, _) => QueryError::Disconnected(ConnectionError::Io(e)),
    2136            0 :                 BasebackupError::Server(e) => QueryError::Other(e),
    2137            0 :                 BasebackupError::Shutdown => QueryError::Shutdown,
    2138              :             }
    2139            0 :         }
    2140              : 
    2141              :         let started = std::time::Instant::now();
    2142              : 
    2143              :         let timeline = self
    2144              :             .timeline_handles
    2145              :             .as_mut()
    2146              :             .unwrap()
    2147              :             .get(tenant_id, timeline_id, ShardSelector::Zero)
    2148              :             .await?;
    2149              :         set_tracing_field_shard_id(&timeline);
    2150              :         let ctx = ctx.with_scope_timeline(&timeline);
    2151              : 
    2152              :         if timeline.is_archived() == Some(true) {
    2153              :             tracing::info!(
    2154              :                 "timeline {tenant_id}/{timeline_id} is archived, but got a basebackup request for it."
    2155              :             );
    2156              :             return Err(QueryError::NotFound("timeline is archived".into()));
    2157              :         }
    2158              : 
    2159              :         let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
    2160              :         if let Some(lsn) = lsn {
    2161              :             // Backup was requested at a particular LSN. Wait for it to arrive.
    2162              :             info!("waiting for {}", lsn);
    2163              :             timeline
    2164              :                 .wait_lsn(
    2165              :                     lsn,
    2166              :                     crate::tenant::timeline::WaitLsnWaiter::PageService,
    2167              :                     crate::tenant::timeline::WaitLsnTimeout::Default,
    2168              :                     &ctx,
    2169              :                 )
    2170              :                 .await?;
    2171              :             timeline
    2172              :                 .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)
    2173              :                 .context("invalid basebackup lsn")?;
    2174              :         }
    2175              : 
    2176              :         let lsn_awaited_after = started.elapsed();
    2177              : 
    2178              :         // switch client to COPYOUT
    2179              :         pgb.write_message_noflush(&BeMessage::CopyOutResponse)
    2180              :             .map_err(QueryError::Disconnected)?;
    2181              :         self.flush_cancellable(pgb, &self.cancel).await?;
    2182              : 
    2183              :         // Send a tarball of the latest layer on the timeline. Compress unless
    2184              :         // this is a fullbackup. TODO: compress in that case too (tests need to be updated).
    2185              :         if full_backup {
    2186              :             let mut writer = pgb.copyout_writer();
    2187              :             basebackup::send_basebackup_tarball(
    2188              :                 &mut writer,
    2189              :                 &timeline,
    2190              :                 lsn,
    2191              :                 prev_lsn,
    2192              :                 full_backup,
    2193              :                 replica,
    2194              :                 &ctx,
    2195              :             )
    2196              :             .await
    2197              :             .map_err(map_basebackup_error)?;
    2198              :         } else {
    2199              :             let mut writer = BufWriter::new(pgb.copyout_writer());
    2200              :             if gzip {
    2201              :                 let mut encoder = GzipEncoder::with_quality(
    2202              :                     &mut writer,
    2203              :                     // NOTE using fast compression because it's on the critical path
    2204              :                     //      for compute startup. For an empty database, we get
    2205              :                     //      <100KB with this method. The Level::Best compression method
    2206              :                     //      gives us <20KB, but maybe we should add basebackup caching
    2207              :                     //      on compute shutdown first.
    2208              :                     async_compression::Level::Fastest,
    2209              :                 );
    2210              :                 basebackup::send_basebackup_tarball(
    2211              :                     &mut encoder,
    2212              :                     &timeline,
    2213              :                     lsn,
    2214              :                     prev_lsn,
    2215              :                     full_backup,
    2216              :                     replica,
    2217              :                     &ctx,
    2218              :                 )
    2219              :                 .await
    2220              :                 .map_err(map_basebackup_error)?;
    2221              :                 // shutdown the encoder to ensure the gzip footer is written
    2222              :                 encoder
    2223              :                     .shutdown()
    2224              :                     .await
    2225            0 :                     .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?;
    2226              :             } else {
    2227              :                 basebackup::send_basebackup_tarball(
    2228              :                     &mut writer,
    2229              :                     &timeline,
    2230              :                     lsn,
    2231              :                     prev_lsn,
    2232              :                     full_backup,
    2233              :                     replica,
    2234              :                     &ctx,
    2235              :                 )
    2236              :                 .await
    2237              :                 .map_err(map_basebackup_error)?;
    2238              :             }
    2239            0 :             writer.flush().await.map_err(|e| {
    2240            0 :                 map_basebackup_error(BasebackupError::Client(
    2241            0 :                     e,
    2242            0 :                     "handle_basebackup_request,flush",
    2243            0 :                 ))
    2244            0 :             })?;
    2245              :         }
    2246              : 
    2247              :         pgb.write_message_noflush(&BeMessage::CopyDone)
    2248              :             .map_err(QueryError::Disconnected)?;
    2249              :         self.flush_cancellable(pgb, &timeline.cancel).await?;
    2250              : 
    2251              :         let basebackup_after = started
    2252              :             .elapsed()
    2253              :             .checked_sub(lsn_awaited_after)
    2254              :             .unwrap_or(Duration::ZERO);
    2255              : 
    2256              :         info!(
    2257              :             lsn_await_millis = lsn_awaited_after.as_millis(),
    2258              :             basebackup_millis = basebackup_after.as_millis(),
    2259              :             "basebackup complete"
    2260              :         );
    2261              : 
    2262              :         Ok(())
    2263              :     }
    2264              : 
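                      :     // --- Editor's sketch (not part of the original source): why the encoder
                      :     // shutdown above matters. GzipEncoder writes the gzip footer only on
                      :     // shutdown; flushing alone would hand the client a truncated stream.
                      :     async fn gzip_fastest(payload: &[u8]) -> std::io::Result<Vec<u8>> {
                      :         use async_compression::tokio::write::GzipEncoder;
                      :         use tokio::io::AsyncWriteExt;
                      :         let mut encoder =
                      :             GzipEncoder::with_quality(Vec::new(), async_compression::Level::Fastest);
                      :         encoder.write_all(payload).await?;
                      :         encoder.shutdown().await?; // writes the gzip footer
                      :         Ok(encoder.into_inner())
                      :     }
                      : 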
    2265              :     // When accessing the management API, supply None as the argument.
    2266              :     // When authorizing access to a tenant, pass the corresponding tenant id.
    2267            0 :     fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<(), QueryError> {
    2268            0 :         if self.auth.is_none() {
    2269              :             // auth is set to Trust, nothing to check so just return ok
    2270            0 :             return Ok(());
    2271            0 :         }
    2272            0 :         // `auth` is Some, as just checked above. When auth is enabled, the
    2273            0 :         // claims are always present because of checks during connection init,
    2274            0 :         // so this expect won't trigger.
    2275            0 :         let claims = self
    2276            0 :             .claims
    2277            0 :             .as_ref()
    2278            0 :             .expect("claims presence already checked");
    2279            0 :         check_permission(claims, tenant_id).map_err(|e| QueryError::Unauthorized(e.0))
    2280            0 :     }
    2281              : }
    2282              : 
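                      : // Editor's note: with AuthType::Trust there are no claims to validate, so
                      : // check_permission above short-circuits; with NeonJWT, claims were already
                      : // validated at connection start (see check_auth_jwt below).
                      : 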
    2283              : /// `basebackup tenant timeline [lsn] [--gzip] [--replica]`
    2284              : #[derive(Debug, Clone, Eq, PartialEq)]
    2285              : struct BaseBackupCmd {
    2286              :     tenant_id: TenantId,
    2287              :     timeline_id: TimelineId,
    2288              :     lsn: Option<Lsn>,
    2289              :     gzip: bool,
    2290              :     replica: bool,
    2291              : }
    2292              : 
    2293              : /// `fullbackup tenant timeline [lsn] [prev_lsn]`
    2294              : #[derive(Debug, Clone, Eq, PartialEq)]
    2295              : struct FullBackupCmd {
    2296              :     tenant_id: TenantId,
    2297              :     timeline_id: TimelineId,
    2298              :     lsn: Option<Lsn>,
    2299              :     prev_lsn: Option<Lsn>,
    2300              : }
    2301              : 
    2302              : /// `pagestream_v2 tenant timeline`
    2303              : #[derive(Debug, Clone, Eq, PartialEq)]
    2304              : struct PageStreamCmd {
    2305              :     tenant_id: TenantId,
    2306              :     timeline_id: TimelineId,
    2307              :     protocol_version: PagestreamProtocolVersion,
    2308              : }
    2309              : 
    2310              : /// `lease lsn tenant timeline lsn`
    2311              : #[derive(Debug, Clone, Eq, PartialEq)]
    2312              : struct LeaseLsnCmd {
    2313              :     tenant_shard_id: TenantShardId,
    2314              :     timeline_id: TimelineId,
    2315              :     lsn: Lsn,
    2316              : }
    2317              : 
    2318              : #[derive(Debug, Clone, Eq, PartialEq)]
    2319              : enum PageServiceCmd {
    2320              :     Set,
    2321              :     PageStream(PageStreamCmd),
    2322              :     BaseBackup(BaseBackupCmd),
    2323              :     FullBackup(FullBackupCmd),
    2324              :     LeaseLsn(LeaseLsnCmd),
    2325              : }
    2326              : 
    2327              : impl PageStreamCmd {
    2328           12 :     fn parse(query: &str, protocol_version: PagestreamProtocolVersion) -> anyhow::Result<Self> {
    2329           12 :         let parameters = query.split_whitespace().collect_vec();
    2330           12 :         if parameters.len() != 2 {
    2331            4 :             bail!(
    2332            4 :                 "invalid number of parameters for pagestream command: {}",
    2333            4 :                 query
    2334            4 :             );
    2335            8 :         }
    2336            8 :         let tenant_id = TenantId::from_str(parameters[0])
    2337            8 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    2338            4 :         let timeline_id = TimelineId::from_str(parameters[1])
    2339            4 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2340            4 :         Ok(Self {
    2341            4 :             tenant_id,
    2342            4 :             timeline_id,
    2343            4 :             protocol_version,
    2344            4 :         })
    2345           12 :     }
    2346              : }
    2347              : 
    2348              : impl FullBackupCmd {
    2349            8 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2350            8 :         let parameters = query.split_whitespace().collect_vec();
    2351            8 :         if parameters.len() < 2 || parameters.len() > 4 {
    2352            0 :             bail!(
    2353            0 :                 "invalid number of parameters for basebackup command: {}",
    2354            0 :                 query
    2355            0 :             );
    2356            8 :         }
    2357            8 :         let tenant_id = TenantId::from_str(parameters[0])
    2358            8 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    2359            8 :         let timeline_id = TimelineId::from_str(parameters[1])
    2360            8 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2361              :         // The caller is responsible for providing correct lsn and prev_lsn.
    2362            8 :         let lsn = if let Some(lsn_str) = parameters.get(2) {
    2363              :             Some(
    2364            4 :                 Lsn::from_str(lsn_str)
    2365            4 :                     .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?,
    2366              :             )
    2367              :         } else {
    2368            4 :             None
    2369              :         };
    2370            8 :         let prev_lsn = if let Some(prev_lsn_str) = parameters.get(3) {
    2371              :             Some(
    2372            4 :                 Lsn::from_str(prev_lsn_str)
    2373            4 :                     .with_context(|| format!("Failed to parse Lsn from {prev_lsn_str}"))?,
    2374              :             )
    2375              :         } else {
    2376            4 :             None
    2377              :         };
    2378            8 :         Ok(Self {
    2379            8 :             tenant_id,
    2380            8 :             timeline_id,
    2381            8 :             lsn,
    2382            8 :             prev_lsn,
    2383            8 :         })
    2384            8 :     }
    2385              : }
    2386              : 
    2387              : impl BaseBackupCmd {
    2388           36 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2389           36 :         let parameters = query.split_whitespace().collect_vec();
    2390           36 :         if parameters.len() < 2 {
    2391            0 :             bail!(
    2392            0 :                 "invalid number of parameters for basebackup command: {}",
    2393            0 :                 query
    2394            0 :             );
    2395           36 :         }
    2396           36 :         let tenant_id = TenantId::from_str(parameters[0])
    2397           36 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    2398           36 :         let timeline_id = TimelineId::from_str(parameters[1])
    2399           36 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2400              :         let lsn;
    2401              :         let flags_parse_from;
    2402           36 :         if let Some(maybe_lsn) = parameters.get(2) {
    2403           32 :             if *maybe_lsn == "latest" {
    2404            4 :                 lsn = None;
    2405            4 :                 flags_parse_from = 3;
    2406           28 :             } else if maybe_lsn.starts_with("--") {
    2407           20 :                 lsn = None;
    2408           20 :                 flags_parse_from = 2;
    2409           20 :             } else {
    2410              :                 lsn = Some(
    2411            8 :                     Lsn::from_str(maybe_lsn)
    2412            8 :                         .with_context(|| format!("Failed to parse lsn from {maybe_lsn}"))?,
    2413              :                 );
    2414            8 :                 flags_parse_from = 3;
    2415              :             }
    2416            4 :         } else {
    2417            4 :             lsn = None;
    2418            4 :             flags_parse_from = 2;
    2419            4 :         }
    2420              : 
    2421           36 :         let mut gzip = false;
    2422           36 :         let mut replica = false;
    2423              : 
    2424           44 :         for &param in &parameters[flags_parse_from..] {
    2425           44 :             match param {
    2426           44 :                 "--gzip" => {
    2427           28 :                     if gzip {
    2428            4 :                         bail!("duplicate parameter for basebackup command: {param}")
    2429           24 :                     }
    2430           24 :                     gzip = true
    2431              :                 }
    2432           16 :                 "--replica" => {
    2433            8 :                     if replica {
    2434            0 :                         bail!("duplicate parameter for basebackup command: {param}")
    2435            8 :                     }
    2436            8 :                     replica = true
    2437              :                 }
    2438            8 :                 _ => bail!("invalid parameter for basebackup command: {param}"),
    2439              :             }
    2440              :         }
    2441           24 :         Ok(Self {
    2442           24 :             tenant_id,
    2443           24 :             timeline_id,
    2444           24 :             lsn,
    2445           24 :             gzip,
    2446           24 :             replica,
    2447           24 :         })
    2448           36 :     }
    2449              : }
    2450              : 
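                      : // Editor's note: forms accepted by the parser above, for example:
                      : //   basebackup <tenant_id> <timeline_id>
                      : //   basebackup <tenant_id> <timeline_id> latest
                      : //   basebackup <tenant_id> <timeline_id> 0/16ABCDE --gzip
                      : //   basebackup <tenant_id> <timeline_id> --gzip --replica
                      : // Flags may appear in any order; duplicate flags are rejected.
                      : 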
    2451              : impl LeaseLsnCmd {
    2452            8 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2453            8 :         let parameters = query.split_whitespace().collect_vec();
    2454            8 :         if parameters.len() != 3 {
    2455            0 :             bail!(
    2456            0 :                 "invalid number of parameters for lease lsn command: {}",
    2457            0 :                 query
    2458            0 :             );
    2459            8 :         }
    2460            8 :         let tenant_shard_id = TenantShardId::from_str(parameters[0])
    2461            8 :             .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
    2462            8 :         let timeline_id = TimelineId::from_str(parameters[1])
    2463            8 :             .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
    2464            8 :         let lsn = Lsn::from_str(parameters[2])
    2465            8 :             .with_context(|| format!("Failed to parse lsn from {}", parameters[2]))?;
    2466            8 :         Ok(Self {
    2467            8 :             tenant_shard_id,
    2468            8 :             timeline_id,
    2469            8 :             lsn,
    2470            8 :         })
    2471            8 :     }
    2472              : }
    2473              : 
    2474              : impl PageServiceCmd {
    2475           84 :     fn parse(query: &str) -> anyhow::Result<Self> {
    2476           84 :         let query = query.trim();
    2477           84 :         let Some((cmd, other)) = query.split_once(' ') else {
    2478            8 :             bail!("cannot parse query: {query}")
    2479              :         };
    2480           76 :         match cmd.to_ascii_lowercase().as_str() {
    2481           76 :             "pagestream_v2" => Ok(Self::PageStream(PageStreamCmd::parse(
    2482           12 :                 other,
    2483           12 :                 PagestreamProtocolVersion::V2,
    2484           12 :             )?)),
    2485           64 :             "pagestream_v3" => Ok(Self::PageStream(PageStreamCmd::parse(
    2486            0 :                 other,
    2487            0 :                 PagestreamProtocolVersion::V3,
    2488            0 :             )?)),
    2489           64 :             "basebackup" => Ok(Self::BaseBackup(BaseBackupCmd::parse(other)?)),
    2490           28 :             "fullbackup" => Ok(Self::FullBackup(FullBackupCmd::parse(other)?)),
    2491           20 :             "lease" => {
    2492           12 :                 let Some((cmd2, other)) = other.split_once(' ') else {
    2493            0 :                     bail!("invalid lease command: {cmd}");
    2494              :                 };
    2495           12 :                 let cmd2 = cmd2.to_ascii_lowercase();
    2496           12 :                 if cmd2 == "lsn" {
    2497            8 :                     Ok(Self::LeaseLsn(LeaseLsnCmd::parse(other)?))
    2498              :                 } else {
    2499            4 :                     bail!("invalid lease command: {cmd}");
    2500              :                 }
    2501              :             }
    2502            8 :             "set" => Ok(Self::Set),
    2503            0 :             _ => Err(anyhow::anyhow!("unsupported command {cmd} in {query}")),
    2504              :         }
    2505           84 :     }
    2506              : }
    2507              : 
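                      : // Editor's note: the command word is matched case-insensitively (via
                      : // to_ascii_lowercase above), so e.g. "BaseBackup <tenant> <timeline>"
                      : // parses the same as "basebackup <tenant> <timeline>".
                      : 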
    2508              : impl<IO> postgres_backend::Handler<IO> for PageServerHandler
    2509              : where
    2510              :     IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
    2511              : {
    2512            0 :     fn check_auth_jwt(
    2513            0 :         &mut self,
    2514            0 :         _pgb: &mut PostgresBackend<IO>,
    2515            0 :         jwt_response: &[u8],
    2516            0 :     ) -> Result<(), QueryError> {
    2517              :         // This unwrap is never triggered, because check_auth_jwt is only called
    2518              :         // when auth_type is NeonJWT, which requires auth to be present.
    2519            0 :         let data = self
    2520            0 :             .auth
    2521            0 :             .as_ref()
    2522            0 :             .unwrap()
    2523            0 :             .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)
    2524            0 :             .map_err(|e| QueryError::Unauthorized(e.0))?;
    2525              : 
    2526            0 :         if matches!(data.claims.scope, Scope::Tenant) && data.claims.tenant_id.is_none() {
    2527            0 :             return Err(QueryError::Unauthorized(
    2528            0 :                 "jwt token scope is Tenant, but tenant id is missing".into(),
    2529            0 :             ));
    2530            0 :         }
    2531            0 : 
    2532            0 :         debug!(
    2533            0 :             "jwt scope check succeeded for scope: {:#?} by tenant id: {:?}",
    2534              :             data.claims.scope, data.claims.tenant_id,
    2535              :         );
    2536              : 
    2537            0 :         self.claims = Some(data.claims);
    2538            0 :         Ok(())
    2539            0 :     }
    2540              : 
    2541            0 :     fn startup(
    2542            0 :         &mut self,
    2543            0 :         _pgb: &mut PostgresBackend<IO>,
    2544            0 :         sm: &FeStartupPacket,
    2545            0 :     ) -> Result<(), QueryError> {
    2546            0 :         fail::fail_point!("ps::connection-start::startup-packet");
    2547              : 
    2548            0 :         if let FeStartupPacket::StartupMessage { params, .. } = sm {
    2549            0 :             if let Some(app_name) = params.get("application_name") {
    2550            0 :                 Span::current().record("application_name", field::display(app_name));
    2551            0 :             }
    2552            0 :         };
    2553              : 
    2554            0 :         Ok(())
    2555            0 :     }
    2556              : 
    2557              :     #[instrument(skip_all, fields(tenant_id, timeline_id))]
    2558              :     async fn process_query(
    2559              :         &mut self,
    2560              :         pgb: &mut PostgresBackend<IO>,
    2561              :         query_string: &str,
    2562              :     ) -> Result<(), QueryError> {
    2563            0 :         fail::fail_point!("simulated-bad-compute-connection", |_| {
    2564            0 :             info!("Hit failpoint for bad connection");
    2565            0 :             Err(QueryError::SimulatedConnectionError)
    2566            0 :         });
    2567              : 
    2568              :         fail::fail_point!("ps::connection-start::process-query");
    2569              : 
    2570              :         let ctx = self.connection_ctx.attached_child();
    2571              :         debug!("process query {query_string}");
    2572              :         let query = PageServiceCmd::parse(query_string)?;
    2573              :         match query {
    2574              :             PageServiceCmd::PageStream(PageStreamCmd {
    2575              :                 tenant_id,
    2576              :                 timeline_id,
    2577              :                 protocol_version,
    2578              :             }) => {
    2579              :                 tracing::Span::current()
    2580              :                     .record("tenant_id", field::display(tenant_id))
    2581              :                     .record("timeline_id", field::display(timeline_id));
    2582              : 
    2583              :                 self.check_permission(Some(tenant_id))?;
    2584              :                 let command_kind = match protocol_version {
    2585              :                     PagestreamProtocolVersion::V2 => ComputeCommandKind::PageStreamV2,
    2586              :                     PagestreamProtocolVersion::V3 => ComputeCommandKind::PageStreamV3,
    2587              :                 };
    2588              :                 COMPUTE_COMMANDS_COUNTERS.for_command(command_kind).inc();
    2589              : 
    2590              :                 self.handle_pagerequests(pgb, tenant_id, timeline_id, protocol_version, ctx)
    2591              :                     .await?;
    2592              :             }
    2593              :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2594              :                 tenant_id,
    2595              :                 timeline_id,
    2596              :                 lsn,
    2597              :                 gzip,
    2598              :                 replica,
    2599              :             }) => {
    2600              :                 tracing::Span::current()
    2601              :                     .record("tenant_id", field::display(tenant_id))
    2602              :                     .record("timeline_id", field::display(timeline_id));
    2603              : 
    2604              :                 self.check_permission(Some(tenant_id))?;
    2605              : 
    2606              :                 COMPUTE_COMMANDS_COUNTERS
    2607              :                     .for_command(ComputeCommandKind::Basebackup)
    2608              :                     .inc();
    2609              :                 let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording();
    2610            0 :                 let res = async {
    2611            0 :                     self.handle_basebackup_request(
    2612            0 :                         pgb,
    2613            0 :                         tenant_id,
    2614            0 :                         timeline_id,
    2615            0 :                         lsn,
    2616            0 :                         None,
    2617            0 :                         false,
    2618            0 :                         gzip,
    2619            0 :                         replica,
    2620            0 :                         &ctx,
    2621            0 :                     )
    2622            0 :                     .await?;
    2623            0 :                     pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2624            0 :                     Result::<(), QueryError>::Ok(())
    2625            0 :                 }
    2626              :                 .await;
    2627              :                 metric_recording.observe(&res);
    2628              :                 res?;
    2629              :             }
    2630              :             // same as basebackup, but result includes relational data as well
    2631              :             PageServiceCmd::FullBackup(FullBackupCmd {
    2632              :                 tenant_id,
    2633              :                 timeline_id,
    2634              :                 lsn,
    2635              :                 prev_lsn,
    2636              :             }) => {
    2637              :                 tracing::Span::current()
    2638              :                     .record("tenant_id", field::display(tenant_id))
    2639              :                     .record("timeline_id", field::display(timeline_id));
    2640              : 
    2641              :                 self.check_permission(Some(tenant_id))?;
    2642              : 
    2643              :                 COMPUTE_COMMANDS_COUNTERS
    2644              :                     .for_command(ComputeCommandKind::Fullbackup)
    2645              :                     .inc();
    2646              : 
    2647              :                 // Check that the timeline exists
    2648              :                 self.handle_basebackup_request(
    2649              :                     pgb,
    2650              :                     tenant_id,
    2651              :                     timeline_id,
    2652              :                     lsn,
    2653              :                     prev_lsn,
    2654              :                     true,
    2655              :                     false,
    2656              :                     false,
    2657              :                     &ctx,
    2658              :                 )
    2659              :                 .await?;
    2660              :                 pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2661              :             }
    2662              :             PageServiceCmd::Set => {
    2663              :                 // important because psycopg2 executes "SET datestyle TO 'ISO'"
    2664              :                 // on connect
    2665              :                 pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
    2666              :             }
    2667              :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2668              :                 tenant_shard_id,
    2669              :                 timeline_id,
    2670              :                 lsn,
    2671              :             }) => {
    2672              :                 tracing::Span::current()
    2673              :                     .record("tenant_id", field::display(tenant_shard_id))
    2674              :                     .record("timeline_id", field::display(timeline_id));
    2675              : 
    2676              :                 self.check_permission(Some(tenant_shard_id.tenant_id))?;
    2677              : 
    2678              :                 COMPUTE_COMMANDS_COUNTERS
    2679              :                     .for_command(ComputeCommandKind::LeaseLsn)
    2680              :                     .inc();
    2681              : 
    2682              :                 match self
    2683              :                     .handle_make_lsn_lease(pgb, tenant_shard_id, timeline_id, lsn, &ctx)
    2684              :                     .await
    2685              :                 {
    2686              :                     Ok(()) => {
    2687              :                         pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?
    2688              :                     }
    2689              :                     Err(e) => {
    2690              :                         error!("error obtaining lsn lease for {lsn}: {e:?}");
    2691              :                         pgb.write_message_noflush(&BeMessage::ErrorResponse(
    2692              :                             &e.to_string(),
    2693              :                             Some(e.pg_error_code()),
    2694              :                         ))?
    2695              :                     }
    2696              :                 };
    2697              :             }
    2698              :         }
    2699              : 
    2700              :         Ok(())
    2701              :     }
    2702              : }
    2703              : 
    2704              : impl From<GetActiveTenantError> for QueryError {
    2705            0 :     fn from(e: GetActiveTenantError) -> Self {
    2706            0 :         match e {
    2707            0 :             GetActiveTenantError::WaitForActiveTimeout { .. } => QueryError::Disconnected(
    2708            0 :                 ConnectionError::Io(io::Error::new(io::ErrorKind::TimedOut, e.to_string())),
    2709            0 :             ),
    2710              :             GetActiveTenantError::Cancelled
    2711              :             | GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
    2712            0 :                 QueryError::Shutdown
    2713              :             }
    2714            0 :             e @ GetActiveTenantError::NotFound(_) => QueryError::NotFound(format!("{e}").into()),
    2715            0 :             e => QueryError::Other(anyhow::anyhow!(e)),
    2716              :         }
    2717            0 :     }
    2718              : }
    2719              : 
    2720              : #[derive(Debug, thiserror::Error)]
    2721              : pub(crate) enum GetActiveTimelineError {
    2722              :     #[error(transparent)]
    2723              :     Tenant(GetActiveTenantError),
    2724              :     #[error(transparent)]
    2725              :     Timeline(#[from] GetTimelineError),
    2726              : }
    2727              : 
    2728              : impl From<GetActiveTimelineError> for QueryError {
    2729            0 :     fn from(e: GetActiveTimelineError) -> Self {
    2730            0 :         match e {
    2731            0 :             GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => QueryError::Shutdown,
    2732            0 :             GetActiveTimelineError::Tenant(e) => e.into(),
    2733            0 :             GetActiveTimelineError::Timeline(e) => QueryError::NotFound(format!("{e}").into()),
    2734              :         }
    2735            0 :     }
    2736              : }
    2737              : 
    2738              : impl From<crate::tenant::timeline::handle::HandleUpgradeError> for QueryError {
    2739            0 :     fn from(e: crate::tenant::timeline::handle::HandleUpgradeError) -> Self {
    2740            0 :         match e {
    2741            0 :             crate::tenant::timeline::handle::HandleUpgradeError::ShutDown => QueryError::Shutdown,
    2742            0 :         }
    2743            0 :     }
    2744              : }
    2745              : 
    2746            0 : fn set_tracing_field_shard_id(timeline: &Timeline) {
    2747            0 :     debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
    2748            0 :     tracing::Span::current().record(
    2749            0 :         "shard_id",
    2750            0 :         tracing::field::display(timeline.tenant_shard_id.shard_slug()),
    2751            0 :     );
    2752            0 :     debug_assert_current_span_has_tenant_and_timeline_id();
    2753            0 : }
    2754              : 
    2755              : struct WaitedForLsn(Lsn);
    2756              : impl From<WaitedForLsn> for Lsn {
    2757            0 :     fn from(WaitedForLsn(lsn): WaitedForLsn) -> Self {
    2758            0 :         lsn
    2759            0 :     }
    2760              : }
    2761              : 
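                      : // Editor's note: WaitedForLsn appears to act as a marker newtype: holding
                      : // one is evidence that the LSN has already been waited for, and the explicit
                      : // From conversion makes discarding that evidence a deliberate step.
                      : 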
    2762              : #[cfg(test)]
    2763              : mod tests {
    2764              :     use utils::shard::ShardCount;
    2765              : 
    2766              :     use super::*;
    2767              : 
    2768              :     #[test]
    2769            4 :     fn pageservice_cmd_parse() {
    2770            4 :         let tenant_id = TenantId::generate();
    2771            4 :         let timeline_id = TimelineId::generate();
    2772            4 :         let cmd =
    2773            4 :             PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id} {timeline_id}")).unwrap();
    2774            4 :         assert_eq!(
    2775            4 :             cmd,
    2776            4 :             PageServiceCmd::PageStream(PageStreamCmd {
    2777            4 :                 tenant_id,
    2778            4 :                 timeline_id,
    2779            4 :                 protocol_version: PagestreamProtocolVersion::V2,
    2780            4 :             })
    2781            4 :         );
    2782            4 :         let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id}")).unwrap();
    2783            4 :         assert_eq!(
    2784            4 :             cmd,
    2785            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2786            4 :                 tenant_id,
    2787            4 :                 timeline_id,
    2788            4 :                 lsn: None,
    2789            4 :                 gzip: false,
    2790            4 :                 replica: false
    2791            4 :             })
    2792            4 :         );
    2793            4 :         let cmd =
    2794            4 :             PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} --gzip")).unwrap();
    2795            4 :         assert_eq!(
    2796            4 :             cmd,
    2797            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2798            4 :                 tenant_id,
    2799            4 :                 timeline_id,
    2800            4 :                 lsn: None,
    2801            4 :                 gzip: true,
    2802            4 :                 replica: false
    2803            4 :             })
    2804            4 :         );
    2805            4 :         let cmd =
    2806            4 :             PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} latest")).unwrap();
    2807            4 :         assert_eq!(
    2808            4 :             cmd,
    2809            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2810            4 :                 tenant_id,
    2811            4 :                 timeline_id,
    2812            4 :                 lsn: None,
    2813            4 :                 gzip: false,
    2814            4 :                 replica: false
    2815            4 :             })
    2816            4 :         );
    2817            4 :         let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} 0/16ABCDE"))
    2818            4 :             .unwrap();
    2819            4 :         assert_eq!(
    2820            4 :             cmd,
    2821            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2822            4 :                 tenant_id,
    2823            4 :                 timeline_id,
    2824            4 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2825            4 :                 gzip: false,
    2826            4 :                 replica: false
    2827            4 :             })
    2828            4 :         );
    2829            4 :         let cmd = PageServiceCmd::parse(&format!(
    2830            4 :             "basebackup {tenant_id} {timeline_id} --replica --gzip"
    2831            4 :         ))
    2832            4 :         .unwrap();
    2833            4 :         assert_eq!(
    2834            4 :             cmd,
    2835            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2836            4 :                 tenant_id,
    2837            4 :                 timeline_id,
    2838            4 :                 lsn: None,
    2839            4 :                 gzip: true,
    2840            4 :                 replica: true
    2841            4 :             })
    2842            4 :         );
    2843            4 :         let cmd = PageServiceCmd::parse(&format!(
    2844            4 :             "basebackup {tenant_id} {timeline_id} 0/16ABCDE --replica --gzip"
    2845            4 :         ))
    2846            4 :         .unwrap();
    2847            4 :         assert_eq!(
    2848            4 :             cmd,
    2849            4 :             PageServiceCmd::BaseBackup(BaseBackupCmd {
    2850            4 :                 tenant_id,
    2851            4 :                 timeline_id,
    2852            4 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2853            4 :                 gzip: true,
    2854            4 :                 replica: true
    2855            4 :             })
    2856            4 :         );
    2857            4 :         let cmd = PageServiceCmd::parse(&format!("fullbackup {tenant_id} {timeline_id}")).unwrap();
    2858            4 :         assert_eq!(
    2859            4 :             cmd,
    2860            4 :             PageServiceCmd::FullBackup(FullBackupCmd {
    2861            4 :                 tenant_id,
    2862            4 :                 timeline_id,
    2863            4 :                 lsn: None,
    2864            4 :                 prev_lsn: None
    2865            4 :             })
    2866            4 :         );
    2867            4 :         let cmd = PageServiceCmd::parse(&format!(
    2868            4 :             "fullbackup {tenant_id} {timeline_id} 0/16ABCDE 0/16ABCDF"
    2869            4 :         ))
    2870            4 :         .unwrap();
    2871            4 :         assert_eq!(
    2872            4 :             cmd,
    2873            4 :             PageServiceCmd::FullBackup(FullBackupCmd {
    2874            4 :                 tenant_id,
    2875            4 :                 timeline_id,
    2876            4 :                 lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
    2877            4 :                 prev_lsn: Some(Lsn::from_str("0/16ABCDF").unwrap()),
    2878            4 :             })
    2879            4 :         );
    2880            4 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2881            4 :         let cmd = PageServiceCmd::parse(&format!(
    2882            4 :             "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
    2883            4 :         ))
    2884            4 :         .unwrap();
    2885            4 :         assert_eq!(
    2886            4 :             cmd,
    2887            4 :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2888            4 :                 tenant_shard_id,
    2889            4 :                 timeline_id,
    2890            4 :                 lsn: Lsn::from_str("0/16ABCDE").unwrap(),
    2891            4 :             })
    2892            4 :         );
    2893            4 :         let tenant_shard_id = TenantShardId::split(&tenant_shard_id, ShardCount(8))[1];
    2894            4 :         let cmd = PageServiceCmd::parse(&format!(
    2895            4 :             "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
    2896            4 :         ))
    2897            4 :         .unwrap();
    2898            4 :         assert_eq!(
    2899            4 :             cmd,
    2900            4 :             PageServiceCmd::LeaseLsn(LeaseLsnCmd {
    2901            4 :                 tenant_shard_id,
    2902            4 :                 timeline_id,
    2903            4 :                 lsn: Lsn::from_str("0/16ABCDE").unwrap(),
    2904            4 :             })
    2905            4 :         );
    2906            4 :         let cmd = PageServiceCmd::parse("set a = b").unwrap();
    2907            4 :         assert_eq!(cmd, PageServiceCmd::Set);
    2908            4 :         let cmd = PageServiceCmd::parse("SET foo").unwrap();
    2909            4 :         assert_eq!(cmd, PageServiceCmd::Set);
    2910            4 :     }
    2911              : 
    2912              :     #[test]
    2913            4 :     fn pageservice_cmd_err_handling() {
    2914            4 :         let tenant_id = TenantId::generate();
    2915            4 :         let timeline_id = TimelineId::generate();
    2916            4 :         let cmd = PageServiceCmd::parse("unknown_command");
    2917            4 :         assert!(cmd.is_err());
    2918            4 :         let cmd = PageServiceCmd::parse("pagestream_v2");
    2919            4 :         assert!(cmd.is_err());
    2920            4 :         let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx"));
    2921            4 :         assert!(cmd.is_err());
    2922            4 :         let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx {timeline_id}xxx"));
    2923            4 :         assert!(cmd.is_err());
    2924            4 :         let cmd = PageServiceCmd::parse(&format!(
    2925            4 :             "basebackup {tenant_id} {timeline_id} --gzip --gzip"
    2926            4 :         ));
    2927            4 :         assert!(cmd.is_err());
    2928            4 :         let cmd = PageServiceCmd::parse(&format!(
    2929            4 :             "basebackup {tenant_id} {timeline_id} --gzip --unknown"
    2930            4 :         ));
    2931            4 :         assert!(cmd.is_err());
    2932            4 :         let cmd = PageServiceCmd::parse(&format!(
    2933            4 :             "basebackup {tenant_id} {timeline_id} --gzip 0/16ABCDE"
    2934            4 :         ));
    2935            4 :         assert!(cmd.is_err());
    2936            4 :         let cmd = PageServiceCmd::parse(&format!("lease {tenant_id} {timeline_id} gzip 0/16ABCDE"));
    2937            4 :         assert!(cmd.is_err());
    2938            4 :     }
    2939              : }
        

Generated by: LCOV version 2.1-beta