Line data Source code
1 : //! The Page Service listens for client connections and serves their GetPage@LSN
2 : //! requests.
3 :
4 : use anyhow::{bail, Context};
5 : use async_compression::tokio::write::GzipEncoder;
6 : use bytes::Buf;
7 : use futures::FutureExt;
8 : use itertools::Itertools;
9 : use once_cell::sync::OnceCell;
10 : use pageserver_api::config::{
11 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
12 : PageServiceProtocolPipelinedExecutionStrategy,
13 : };
14 : use pageserver_api::models::{self, TenantState};
15 : use pageserver_api::models::{
16 : PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse,
17 : PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse,
18 : PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetSlruSegmentRequest,
19 : PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest, PagestreamNblocksResponse,
20 : PagestreamProtocolVersion, PagestreamRequest,
21 : };
22 : use pageserver_api::shard::TenantShardId;
23 : use postgres_backend::{
24 : is_expected_io_error, AuthType, PostgresBackend, PostgresBackendReader, QueryError,
25 : };
26 : use pq_proto::framed::ConnectionError;
27 : use pq_proto::FeStartupPacket;
28 : use pq_proto::{BeMessage, FeMessage, RowDescriptor};
29 : use std::borrow::Cow;
30 : use std::io;
31 : use std::num::NonZeroUsize;
32 : use std::str;
33 : use std::str::FromStr;
34 : use std::sync::Arc;
35 : use std::time::SystemTime;
36 : use std::time::{Duration, Instant};
37 : use tokio::io::{AsyncRead, AsyncWrite};
38 : use tokio::io::{AsyncWriteExt, BufWriter};
39 : use tokio::task::JoinHandle;
40 : use tokio_util::sync::CancellationToken;
41 : use tracing::*;
42 : use utils::sync::gate::{Gate, GateGuard};
43 : use utils::sync::spsc_fold;
44 : use utils::{
45 : auth::{Claims, Scope, SwappableJwtAuth},
46 : id::{TenantId, TimelineId},
47 : lsn::Lsn,
48 : simple_rcu::RcuReadGuard,
49 : };
50 :
51 : use crate::auth::check_permission;
52 : use crate::basebackup::BasebackupError;
53 : use crate::config::PageServerConf;
54 : use crate::context::{DownloadBehavior, RequestContext};
55 : use crate::metrics::{self, SmgrOpTimer};
56 : use crate::metrics::{ComputeCommandKind, COMPUTE_COMMANDS_COUNTERS, LIVE_CONNECTIONS};
57 : use crate::pgdatadir_mapping::Version;
58 : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
59 : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id;
60 : use crate::task_mgr::TaskKind;
61 : use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME};
62 : use crate::tenant::mgr::ShardSelector;
63 : use crate::tenant::mgr::TenantManager;
64 : use crate::tenant::mgr::{GetActiveTenantError, GetTenantError, ShardResolveResult};
65 : use crate::tenant::storage_layer::IoConcurrency;
66 : use crate::tenant::timeline::{self, WaitLsnError};
67 : use crate::tenant::GetTimelineError;
68 : use crate::tenant::PageReconstructError;
69 : use crate::tenant::Timeline;
70 : use crate::{basebackup, timed_after_cancellation};
71 : use pageserver_api::key::rel_block_to_key;
72 : use pageserver_api::models::PageTraceEvent;
73 : use pageserver_api::reltag::SlruKind;
74 : use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID;
75 : use postgres_ffi::BLCKSZ;
76 : use std::os::fd::AsRawFd;
77 :
78 : /// How long we may wait for a [`crate::tenant::mgr::TenantSlot::InProgress`] and/or a [`crate::tenant::Tenant`] which
79 : /// is not yet in state [`TenantState::Active`].
80 : ///
81 : /// NB: this is a different value than [`crate::http::routes::ACTIVE_TENANT_TIMEOUT`].
82 : const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
83 :
84 : ///////////////////////////////////////////////////////////////////////////////
85 :
86 : pub struct Listener {
87 : cancel: CancellationToken,
88 : /// Cancel the listener task through `cancel` to shut down the listener
89 : /// and get a handle on the existing connections.
90 : task: JoinHandle<Connections>,
91 : }
92 :
93 : pub struct Connections {
94 : cancel: CancellationToken,
95 : tasks: tokio::task::JoinSet<ConnectionHandlerResult>,
96 : gate: Gate,
97 : }
98 :
99 0 : pub fn spawn(
100 0 : conf: &'static PageServerConf,
101 0 : tenant_manager: Arc<TenantManager>,
102 0 : pg_auth: Option<Arc<SwappableJwtAuth>>,
103 0 : tcp_listener: tokio::net::TcpListener,
104 0 : ) -> Listener {
105 0 : let cancel = CancellationToken::new();
106 0 : let libpq_ctx = RequestContext::todo_child(
107 0 : TaskKind::LibpqEndpointListener,
108 0 : // listener task shouldn't need to download anything. (We will
109 0 : // create separate sub-contexts for each connection, with their
110 0 : // own download behavior. This context is used only to listen and
111 0 : // accept connections.)
112 0 : DownloadBehavior::Error,
113 0 : );
114 0 : let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
115 0 : "libpq listener",
116 0 : libpq_listener_main(
117 0 : conf,
118 0 : tenant_manager,
119 0 : pg_auth,
120 0 : tcp_listener,
121 0 : conf.pg_auth_type,
122 0 : conf.page_service_pipelining.clone(),
123 0 : libpq_ctx,
124 0 : cancel.clone(),
125 0 : )
126 0 : .map(anyhow::Ok),
127 0 : ));
128 0 :
129 0 : Listener { cancel, task }
130 0 : }
131 :
132 : impl Listener {
133 0 : pub async fn stop_accepting(self) -> Connections {
134 0 : self.cancel.cancel();
135 0 : self.task
136 0 : .await
137 0 : .expect("unreachable: we wrap the listener task in task_mgr::exit_on_panic_or_error")
138 0 : }
139 : }
140 : impl Connections {
141 0 : pub(crate) async fn shutdown(self) {
142 0 : let Self {
143 0 : cancel,
144 0 : mut tasks,
145 0 : gate,
146 0 : } = self;
147 0 : cancel.cancel();
148 0 : while let Some(res) = tasks.join_next().await {
149 0 : Self::handle_connection_completion(res);
150 0 : }
151 0 : gate.close().await;
152 0 : }
153 :
154 0 : fn handle_connection_completion(res: Result<anyhow::Result<()>, tokio::task::JoinError>) {
155 0 : match res {
156 0 : Ok(Ok(())) => {}
157 0 : Ok(Err(e)) => error!("error in page_service connection task: {:?}", e),
158 0 : Err(e) => error!("page_service connection task panicked: {:?}", e),
159 : }
160 0 : }
161 : }
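// Illustrative sketch (not part of the page service): the cancel-then-drain-then-close
// ordering used by `Connections::shutdown` above, reproduced standalone so the sequencing
// is explicit. Names are hypothetical; it only assumes tokio and tokio-util, which this
// crate already depends on.
#[cfg(test)]
mod connections_shutdown_sketch {
    use tokio_util::sync::CancellationToken;

    #[tokio::test]
    async fn cancel_then_drain_then_close() {
        let cancel = CancellationToken::new();
        let mut tasks = tokio::task::JoinSet::new();
        for _ in 0..3 {
            let cancel = cancel.clone();
            // Stand-in for a connection handler: runs until cancellation is observed.
            tasks.spawn(async move { cancel.cancelled().await });
        }
        // 1. Ask every connection task to stop.
        cancel.cancel();
        // 2. Wait for all of them to finish; panics/errors surface here.
        while let Some(res) = tasks.join_next().await {
            res.expect("connection task panicked");
        }
        // 3. Only now is it safe to close shared resources (the `gate` above).
    }
}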
162 :
163 : ///
164 : /// Main loop of the page service.
165 : ///
166 : /// Listens for connections, and launches a new handler task for each.
167 : ///
168 : /// Returns the set of still-open connections upon cancellation
169 : /// via `cancel`.
170 : ///
171 : #[allow(clippy::too_many_arguments)]
172 0 : pub async fn libpq_listener_main(
173 0 : conf: &'static PageServerConf,
174 0 : tenant_manager: Arc<TenantManager>,
175 0 : auth: Option<Arc<SwappableJwtAuth>>,
176 0 : listener: tokio::net::TcpListener,
177 0 : auth_type: AuthType,
178 0 : pipelining_config: PageServicePipeliningConfig,
179 0 : listener_ctx: RequestContext,
180 0 : listener_cancel: CancellationToken,
181 0 : ) -> Connections {
182 0 : let connections_cancel = CancellationToken::new();
183 0 : let connections_gate = Gate::default();
184 0 : let mut connection_handler_tasks = tokio::task::JoinSet::default();
185 :
186 : loop {
187 0 : let gate_guard = match connections_gate.enter() {
188 0 : Ok(guard) => guard,
189 0 : Err(_) => break,
190 : };
191 :
192 0 : let accepted = tokio::select! {
193 : biased;
194 0 : _ = listener_cancel.cancelled() => break,
195 0 : next = connection_handler_tasks.join_next(), if !connection_handler_tasks.is_empty() => {
196 0 : let res = next.expect("we don't poll while empty");
197 0 : Connections::handle_connection_completion(res);
198 0 : continue;
199 : }
200 0 : accepted = listener.accept() => accepted,
201 0 : };
202 0 :
203 0 : match accepted {
204 0 : Ok((socket, peer_addr)) => {
205 0 : // Connection established. Spawn a new task to handle it.
206 0 : debug!("accepted connection from {}", peer_addr);
207 0 : let local_auth = auth.clone();
208 0 : let connection_ctx = listener_ctx
209 0 : .detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);
210 0 : connection_handler_tasks.spawn(page_service_conn_main(
211 0 : conf,
212 0 : tenant_manager.clone(),
213 0 : local_auth,
214 0 : socket,
215 0 : auth_type,
216 0 : pipelining_config.clone(),
217 0 : connection_ctx,
218 0 : connections_cancel.child_token(),
219 0 : gate_guard,
220 0 : ));
221 : }
222 0 : Err(err) => {
223 0 : // accept() failed. Log the error, and loop back to retry on next connection.
224 0 : error!("accept() failed: {:?}", err);
225 : }
226 : }
227 : }
228 :
229 0 : debug!("page_service listener loop terminated");
230 :
231 0 : Connections {
232 0 : cancel: connections_cancel,
233 0 : tasks: connection_handler_tasks,
234 0 : gate: connections_gate,
235 0 : }
236 0 : }
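// Illustrative sketch: the loop above races cancellation, completed handler tasks, and
// `accept()` inside a `biased` select, so shutdown and task reaping are serviced before
// new work is accepted. A minimal standalone version of that shape (hypothetical names,
// tokio + tokio-util only):
#[cfg(test)]
mod accept_loop_sketch {
    use tokio_util::sync::CancellationToken;

    #[allow(dead_code)]
    async fn accept_until_cancelled(listener: tokio::net::TcpListener, cancel: CancellationToken) {
        let mut handlers = tokio::task::JoinSet::new();
        loop {
            tokio::select! {
                biased;
                // Checked first: stop accepting as soon as shutdown is requested.
                _ = cancel.cancelled() => break,
                // Reap finished handlers so the JoinSet does not grow without bound.
                Some(res) = handlers.join_next(), if !handlers.is_empty() => {
                    if let Err(e) = res {
                        eprintln!("handler task panicked: {e}");
                    }
                }
                // Lowest priority: accept new connections.
                accepted = listener.accept() => match accepted {
                    Ok((socket, _peer)) => {
                        handlers.spawn(async move { drop(socket) });
                    }
                    Err(e) => eprintln!("accept() failed: {e}"),
                },
            }
        }
    }
}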
237 :
238 : type ConnectionHandlerResult = anyhow::Result<()>;
239 :
240 : #[instrument(skip_all, fields(peer_addr, application_name))]
241 : #[allow(clippy::too_many_arguments)]
242 : async fn page_service_conn_main(
243 : conf: &'static PageServerConf,
244 : tenant_manager: Arc<TenantManager>,
245 : auth: Option<Arc<SwappableJwtAuth>>,
246 : socket: tokio::net::TcpStream,
247 : auth_type: AuthType,
248 : pipelining_config: PageServicePipeliningConfig,
249 : connection_ctx: RequestContext,
250 : cancel: CancellationToken,
251 : gate_guard: GateGuard,
252 : ) -> ConnectionHandlerResult {
253 : let _guard = LIVE_CONNECTIONS
254 : .with_label_values(&["page_service"])
255 : .guard();
256 :
257 : socket
258 : .set_nodelay(true)
259 : .context("could not set TCP_NODELAY")?;
260 :
261 : let socket_fd = socket.as_raw_fd();
262 :
263 : let peer_addr = socket.peer_addr().context("get peer address")?;
264 : tracing::Span::current().record("peer_addr", field::display(peer_addr));
265 :
266 : // Set up a read timeout of 10 minutes. The exact value is rather arbitrary; the requirements are:
267 : // - long enough for most valid compute connections
268 : // - less than infinite to stop us from "leaking" connections to long-gone computes
269 : //
270 : // no write timeout is used, because the kernel is assumed to error writes after some time.
271 : let mut socket = tokio_io_timeout::TimeoutReader::new(socket);
272 :
273 : let default_timeout_ms = 10 * 60 * 1000; // 10 minutes by default
274 0 : let socket_timeout_ms = (|| {
275 0 : fail::fail_point!("simulated-bad-compute-connection", |avg_timeout_ms| {
276 : // Exponential distribution for simulating poor network conditions;
277 : // in tests, avg_timeout_ms is expected to be around 15.
279 0 : if let Some(avg_timeout_ms) = avg_timeout_ms {
280 0 : let avg = avg_timeout_ms.parse::<i64>().unwrap() as f32;
281 0 : let u = rand::random::<f32>();
282 0 : ((1.0 - u).ln() / (-avg)) as u64
283 : } else {
284 0 : default_timeout_ms
285 : }
286 0 : });
287 0 : default_timeout_ms
288 : })();
289 :
290 : // A timeout here does not mean the client died; it can happen if it's just idle for
291 : // a while: we will tear down this PageServerHandler and instantiate a new one if/when
292 : // they reconnect.
293 : socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms)));
294 : let socket = Box::pin(socket);
295 :
296 : fail::fail_point!("ps::connection-start::pre-login");
297 :
298 : // XXX: pgbackend.run() should take the connection_ctx,
299 : // and create a child per-query context when it invokes process_query.
300 : // But it's in a shared crate, so we store connection_ctx inside PageServerHandler
301 : // and create the per-query context in process_query ourselves.
302 : let mut conn_handler = PageServerHandler::new(
303 : conf,
304 : tenant_manager,
305 : auth,
306 : pipelining_config,
307 : connection_ctx,
308 : cancel.clone(),
309 : gate_guard,
310 : );
311 : let pgbackend = PostgresBackend::new_from_io(socket_fd, socket, peer_addr, auth_type, None)?;
312 :
313 : match pgbackend.run(&mut conn_handler, &cancel).await {
314 : Ok(()) => {
315 : // we've been requested to shut down
316 : Ok(())
317 : }
318 : Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
319 : if is_expected_io_error(&io_error) {
320 : info!("Postgres client disconnected ({io_error})");
321 : Ok(())
322 : } else {
323 : let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
324 : Err(io_error).context(format!(
325 : "Postgres connection error for tenant_id={:?} client at peer_addr={}",
326 : tenant_id, peer_addr
327 : ))
328 : }
329 : }
330 : other => {
331 : let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
332 : other.context(format!(
333 : "Postgres query error for tenant_id={:?} client peer_addr={}",
334 : tenant_id, peer_addr
335 : ))
336 : }
337 : }
338 : }
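// Illustrative sketch of the idle-read timeout described above: the real code wraps the
// socket in `tokio_io_timeout::TimeoutReader` so the timeout applies to every read; a
// simplified per-call equivalent using plain tokio would look like this (hypothetical
// helper, not used by the page service):
#[cfg(test)]
mod read_timeout_sketch {
    use std::time::Duration;
    use tokio::io::{AsyncRead, AsyncReadExt};

    #[allow(dead_code)]
    async fn read_with_idle_timeout<R: AsyncRead + Unpin>(
        reader: &mut R,
        buf: &mut [u8],
        idle: Duration,
    ) -> std::io::Result<usize> {
        match tokio::time::timeout(idle, reader.read(buf)).await {
            Ok(res) => res,
            // A timeout does not necessarily mean the peer died; the caller tears the
            // connection down and the compute reconnects when it next needs a page.
            Err(_elapsed) => Err(std::io::Error::new(
                std::io::ErrorKind::TimedOut,
                "no data from compute within idle timeout",
            )),
        }
    }
}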
339 :
340 : struct PageServerHandler {
341 : conf: &'static PageServerConf,
342 : auth: Option<Arc<SwappableJwtAuth>>,
343 : claims: Option<Claims>,
344 :
345 : /// The context created for the lifetime of the connection
346 : /// serviced by this PageServerHandler.
347 : /// For each query received over the connection,
348 : /// `process_query` creates a child context from this one.
349 : connection_ctx: RequestContext,
350 :
351 : cancel: CancellationToken,
352 :
353 : /// None only while pagestream protocol is being processed.
354 : timeline_handles: Option<TimelineHandles>,
355 :
356 : pipelining_config: PageServicePipeliningConfig,
357 :
358 : gate_guard: GateGuard,
359 : }
360 :
361 : struct TimelineHandles {
362 : wrapper: TenantManagerWrapper,
363 : /// Note on size: the typical size of this map is 1. The largest size we expect
364 : /// to see is the number of shards divided by the number of pageservers (typically < 2),
365 : /// or the ratio used when splitting shards (i.e. how many children are created from one
366 : /// parent shard), where a "large" number might be ~8.
367 : handles: timeline::handle::Cache<TenantManagerTypes>,
368 : }
369 :
370 : impl TimelineHandles {
371 0 : fn new(tenant_manager: Arc<TenantManager>) -> Self {
372 0 : Self {
373 0 : wrapper: TenantManagerWrapper {
374 0 : tenant_manager,
375 0 : tenant_id: OnceCell::new(),
376 0 : },
377 0 : handles: Default::default(),
378 0 : }
379 0 : }
380 0 : async fn get(
381 0 : &mut self,
382 0 : tenant_id: TenantId,
383 0 : timeline_id: TimelineId,
384 0 : shard_selector: ShardSelector,
385 0 : ) -> Result<timeline::handle::Handle<TenantManagerTypes>, GetActiveTimelineError> {
386 0 : if *self.wrapper.tenant_id.get_or_init(|| tenant_id) != tenant_id {
387 0 : return Err(GetActiveTimelineError::Tenant(
388 0 : GetActiveTenantError::SwitchedTenant,
389 0 : ));
390 0 : }
391 0 : self.handles
392 0 : .get(timeline_id, shard_selector, &self.wrapper)
393 0 : .await
394 0 : .map_err(|e| match e {
395 0 : timeline::handle::GetError::TenantManager(e) => e,
396 : timeline::handle::GetError::TimelineGateClosed => {
397 0 : trace!("timeline gate closed");
398 0 : GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
399 : }
400 : timeline::handle::GetError::PerTimelineStateShutDown => {
401 0 : trace!("per-timeline state shut down");
402 0 : GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
403 : }
404 0 : })
405 0 : }
406 :
407 0 : fn tenant_id(&self) -> Option<TenantId> {
408 0 : self.wrapper.tenant_id.get().copied()
409 0 : }
410 : }
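// Illustrative sketch of the "pin the first tenant_id" check in `TimelineHandles::get`
// above: `OnceCell::get_or_init` stores the first value it sees and never overwrites it,
// so a later request for a different tenant can be detected and rejected
// (`GetActiveTenantError::SwitchedTenant`). Hypothetical test using only `once_cell`,
// which is already a dependency:
#[cfg(test)]
mod pinned_tenant_sketch {
    #[test]
    fn second_value_does_not_overwrite_the_first() {
        let pinned: once_cell::sync::OnceCell<u32> = once_cell::sync::OnceCell::new();
        let first = 1u32;
        let second = 2u32;
        // The first request pins the value.
        assert_eq!(*pinned.get_or_init(|| first), first);
        // A later request with a different value does not replace it...
        assert_eq!(*pinned.get_or_init(|| second), first);
        // ...so the caller can compare and bail out on a mismatch.
        assert_ne!(*pinned.get().unwrap(), second);
    }
}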
411 :
412 : pub(crate) struct TenantManagerWrapper {
413 : tenant_manager: Arc<TenantManager>,
414 : // We do not support switching tenant_id on a connection at this point.
415 : // We can add support for this later if needed without changing
416 : // the protocol.
417 : tenant_id: once_cell::sync::OnceCell<TenantId>,
418 : }
419 :
420 : #[derive(Debug)]
421 : pub(crate) struct TenantManagerTypes;
422 :
423 : impl timeline::handle::Types for TenantManagerTypes {
424 : type TenantManagerError = GetActiveTimelineError;
425 : type TenantManager = TenantManagerWrapper;
426 : type Timeline = Arc<Timeline>;
427 : }
428 :
429 : impl timeline::handle::ArcTimeline<TenantManagerTypes> for Arc<Timeline> {
430 0 : fn gate(&self) -> &utils::sync::gate::Gate {
431 0 : &self.gate
432 0 : }
433 :
434 0 : fn shard_timeline_id(&self) -> timeline::handle::ShardTimelineId {
435 0 : Timeline::shard_timeline_id(self)
436 0 : }
437 :
438 0 : fn per_timeline_state(&self) -> &timeline::handle::PerTimelineState<TenantManagerTypes> {
439 0 : &self.handles
440 0 : }
441 :
442 0 : fn get_shard_identity(&self) -> &pageserver_api::shard::ShardIdentity {
443 0 : Timeline::get_shard_identity(self)
444 0 : }
445 : }
446 :
447 : impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrapper {
448 0 : async fn resolve(
449 0 : &self,
450 0 : timeline_id: TimelineId,
451 0 : shard_selector: ShardSelector,
452 0 : ) -> Result<Arc<Timeline>, GetActiveTimelineError> {
453 0 : let tenant_id = self.tenant_id.get().expect("we set this in get()");
454 0 : let timeout = ACTIVE_TENANT_TIMEOUT;
455 0 : let wait_start = Instant::now();
456 0 : let deadline = wait_start + timeout;
457 0 : let tenant_shard = loop {
458 0 : let resolved = self
459 0 : .tenant_manager
460 0 : .resolve_attached_shard(tenant_id, shard_selector);
461 0 : match resolved {
462 0 : ShardResolveResult::Found(tenant_shard) => break tenant_shard,
463 : ShardResolveResult::NotFound => {
464 0 : return Err(GetActiveTimelineError::Tenant(
465 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
466 0 : ));
467 : }
468 0 : ShardResolveResult::InProgress(barrier) => {
469 0 : // We can't authoritatively answer right now: wait for InProgress state
470 0 : // to end, then try again
471 0 : tokio::select! {
472 0 : _ = barrier.wait() => {
473 0 : // The barrier completed: proceed around the loop to try looking up again
474 0 : },
475 0 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
476 0 : return Err(GetActiveTimelineError::Tenant(GetActiveTenantError::WaitForActiveTimeout {
477 0 : latest_state: None,
478 0 : wait_time: timeout,
479 0 : }));
480 : }
481 : }
482 : }
483 : };
484 : };
485 :
486 0 : tracing::debug!("Waiting for tenant to enter active state...");
487 0 : tenant_shard
488 0 : .wait_to_become_active(deadline.duration_since(Instant::now()))
489 0 : .await
490 0 : .map_err(GetActiveTimelineError::Tenant)?;
491 :
492 0 : let timeline = tenant_shard
493 0 : .get_timeline(timeline_id, true)
494 0 : .map_err(GetActiveTimelineError::Timeline)?;
495 0 : Ok(timeline)
496 0 : }
497 : }
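// Illustrative sketch of the deadline handling in `resolve` above: one overall deadline
// is computed up front, and each wait (the InProgress barrier, then wait_to_become_active)
// only gets the budget that is left. A compact standalone version of that shape
// (hypothetical names, tokio only):
#[cfg(test)]
mod deadline_budget_sketch {
    use std::time::Instant;

    async fn wait_with_remaining_budget<F: std::future::Future>(
        deadline: Instant,
        fut: F,
    ) -> Result<F::Output, tokio::time::error::Elapsed> {
        // Saturates to zero if the deadline has already passed, so an
        // exhausted budget turns into an immediate timeout.
        let remaining = deadline.saturating_duration_since(Instant::now());
        tokio::time::timeout(remaining, fut).await
    }

    #[tokio::test]
    async fn exhausted_budget_times_out() {
        let deadline = Instant::now(); // no budget left
        let res = wait_with_remaining_budget(deadline, std::future::pending::<()>()).await;
        assert!(res.is_err());
    }
}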
498 :
499 : #[derive(thiserror::Error, Debug)]
500 : enum PageStreamError {
501 : /// We encountered an error that should prompt the client to reconnect:
502 : /// in practice this means we drop the connection without sending a response.
503 : #[error("Reconnect required: {0}")]
504 : Reconnect(Cow<'static, str>),
505 :
506 : /// We were instructed to shutdown while processing the query
507 : #[error("Shutting down")]
508 : Shutdown,
509 :
510 : /// Something went wrong reading a page: this likely indicates a pageserver bug
511 : #[error("Read error")]
512 : Read(#[source] PageReconstructError),
513 :
514 : /// Ran out of time waiting for an LSN
515 : #[error("LSN timeout: {0}")]
516 : LsnTimeout(WaitLsnError),
517 :
518 : /// The entity required to serve the request (tenant or timeline) is not found,
519 : /// or is not found in a suitable state to serve a request.
520 : #[error("Not found: {0}")]
521 : NotFound(Cow<'static, str>),
522 :
523 : /// Request asked for something that doesn't make sense, like an invalid LSN
524 : #[error("Bad request: {0}")]
525 : BadRequest(Cow<'static, str>),
526 : }
527 :
528 : impl From<PageReconstructError> for PageStreamError {
529 0 : fn from(value: PageReconstructError) -> Self {
530 0 : match value {
531 0 : PageReconstructError::Cancelled => Self::Shutdown,
532 0 : e => Self::Read(e),
533 : }
534 0 : }
535 : }
536 :
537 : impl From<GetActiveTimelineError> for PageStreamError {
538 0 : fn from(value: GetActiveTimelineError) -> Self {
539 0 : match value {
540 : GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled)
541 : | GetActiveTimelineError::Tenant(GetActiveTenantError::WillNotBecomeActive(
542 : TenantState::Stopping { .. },
543 : ))
544 0 : | GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown) => Self::Shutdown,
545 0 : GetActiveTimelineError::Tenant(e) => Self::NotFound(format!("{e}").into()),
546 0 : GetActiveTimelineError::Timeline(e) => Self::NotFound(format!("{e}").into()),
547 : }
548 0 : }
549 : }
550 :
551 : impl From<WaitLsnError> for PageStreamError {
552 0 : fn from(value: WaitLsnError) -> Self {
553 0 : match value {
554 0 : e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e),
555 0 : WaitLsnError::Shutdown => Self::Shutdown,
556 0 : e @ WaitLsnError::BadState { .. } => Self::Reconnect(format!("{e}").into()),
557 : }
558 0 : }
559 : }
560 :
561 : impl From<WaitLsnError> for QueryError {
562 0 : fn from(value: WaitLsnError) -> Self {
563 0 : match value {
564 0 : e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)),
565 0 : WaitLsnError::Shutdown => Self::Shutdown,
566 0 : WaitLsnError::BadState { .. } => Self::Reconnect,
567 : }
568 0 : }
569 : }
570 :
571 : #[derive(thiserror::Error, Debug)]
572 : struct BatchedPageStreamError {
573 : req: PagestreamRequest,
574 : err: PageStreamError,
575 : }
576 :
577 : impl std::fmt::Display for BatchedPageStreamError {
578 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
579 0 : self.err.fmt(f)
580 0 : }
581 : }
582 :
583 : struct BatchedGetPageRequest {
584 : req: PagestreamGetPageRequest,
585 : timer: SmgrOpTimer,
586 : }
587 :
588 : #[cfg(feature = "testing")]
589 : struct BatchedTestRequest {
590 : req: models::PagestreamTestRequest,
591 : timer: SmgrOpTimer,
592 : }
593 :
594 : /// NB: we only hold [`timeline::handle::WeakHandle`] inside this enum,
595 : /// so that we don't keep the [`Timeline::gate`] open while the batch
596 : /// is being built up inside the [`spsc_fold`] (pagestream pipelining).
597 : enum BatchedFeMessage {
598 : Exists {
599 : span: Span,
600 : timer: SmgrOpTimer,
601 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
602 : req: models::PagestreamExistsRequest,
603 : },
604 : Nblocks {
605 : span: Span,
606 : timer: SmgrOpTimer,
607 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
608 : req: models::PagestreamNblocksRequest,
609 : },
610 : GetPage {
611 : span: Span,
612 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
613 : effective_request_lsn: Lsn,
614 : pages: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
615 : },
616 : DbSize {
617 : span: Span,
618 : timer: SmgrOpTimer,
619 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
620 : req: models::PagestreamDbSizeRequest,
621 : },
622 : GetSlruSegment {
623 : span: Span,
624 : timer: SmgrOpTimer,
625 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
626 : req: models::PagestreamGetSlruSegmentRequest,
627 : },
628 : #[cfg(feature = "testing")]
629 : Test {
630 : span: Span,
631 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
632 : requests: Vec<BatchedTestRequest>,
633 : },
634 : RespondError {
635 : span: Span,
636 : error: BatchedPageStreamError,
637 : },
638 : }
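// Illustrative sketch of the weak-handle rule described above: a queued batch holds only
// a weak reference and upgrades it right before execution, so the batch never keeps the
// timeline (and its gate) alive on its own. Shown with std `Arc`/`Weak` standing in for
// `timeline::handle::WeakHandle` (hypothetical types):
#[cfg(test)]
mod weak_handle_sketch {
    use std::sync::{Arc, Weak};

    struct FakeTimeline;

    #[test]
    fn queued_batch_does_not_keep_timeline_alive() {
        let timeline = Arc::new(FakeTimeline);
        // What goes into the batch: a weak reference only.
        let queued: Weak<FakeTimeline> = Arc::downgrade(&timeline);
        assert!(queued.upgrade().is_some(), "upgrade works while the timeline lives");

        // If the timeline shuts down while the batch is still queued...
        drop(timeline);
        // ...execution observes that via a failed upgrade and turns it into a
        // shutdown error instead of blocking timeline teardown.
        assert!(queued.upgrade().is_none());
    }
}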
639 :
640 : impl BatchedFeMessage {
641 0 : fn observe_execution_start(&mut self, at: Instant) {
642 0 : match self {
643 0 : BatchedFeMessage::Exists { timer, .. }
644 0 : | BatchedFeMessage::Nblocks { timer, .. }
645 0 : | BatchedFeMessage::DbSize { timer, .. }
646 0 : | BatchedFeMessage::GetSlruSegment { timer, .. } => {
647 0 : timer.observe_execution_start(at);
648 0 : }
649 0 : BatchedFeMessage::GetPage { pages, .. } => {
650 0 : for page in pages {
651 0 : page.timer.observe_execution_start(at);
652 0 : }
653 : }
654 : #[cfg(feature = "testing")]
655 0 : BatchedFeMessage::Test { requests, .. } => {
656 0 : for req in requests {
657 0 : req.timer.observe_execution_start(at);
658 0 : }
659 : }
660 0 : BatchedFeMessage::RespondError { .. } => {}
661 : }
662 0 : }
663 : }
664 :
665 : impl PageServerHandler {
666 0 : pub fn new(
667 0 : conf: &'static PageServerConf,
668 0 : tenant_manager: Arc<TenantManager>,
669 0 : auth: Option<Arc<SwappableJwtAuth>>,
670 0 : pipelining_config: PageServicePipeliningConfig,
671 0 : connection_ctx: RequestContext,
672 0 : cancel: CancellationToken,
673 0 : gate_guard: GateGuard,
674 0 : ) -> Self {
675 0 : PageServerHandler {
676 0 : conf,
677 0 : auth,
678 0 : claims: None,
679 0 : connection_ctx,
680 0 : timeline_handles: Some(TimelineHandles::new(tenant_manager)),
681 0 : cancel,
682 0 : pipelining_config,
683 0 : gate_guard,
684 0 : }
685 0 : }
686 :
687 : /// Flushes the output buffer of `pgb`, racing the flush against the given cancellation token.
688 : /// Pass in the most specific cancellation token available for the operation at hand:
689 : /// a timeline's token when flushing on behalf of that timeline, or the tenant /
690 : /// connection level token otherwise, so that the flush aborts promptly on shutdown.
691 : ///
692 : /// Returns [`QueryError::Shutdown`] if the token fires before the flush completes.
693 0 : async fn flush_cancellable<IO>(
694 0 : &self,
695 0 : pgb: &mut PostgresBackend<IO>,
696 0 : cancel: &CancellationToken,
697 0 : ) -> Result<(), QueryError>
698 0 : where
699 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
700 0 : {
701 0 : tokio::select!(
702 0 : flush_r = pgb.flush() => {
703 0 : Ok(flush_r?)
704 : },
705 0 : _ = cancel.cancelled() => {
706 0 : Err(QueryError::Shutdown)
707 : }
708 : )
709 0 : }
710 :
711 : #[allow(clippy::too_many_arguments)]
712 0 : async fn pagestream_read_message<IO>(
713 0 : pgb: &mut PostgresBackendReader<IO>,
714 0 : tenant_id: TenantId,
715 0 : timeline_id: TimelineId,
716 0 : timeline_handles: &mut TimelineHandles,
717 0 : cancel: &CancellationToken,
718 0 : ctx: &RequestContext,
719 0 : protocol_version: PagestreamProtocolVersion,
720 0 : parent_span: Span,
721 0 : ) -> Result<Option<BatchedFeMessage>, QueryError>
722 0 : where
723 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
724 0 : {
725 0 : let msg = tokio::select! {
726 : biased;
727 0 : _ = cancel.cancelled() => {
728 0 : return Err(QueryError::Shutdown)
729 : }
730 0 : msg = pgb.read_message() => { msg }
731 0 : };
732 0 :
733 0 : let received_at = Instant::now();
734 :
735 0 : let copy_data_bytes = match msg? {
736 0 : Some(FeMessage::CopyData(bytes)) => bytes,
737 : Some(FeMessage::Terminate) => {
738 0 : return Ok(None);
739 : }
740 0 : Some(m) => {
741 0 : return Err(QueryError::Other(anyhow::anyhow!(
742 0 : "unexpected message: {m:?} during COPY"
743 0 : )));
744 : }
745 : None => {
746 0 : return Ok(None);
747 : } // client disconnected
748 : };
749 0 : trace!("query: {copy_data_bytes:?}");
750 :
751 0 : fail::fail_point!("ps::handle-pagerequest-message");
752 :
753 : // parse request
754 0 : let neon_fe_msg =
755 0 : PagestreamFeMessage::parse(&mut copy_data_bytes.reader(), protocol_version)?;
756 :
757 : // TODO: turn into an async closure once available to avoid repeating received_at
758 0 : async fn record_op_start_and_throttle(
759 0 : shard: &timeline::handle::Handle<TenantManagerTypes>,
760 0 : op: metrics::SmgrQueryType,
761 0 : received_at: Instant,
762 0 : ) -> Result<SmgrOpTimer, QueryError> {
763 0 : // It's important to start the smgr op metric recorder as early as possible
764 0 : // so that the _started counters are incremented before we do
765 0 : // any serious waiting, e.g., for throttle, batching, or actual request handling.
766 0 : let mut timer = shard.query_metrics.start_smgr_op(op, received_at);
767 0 : let now = Instant::now();
768 0 : timer.observe_throttle_start(now);
769 0 : let throttled = tokio::select! {
770 0 : res = shard.pagestream_throttle.throttle(1, now) => res,
771 0 : _ = shard.cancel.cancelled() => return Err(QueryError::Shutdown),
772 : };
773 0 : timer.observe_throttle_done(throttled);
774 0 : Ok(timer)
775 0 : }
776 :
777 0 : let batched_msg = match neon_fe_msg {
778 0 : PagestreamFeMessage::Exists(req) => {
779 0 : let shard = timeline_handles
780 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
781 0 : .await?;
782 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
783 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
784 0 : let timer = record_op_start_and_throttle(
785 0 : &shard,
786 0 : metrics::SmgrQueryType::GetRelExists,
787 0 : received_at,
788 0 : )
789 0 : .await?;
790 0 : BatchedFeMessage::Exists {
791 0 : span,
792 0 : timer,
793 0 : shard: shard.downgrade(),
794 0 : req,
795 0 : }
796 : }
797 0 : PagestreamFeMessage::Nblocks(req) => {
798 0 : let shard = timeline_handles
799 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
800 0 : .await?;
801 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
802 0 : let timer = record_op_start_and_throttle(
803 0 : &shard,
804 0 : metrics::SmgrQueryType::GetRelSize,
805 0 : received_at,
806 0 : )
807 0 : .await?;
808 0 : BatchedFeMessage::Nblocks {
809 0 : span,
810 0 : timer,
811 0 : shard: shard.downgrade(),
812 0 : req,
813 0 : }
814 : }
815 0 : PagestreamFeMessage::DbSize(req) => {
816 0 : let shard = timeline_handles
817 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
818 0 : .await?;
819 0 : let span = tracing::info_span!(parent: &parent_span, "handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
820 0 : let timer = record_op_start_and_throttle(
821 0 : &shard,
822 0 : metrics::SmgrQueryType::GetDbSize,
823 0 : received_at,
824 0 : )
825 0 : .await?;
826 0 : BatchedFeMessage::DbSize {
827 0 : span,
828 0 : timer,
829 0 : shard: shard.downgrade(),
830 0 : req,
831 0 : }
832 : }
833 0 : PagestreamFeMessage::GetSlruSegment(req) => {
834 0 : let shard = timeline_handles
835 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
836 0 : .await?;
837 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
838 0 : let timer = record_op_start_and_throttle(
839 0 : &shard,
840 0 : metrics::SmgrQueryType::GetSlruSegment,
841 0 : received_at,
842 0 : )
843 0 : .await?;
844 0 : BatchedFeMessage::GetSlruSegment {
845 0 : span,
846 0 : timer,
847 0 : shard: shard.downgrade(),
848 0 : req,
849 0 : }
850 : }
851 0 : PagestreamFeMessage::GetPage(req) => {
852 : // avoid a somewhat costly Span::record() by constructing the entire span in one go.
853 : macro_rules! mkspan {
854 : (before shard routing) => {{
855 : tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn)
856 : }};
857 : ($shard_id:expr) => {{
858 : tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn, shard_id = %$shard_id)
859 : }};
860 : }
861 :
862 : macro_rules! respond_error {
863 : ($span:expr, $error:expr) => {{
864 : let error = BatchedFeMessage::RespondError {
865 : span: $span,
866 : error: BatchedPageStreamError {
867 : req: req.hdr,
868 : err: $error,
869 : },
870 : };
871 : Ok(Some(error))
872 : }};
873 : }
874 :
875 0 : let key = rel_block_to_key(req.rel, req.blkno);
876 0 : let shard = match timeline_handles
877 0 : .get(tenant_id, timeline_id, ShardSelector::Page(key))
878 0 : .await
879 : {
880 0 : Ok(tl) => tl,
881 0 : Err(e) => {
882 0 : let span = mkspan!(before shard routing);
883 0 : match e {
884 : GetActiveTimelineError::Tenant(GetActiveTenantError::NotFound(_)) => {
885 : // We already know this tenant exists in general, because we resolved it at the
886 : // start of the connection. Getting a NotFound here indicates that the shard containing
887 : // the requested page is not present on this node: the client's knowledge of shard->pageserver
888 : // mapping is out of date.
889 : //
890 : // Closing the connection by returning `PageStreamError::Reconnect` has the side effect of rate-limiting the above message, via the
891 : // client's reconnect backoff, as well as hopefully prompting the client to load its updated configuration
892 : // and talk to a different pageserver.
893 0 : return respond_error!(
894 0 : span,
895 0 : PageStreamError::Reconnect(
896 0 : "getpage@lsn request routed to wrong shard".into()
897 0 : )
898 0 : );
899 : }
900 0 : e => {
901 0 : return respond_error!(span, e.into());
902 : }
903 : }
904 : }
905 : };
906 0 : let span = mkspan!(shard.tenant_shard_id.shard_slug());
907 :
908 0 : let timer = record_op_start_and_throttle(
909 0 : &shard,
910 0 : metrics::SmgrQueryType::GetPageAtLsn,
911 0 : received_at,
912 0 : )
913 0 : .await?;
914 :
915 : // We're holding the Handle
916 0 : let effective_request_lsn = match Self::wait_or_get_last_lsn(
917 0 : &shard,
918 0 : req.hdr.request_lsn,
919 0 : req.hdr.not_modified_since,
920 0 : &shard.get_applied_gc_cutoff_lsn(),
921 0 : ctx,
922 0 : )
923 0 : // TODO: if we actually need to wait for lsn here, it delays the entire batch which doesn't need to wait
924 0 : .await
925 : {
926 0 : Ok(lsn) => lsn,
927 0 : Err(e) => {
928 0 : return respond_error!(span, e);
929 : }
930 : };
931 : BatchedFeMessage::GetPage {
932 0 : span,
933 0 : shard: shard.downgrade(),
934 0 : effective_request_lsn,
935 0 : pages: smallvec::smallvec![BatchedGetPageRequest { req, timer }],
936 : }
937 : }
938 : #[cfg(feature = "testing")]
939 0 : PagestreamFeMessage::Test(req) => {
940 0 : let shard = timeline_handles
941 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
942 0 : .await?;
943 0 : let span = tracing::info_span!(parent: &parent_span, "handle_test_request", shard_id = %shard.tenant_shard_id.shard_slug());
944 0 : let timer =
945 0 : record_op_start_and_throttle(&shard, metrics::SmgrQueryType::Test, received_at)
946 0 : .await?;
947 0 : BatchedFeMessage::Test {
948 0 : span,
949 0 : shard: shard.downgrade(),
950 0 : requests: vec![BatchedTestRequest { req, timer }],
951 0 : }
952 : }
953 : };
954 0 : Ok(Some(batched_msg))
955 0 : }
956 :
957 : /// Post-condition: `batch` is Some()
958 : #[instrument(skip_all, level = tracing::Level::TRACE)]
959 : #[allow(clippy::boxed_local)]
960 : fn pagestream_do_batch(
961 : max_batch_size: NonZeroUsize,
962 : batch: &mut Result<BatchedFeMessage, QueryError>,
963 : this_msg: Result<BatchedFeMessage, QueryError>,
964 : ) -> Result<(), Result<BatchedFeMessage, QueryError>> {
965 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
966 :
967 : let this_msg = match this_msg {
968 : Ok(this_msg) => this_msg,
969 : Err(e) => return Err(Err(e)),
970 : };
971 :
972 : match (&mut *batch, this_msg) {
973 : // something batched already, let's see if we can add this message to the batch
974 : (
975 : Ok(BatchedFeMessage::GetPage {
976 : span: _,
977 : shard: accum_shard,
978 : pages: ref mut accum_pages,
979 : effective_request_lsn: accum_lsn,
980 : }),
981 : BatchedFeMessage::GetPage {
982 : span: _,
983 : shard: this_shard,
984 : pages: this_pages,
985 : effective_request_lsn: this_lsn,
986 : },
987 0 : ) if (|| {
988 0 : assert_eq!(this_pages.len(), 1);
989 0 : if accum_pages.len() >= max_batch_size.get() {
990 0 : trace!(%accum_lsn, %this_lsn, %max_batch_size, "stopping batching because of batch size");
991 0 : assert_eq!(accum_pages.len(), max_batch_size.get());
992 0 : return false;
993 0 : }
994 0 : if !accum_shard.is_same_handle_as(&this_shard) {
995 0 : trace!(%accum_lsn, %this_lsn, "stopping batching because timeline object mismatch");
996 : // TODO: we _could_ batch & execute each shard separately (and in parallel).
997 : // But the current logic for keeping responses in order does not support that.
998 0 : return false;
999 0 : }
1000 0 : // the vectored get currently only supports a single LSN, so, bounce as soon
1001 0 : // as the effective request_lsn changes
1002 0 : if *accum_lsn != this_lsn {
1003 0 : trace!(%accum_lsn, %this_lsn, "stopping batching because LSN changed");
1004 0 : return false;
1005 0 : }
1006 0 : true
1007 : })() =>
1008 : {
1009 : // ok to batch
1010 : accum_pages.extend(this_pages);
1011 : Ok(())
1012 : }
1013 : #[cfg(feature = "testing")]
1014 : (
1015 : Ok(BatchedFeMessage::Test {
1016 : shard: accum_shard,
1017 : requests: accum_requests,
1018 : ..
1019 : }),
1020 : BatchedFeMessage::Test {
1021 : shard: this_shard,
1022 : requests: this_requests,
1023 : ..
1024 : },
1025 0 : ) if (|| {
1026 0 : assert!(this_requests.len() == 1);
1027 0 : if accum_requests.len() >= max_batch_size.get() {
1028 0 : trace!(%max_batch_size, "stopping batching because of batch size");
1029 0 : assert_eq!(accum_requests.len(), max_batch_size.get());
1030 0 : return false;
1031 0 : }
1032 0 : if !accum_shard.is_same_handle_as(&this_shard) {
1033 0 : trace!("stopping batching because timeline object mismatch");
1034 : // TODO: we _could_ batch & execute each shard separately (and in parallel).
1035 : // But the current logic for keeping responses in order does not support that.
1036 0 : return false;
1037 0 : }
1038 0 : let this_batch_key = this_requests[0].req.batch_key;
1039 0 : let accum_batch_key = accum_requests[0].req.batch_key;
1040 0 : if this_requests[0].req.batch_key != accum_requests[0].req.batch_key {
1041 0 : trace!(%accum_batch_key, %this_batch_key, "stopping batching because batch key changed");
1042 0 : return false;
1043 0 : }
1044 0 : true
1045 : })() =>
1046 : {
1047 : // ok to batch
1048 : accum_requests.extend(this_requests);
1049 : Ok(())
1050 : }
1051 : // something batched already but this message is unbatchable
1052 : (_, this_msg) => {
1053 : // by default, don't continue batching
1054 : Err(Ok(this_msg))
1055 : }
1056 : }
1057 : }
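// Illustrative sketch of the batching rule implemented above, stripped of the pageserver
// types: fold the next request into the current batch only if it targets the same shard
// and effective LSN and the batch is still under max_batch_size; otherwise hand the
// request back so the caller starts a new batch with it. Hypothetical types:
#[cfg(test)]
mod batching_rule_sketch {
    #[derive(Clone, Copy, PartialEq, Eq)]
    struct BatchKey {
        shard: u8,
        lsn: u64,
    }

    /// Returns Err(req) when the request cannot join the batch.
    fn try_fold(
        batch: &mut (BatchKey, Vec<u32>),
        max_batch_size: usize,
        key: BatchKey,
        req: u32,
    ) -> Result<(), u32> {
        let (batch_key, pages) = batch;
        if *batch_key != key || pages.len() >= max_batch_size {
            return Err(req);
        }
        pages.push(req);
        Ok(())
    }

    #[test]
    fn lsn_change_breaks_the_batch() {
        let k1 = BatchKey { shard: 0, lsn: 100 };
        let k2 = BatchKey { shard: 0, lsn: 200 };
        let mut batch = (k1, vec![1u32]);
        assert!(try_fold(&mut batch, 32, k1, 2).is_ok()); // same shard + LSN: batched
        assert!(try_fold(&mut batch, 32, k2, 3).is_err()); // LSN changed: start a new batch
    }
}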
1058 :
1059 0 : #[instrument(level = tracing::Level::DEBUG, skip_all)]
1060 : async fn pagesteam_handle_batched_message<IO>(
1061 : &mut self,
1062 : pgb_writer: &mut PostgresBackend<IO>,
1063 : batch: BatchedFeMessage,
1064 : io_concurrency: IoConcurrency,
1065 : cancel: &CancellationToken,
1066 : protocol_version: PagestreamProtocolVersion,
1067 : ctx: &RequestContext,
1068 : ) -> Result<(), QueryError>
1069 : where
1070 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
1071 : {
1072 : let started_at = Instant::now();
1073 : let batch = {
1074 : let mut batch = batch;
1075 : batch.observe_execution_start(started_at);
1076 : batch
1077 : };
1078 :
1079 : // invoke handler function
1080 : let (mut handler_results, span): (
1081 : Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>>,
1082 : _,
1083 : ) = match batch {
1084 : BatchedFeMessage::Exists {
1085 : span,
1086 : timer,
1087 : shard,
1088 : req,
1089 : } => {
1090 : fail::fail_point!("ps::handle-pagerequest-message::exists");
1091 : (
1092 : vec![self
1093 : .handle_get_rel_exists_request(&*shard.upgrade()?, &req, ctx)
1094 : .instrument(span.clone())
1095 : .await
1096 0 : .map(|msg| (msg, timer))
1097 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr })],
1098 : span,
1099 : )
1100 : }
1101 : BatchedFeMessage::Nblocks {
1102 : span,
1103 : timer,
1104 : shard,
1105 : req,
1106 : } => {
1107 : fail::fail_point!("ps::handle-pagerequest-message::nblocks");
1108 : (
1109 : vec![self
1110 : .handle_get_nblocks_request(&*shard.upgrade()?, &req, ctx)
1111 : .instrument(span.clone())
1112 : .await
1113 0 : .map(|msg| (msg, timer))
1114 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr })],
1115 : span,
1116 : )
1117 : }
1118 : BatchedFeMessage::GetPage {
1119 : span,
1120 : shard,
1121 : effective_request_lsn,
1122 : pages,
1123 : } => {
1124 : fail::fail_point!("ps::handle-pagerequest-message::getpage");
1125 : (
1126 : {
1127 : let npages = pages.len();
1128 : trace!(npages, "handling getpage request");
1129 : let res = self
1130 : .handle_get_page_at_lsn_request_batched(
1131 : &*shard.upgrade()?,
1132 : effective_request_lsn,
1133 : pages,
1134 : io_concurrency,
1135 : ctx,
1136 : )
1137 : .instrument(span.clone())
1138 : .await;
1139 : assert_eq!(res.len(), npages);
1140 : res
1141 : },
1142 : span,
1143 : )
1144 : }
1145 : BatchedFeMessage::DbSize {
1146 : span,
1147 : timer,
1148 : shard,
1149 : req,
1150 : } => {
1151 : fail::fail_point!("ps::handle-pagerequest-message::dbsize");
1152 : (
1153 : vec![self
1154 : .handle_db_size_request(&*shard.upgrade()?, &req, ctx)
1155 : .instrument(span.clone())
1156 : .await
1157 0 : .map(|msg| (msg, timer))
1158 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr })],
1159 : span,
1160 : )
1161 : }
1162 : BatchedFeMessage::GetSlruSegment {
1163 : span,
1164 : timer,
1165 : shard,
1166 : req,
1167 : } => {
1168 : fail::fail_point!("ps::handle-pagerequest-message::slrusegment");
1169 : (
1170 : vec![self
1171 : .handle_get_slru_segment_request(&*shard.upgrade()?, &req, ctx)
1172 : .instrument(span.clone())
1173 : .await
1174 0 : .map(|msg| (msg, timer))
1175 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr })],
1176 : span,
1177 : )
1178 : }
1179 : #[cfg(feature = "testing")]
1180 : BatchedFeMessage::Test {
1181 : span,
1182 : shard,
1183 : requests,
1184 : } => {
1185 : fail::fail_point!("ps::handle-pagerequest-message::test");
1186 : (
1187 : {
1188 : let npages = requests.len();
1189 : trace!(npages, "handling test request batch");
1190 : let res = self
1191 : .handle_test_request_batch(&*shard.upgrade()?, requests, ctx)
1192 : .instrument(span.clone())
1193 : .await;
1194 : assert_eq!(res.len(), npages);
1195 : res
1196 : },
1197 : span,
1198 : )
1199 : }
1200 : BatchedFeMessage::RespondError { span, error } => {
1201 : // We've already decided to respond with an error, so we don't need to
1202 : // call the handler.
1203 : (vec![Err(error)], span)
1204 : }
1205 : };
1206 :
1207 : // We purposefully don't count flush time into the smgr operation timer.
1208 : //
1209 : // The reason is that current compute client will not perform protocol processing
1210 : // if the postgres backend process is doing things other than `->smgr_read()`.
1211 : // This is especially the case for prefetch.
1212 : //
1213 : // If the compute doesn't read from the connection, eventually TCP will backpressure
1214 : // all the way into our flush call below.
1215 : //
1216 : // The timer's underlying metric is used for a storage-internal latency SLO and
1217 : // we don't want to include latency in it that we can't control.
1218 : // And as pointed out above, in this case, we don't control the time that flush will take.
1219 : //
1220 : // We put each response in the batch onto the wire in a separate pgb_writer.flush()
1221 : // call, which (all unmeasured) adds syscall overhead but reduces time to first byte
1222 : // and avoids building up a "giant" contiguous userspace buffer to hold the entire response.
1223 : // TODO: vectored socket IO would be great, but pgb_writer doesn't support that.
1224 : let flush_timers = {
1225 : let flushing_start_time = Instant::now();
1226 : let mut flush_timers = Vec::with_capacity(handler_results.len());
1227 : for handler_result in &mut handler_results {
1228 : let flush_timer = match handler_result {
1229 : Ok((_, timer)) => Some(
1230 : timer
1231 : .observe_execution_end(flushing_start_time)
1232 : .expect("we are the first caller"),
1233 : ),
1234 : Err(_) => {
1235 : // TODO: measure errors
1236 : None
1237 : }
1238 : };
1239 : flush_timers.push(flush_timer);
1240 : }
1241 : assert_eq!(flush_timers.len(), handler_results.len());
1242 : flush_timers
1243 : };
1244 :
1245 : // Map handler result to protocol behavior.
1246 : // Some handler errors cause exit from pagestream protocol.
1247 : // Other handler errors are sent back as an error message and we stay in pagestream protocol.
1248 : for (handler_result, flushing_timer) in handler_results.into_iter().zip(flush_timers) {
1249 : let response_msg = match handler_result {
1250 : Err(e) => match &e.err {
1251 : PageStreamError::Shutdown => {
1252 : // If we fail to fulfil a request during shutdown, which may be _because_ of
1253 : // shutdown, then do not send the error to the client. Instead just drop the
1254 : // connection.
1255 0 : span.in_scope(|| info!("dropping connection due to shutdown"));
1256 : return Err(QueryError::Shutdown);
1257 : }
1258 : PageStreamError::Reconnect(reason) => {
1259 0 : span.in_scope(|| info!("handler requested reconnect: {reason}"));
1260 : return Err(QueryError::Reconnect);
1261 : }
1262 : PageStreamError::Read(_)
1263 : | PageStreamError::LsnTimeout(_)
1264 : | PageStreamError::NotFound(_)
1265 : | PageStreamError::BadRequest(_) => {
1266 : // print all the details to the log with {:#}, but for the client the
1267 : // error message is enough. Do not log if shutting down, as the anyhow::Error
1268 : // here includes cancellation which is not an error.
1269 : let full = utils::error::report_compact_sources(&e.err);
1270 0 : span.in_scope(|| {
1271 0 : error!("error reading relation or page version: {full:#}")
1272 0 : });
1273 :
1274 : PagestreamBeMessage::Error(PagestreamErrorResponse {
1275 : req: e.req,
1276 : message: e.err.to_string(),
1277 : })
1278 : }
1279 : },
1280 : Ok((response_msg, _op_timer_already_observed)) => response_msg,
1281 : };
1282 :
1283 : //
1284 : // marshal & transmit response message
1285 : //
1286 :
1287 : pgb_writer.write_message_noflush(&BeMessage::CopyData(
1288 : &response_msg.serialize(protocol_version),
1289 : ))?;
1290 :
1291 : // what we want to do
1292 : let socket_fd = pgb_writer.socket_fd;
1293 : let flush_fut = pgb_writer.flush();
1294 : // metric for how long flushing takes
1295 : let flush_fut = match flushing_timer {
1296 : Some(flushing_timer) => futures::future::Either::Left(flushing_timer.measure(
1297 : Instant::now(),
1298 : flush_fut,
1299 : socket_fd,
1300 : )),
1301 : None => futures::future::Either::Right(flush_fut),
1302 : };
1303 : // do it while respecting cancellation
1304 0 : let _: () = async move {
1305 0 : tokio::select! {
1306 : biased;
1307 0 : _ = cancel.cancelled() => {
1308 : // We were requested to shut down.
1309 0 : info!("shutdown request received in page handler");
1310 0 : return Err(QueryError::Shutdown)
1311 : }
1312 0 : res = flush_fut => {
1313 0 : res?;
1314 : }
1315 : }
1316 0 : Ok(())
1317 0 : }
1318 : .await?;
1319 : }
1320 : Ok(())
1321 : }
1322 :
1323 : /// Pagestream sub-protocol handler.
1324 : ///
1325 : /// It is a simple request-response protocol inside a COPYBOTH session.
1326 : ///
1327 : /// # Coding Discipline
1328 : ///
1329 : /// Coding discipline within this function: all interaction with the `pgb` connection
1330 : /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
1331 : /// This is so that we can shutdown page_service quickly.
1332 : #[instrument(skip_all)]
1333 : async fn handle_pagerequests<IO>(
1334 : &mut self,
1335 : pgb: &mut PostgresBackend<IO>,
1336 : tenant_id: TenantId,
1337 : timeline_id: TimelineId,
1338 : protocol_version: PagestreamProtocolVersion,
1339 : ctx: RequestContext,
1340 : ) -> Result<(), QueryError>
1341 : where
1342 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1343 : {
1344 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
1345 :
1346 : // switch client to COPYBOTH
1347 : pgb.write_message_noflush(&BeMessage::CopyBothResponse)?;
1348 : tokio::select! {
1349 : biased;
1350 : _ = self.cancel.cancelled() => {
1351 : return Err(QueryError::Shutdown)
1352 : }
1353 : res = pgb.flush() => {
1354 : res?;
1355 : }
1356 : }
1357 :
1358 : let io_concurrency = IoConcurrency::spawn_from_conf(
1359 : self.conf,
1360 : match self.gate_guard.try_clone() {
1361 : Ok(guard) => guard,
1362 : Err(_) => {
1363 : info!("shutdown request received in page handler");
1364 : return Err(QueryError::Shutdown);
1365 : }
1366 : },
1367 : );
1368 :
1369 : let pgb_reader = pgb
1370 : .split()
1371 : .context("implementation error: split pgb into reader and writer")?;
1372 :
1373 : let timeline_handles = self
1374 : .timeline_handles
1375 : .take()
1376 : .expect("implementation error: timeline_handles should not be locked");
1377 :
1378 : let request_span = info_span!("request");
1379 : let ((pgb_reader, timeline_handles), result) = match self.pipelining_config.clone() {
1380 : PageServicePipeliningConfig::Pipelined(pipelining_config) => {
1381 : self.handle_pagerequests_pipelined(
1382 : pgb,
1383 : pgb_reader,
1384 : tenant_id,
1385 : timeline_id,
1386 : timeline_handles,
1387 : request_span,
1388 : pipelining_config,
1389 : protocol_version,
1390 : io_concurrency,
1391 : &ctx,
1392 : )
1393 : .await
1394 : }
1395 : PageServicePipeliningConfig::Serial => {
1396 : self.handle_pagerequests_serial(
1397 : pgb,
1398 : pgb_reader,
1399 : tenant_id,
1400 : timeline_id,
1401 : timeline_handles,
1402 : request_span,
1403 : protocol_version,
1404 : io_concurrency,
1405 : &ctx,
1406 : )
1407 : .await
1408 : }
1409 : };
1410 :
1411 : debug!("pagestream subprotocol shut down cleanly");
1412 :
1413 : pgb.unsplit(pgb_reader)
1414 : .context("implementation error: unsplit pgb")?;
1415 :
1416 : let replaced = self.timeline_handles.replace(timeline_handles);
1417 : assert!(replaced.is_none());
1418 :
1419 : result
1420 : }
1421 :
1422 : #[allow(clippy::too_many_arguments)]
1423 0 : async fn handle_pagerequests_serial<IO>(
1424 0 : &mut self,
1425 0 : pgb_writer: &mut PostgresBackend<IO>,
1426 0 : mut pgb_reader: PostgresBackendReader<IO>,
1427 0 : tenant_id: TenantId,
1428 0 : timeline_id: TimelineId,
1429 0 : mut timeline_handles: TimelineHandles,
1430 0 : request_span: Span,
1431 0 : protocol_version: PagestreamProtocolVersion,
1432 0 : io_concurrency: IoConcurrency,
1433 0 : ctx: &RequestContext,
1434 0 : ) -> (
1435 0 : (PostgresBackendReader<IO>, TimelineHandles),
1436 0 : Result<(), QueryError>,
1437 0 : )
1438 0 : where
1439 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1440 0 : {
1441 0 : let cancel = self.cancel.clone();
1442 0 : let err = loop {
1443 0 : let msg = Self::pagestream_read_message(
1444 0 : &mut pgb_reader,
1445 0 : tenant_id,
1446 0 : timeline_id,
1447 0 : &mut timeline_handles,
1448 0 : &cancel,
1449 0 : ctx,
1450 0 : protocol_version,
1451 0 : request_span.clone(),
1452 0 : )
1453 0 : .await;
1454 0 : let msg = match msg {
1455 0 : Ok(msg) => msg,
1456 0 : Err(e) => break e,
1457 : };
1458 0 : let msg = match msg {
1459 0 : Some(msg) => msg,
1460 : None => {
1461 0 : debug!("pagestream subprotocol end observed");
1462 0 : return ((pgb_reader, timeline_handles), Ok(()));
1463 : }
1464 : };
1465 :
1466 0 : let err = self
1467 0 : .pagesteam_handle_batched_message(
1468 0 : pgb_writer,
1469 0 : msg,
1470 0 : io_concurrency.clone(),
1471 0 : &cancel,
1472 0 : protocol_version,
1473 0 : ctx,
1474 0 : )
1475 0 : .await;
1476 0 : match err {
1477 0 : Ok(()) => {}
1478 0 : Err(e) => break e,
1479 : }
1480 : };
1481 0 : ((pgb_reader, timeline_handles), Err(err))
1482 0 : }
1483 :
1484 : /// # Cancel-Safety
1485 : ///
1486 : /// May leak tokio tasks if not polled to completion.
1487 : #[allow(clippy::too_many_arguments)]
1488 0 : async fn handle_pagerequests_pipelined<IO>(
1489 0 : &mut self,
1490 0 : pgb_writer: &mut PostgresBackend<IO>,
1491 0 : pgb_reader: PostgresBackendReader<IO>,
1492 0 : tenant_id: TenantId,
1493 0 : timeline_id: TimelineId,
1494 0 : mut timeline_handles: TimelineHandles,
1495 0 : request_span: Span,
1496 0 : pipelining_config: PageServicePipeliningConfigPipelined,
1497 0 : protocol_version: PagestreamProtocolVersion,
1498 0 : io_concurrency: IoConcurrency,
1499 0 : ctx: &RequestContext,
1500 0 : ) -> (
1501 0 : (PostgresBackendReader<IO>, TimelineHandles),
1502 0 : Result<(), QueryError>,
1503 0 : )
1504 0 : where
1505 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1506 0 : {
1507 0 : //
1508 0 : // Pipelined pagestream handling consists of
1509 0 : // - a Batcher that reads requests off the wire
1510 0 : // and batches them if possible,
1511 0 : // - an Executor that processes the batched requests.
1512 0 : //
1513 0 : // The batch is built up inside an `spsc_fold` channel,
1514 0 : // shared between Batcher (Sender) and Executor (Receiver).
1515 0 : //
1516 0 : // The Batcher continuously folds client requests into the batch,
1517 0 : // while the Executor can at any time take out what's in the batch
1518 0 : // in order to process it.
1519 0 : // This means the next batch builds up while the Executor
1520 0 : // executes the last batch.
1521 0 : //
1522 0 : // CANCELLATION
1523 0 : //
1524 0 : // We run both Batcher and Executor futures to completion before
1525 0 : // returning from this function.
1526 0 : //
1527 0 : // If Executor exits first, it signals cancellation to the Batcher
1528 0 : // via a CancellationToken that is child of `self.cancel`.
1529 0 : // If Batcher exits first, it signals cancellation to the Executor
1530 0 : // by dropping the spsc_fold channel Sender.
1531 0 : //
1532 0 : // CLEAN SHUTDOWN
1533 0 : //
1534 0 : // Clean shutdown means that the client ends the COPYBOTH session.
1535 0 : // In response to such a client message, the Batcher exits.
1536 0 : // The Executor continues to run, draining the spsc_fold channel.
1537 0 : // Once drained, the spsc_fold recv will fail with a distinct error
1538 0 : // indicating that the sender disconnected.
1539 0 : // The Executor exits with Ok(()) in response to that error.
1540 0 : //
1541 0 : // Server initiated shutdown is not clean shutdown, but instead
1542 0 : // is an error Err(QueryError::Shutdown) that is propagated through
1543 0 : // error propagation.
1544 0 : //
1545 0 : // ERROR PROPAGATION
1546 0 : //
1547 0 : // When the Batcher encounters an error, it sends it as a value
1548 0 : // through the spsc_fold channel and exits afterwards.
1549 0 : // When the Executor observes such an error in the channel,
1550 0 : // it exits returning that error value.
1551 0 : //
1552 0 : // This design ensures that the Executor stage will still process
1553 0 : // the batch that was in flight when the Batcher encountered an error,
1554 0 : // thereby behaving identically to a serial implementation. (A standalone sketch of this two-stage shape follows after this function.)
1555 0 :
1556 0 : let PageServicePipeliningConfigPipelined {
1557 0 : max_batch_size,
1558 0 : execution,
1559 0 : } = pipelining_config;
1560 :
1561 : // Macro to _define_ a pipeline stage.
1562 : macro_rules! pipeline_stage {
1563 : ($name:literal, $cancel:expr, $make_fut:expr) => {{
1564 : let cancel: CancellationToken = $cancel;
1565 : let stage_fut = $make_fut(cancel.clone());
1566 0 : async move {
1567 0 : scopeguard::defer! {
1568 0 : debug!("exiting");
1569 0 : }
1570 0 : timed_after_cancellation(stage_fut, $name, Duration::from_millis(100), &cancel)
1571 0 : .await
1572 0 : }
1573 : .instrument(tracing::info_span!($name))
1574 : }};
1575 : }
1576 :
1577 : //
1578 : // Batcher
1579 : //
1580 :
1581 0 : let cancel_batcher = self.cancel.child_token();
1582 0 : let (mut batch_tx, mut batch_rx) = spsc_fold::channel();
1583 0 : let batcher = pipeline_stage!("batcher", cancel_batcher.clone(), move |cancel_batcher| {
1584 0 : let ctx = ctx.attached_child();
1585 0 : async move {
1586 0 : let mut pgb_reader = pgb_reader;
1587 0 : let mut exit = false;
1588 0 : while !exit {
1589 0 : let read_res = Self::pagestream_read_message(
1590 0 : &mut pgb_reader,
1591 0 : tenant_id,
1592 0 : timeline_id,
1593 0 : &mut timeline_handles,
1594 0 : &cancel_batcher,
1595 0 : &ctx,
1596 0 : protocol_version,
1597 0 : request_span.clone(),
1598 0 : )
1599 0 : .await;
1600 0 : let Some(read_res) = read_res.transpose() else {
1601 0 : debug!("client-initiated shutdown");
1602 0 : break;
1603 : };
1604 0 : exit |= read_res.is_err();
1605 0 : let could_send = batch_tx
1606 0 : .send(read_res, |batch, res| {
1607 0 : Self::pagestream_do_batch(max_batch_size, batch, res)
1608 0 : })
1609 0 : .await;
1610 0 : exit |= could_send.is_err();
1611 : }
1612 0 : (pgb_reader, timeline_handles)
1613 0 : }
1614 0 : });
1615 :
1616 : //
1617 : // Executor
1618 : //
1619 :
1620 0 : let executor = pipeline_stage!("executor", self.cancel.clone(), move |cancel| {
1621 0 : let ctx = ctx.attached_child();
1622 0 : async move {
1623 0 : let _cancel_batcher = cancel_batcher.drop_guard();
1624 : loop {
1625 0 : let maybe_batch = batch_rx.recv().await;
1626 0 : let batch = match maybe_batch {
1627 0 : Ok(batch) => batch,
1628 : Err(spsc_fold::RecvError::SenderGone) => {
1629 0 : debug!("upstream gone");
1630 0 : return Ok(());
1631 : }
1632 : };
1633 0 : let batch = match batch {
1634 0 : Ok(batch) => batch,
1635 0 : Err(e) => {
1636 0 : return Err(e);
1637 : }
1638 : };
1639 0 : self.pagesteam_handle_batched_message(
1640 0 : pgb_writer,
1641 0 : batch,
1642 0 : io_concurrency.clone(),
1643 0 : &cancel,
1644 0 : protocol_version,
1645 0 : &ctx,
1646 0 : )
1647 0 : .await?;
1648 : }
1649 0 : }
1650 0 : });
1651 :
1652 : //
1653 : // Execute the stages.
1654 : //
1655 :
1656 0 : match execution {
1657 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1658 0 : tokio::join!(batcher, executor)
1659 : }
1660 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => {
1661 : // These tasks are not tracked anywhere.
1662 0 : let read_messages_task = tokio::spawn(batcher);
1663 0 : let (read_messages_task_res, executor_res_) =
1664 0 : tokio::join!(read_messages_task, executor,);
1665 0 : (
1666 0 : read_messages_task_res.expect("propagated panic from read_messages"),
1667 0 : executor_res_,
1668 0 : )
1669 : }
1670 : }
1671 0 : }
1672 :
1673 : /// Helper function to handle the LSN from a client request.
1674 : ///
1675 : /// Each GetPage (and Exists and Nblocks) request includes information about
1676 : /// which version of the page is being requested. The primary compute node
1677 : /// will always request the latest page version, by setting 'request_lsn' to
1678 : /// the last inserted or flushed WAL position, while a standby will request
1679 : /// a version at the LSN that it's currently caught up to.
1680 : ///
1681 : /// In either case, if the page server hasn't received the WAL up to the
1682 : /// requested LSN yet, we will wait for it to arrive. The return value is
1683 : /// the LSN that should be used to look up the page versions.
1684 : ///
1685 : /// In addition to the request LSN, each request carries another LSN,
1686 : /// 'not_modified_since', which is a hint to the pageserver that the client
1687 : /// knows that the page has not been modified between 'not_modified_since'
1688 : /// and the request LSN. This allows skipping the wait, as long as the WAL
1689 : /// up to 'not_modified_since' has arrived. If the client doesn't have any
1690 : /// information about when the page was modified, it will use
1691 : /// not_modified_since == lsn. If the client lies and sends a too low
1692 : /// 'not_modified_since' such that there are in fact later page versions, the
1693 : /// behavior is undefined: the pageserver may return any of the page versions
1694 : /// or an error.
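 : ///
 : /// Illustrative example (hypothetical LSN values): suppose the last
 : /// record LSN on the timeline is 0/200.
 : /// - request_lsn = 0/300, not_modified_since = 0/100: no wait is needed,
 : ///   and the lookup uses min(last_record_lsn, request_lsn) = 0/200.
 : /// - request_lsn = 0/300, not_modified_since = 0/250: we wait for WAL up
 : ///   to 0/250 to arrive, then use 0/250 for the lookup.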
1695 0 : async fn wait_or_get_last_lsn(
1696 0 : timeline: &Timeline,
1697 0 : request_lsn: Lsn,
1698 0 : not_modified_since: Lsn,
1699 0 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1700 0 : ctx: &RequestContext,
1701 0 : ) -> Result<Lsn, PageStreamError> {
1702 0 : let last_record_lsn = timeline.get_last_record_lsn();
1703 0 :
1704 0 : // Sanity check the request
1705 0 : if request_lsn < not_modified_since {
1706 0 : return Err(PageStreamError::BadRequest(
1707 0 : format!(
1708 0 : "invalid request with request LSN {} and not_modified_since {}",
1709 0 : request_lsn, not_modified_since,
1710 0 : )
1711 0 : .into(),
1712 0 : ));
1713 0 : }
1714 0 :
1715 0 : // Check explicitly for INVALID just to get a less scary error message if the request is obviously bogus
1716 0 : if request_lsn == Lsn::INVALID {
1717 0 : return Err(PageStreamError::BadRequest(
1718 0 : "invalid LSN(0) in request".into(),
1719 0 : ));
1720 0 : }
1721 0 :
1722 0 : // Clients should only read from recent LSNs on their timeline, or from locations holding an LSN lease.
1723 0 : //
1724 0 : // We may have older data available, but we make a best effort to detect this case and return an error,
1725 0 : // to distinguish a misbehaving client (asking for old LSN) from a storage issue (data missing at a legitimate LSN).
1726 0 : if request_lsn < **latest_gc_cutoff_lsn && !timeline.is_gc_blocked_by_lsn_lease_deadline() {
1727 0 : let gc_info = &timeline.gc_info.read().unwrap();
1728 0 : if !gc_info.lsn_covered_by_lease(request_lsn) {
1729 0 : return Err(
1730 0 : PageStreamError::BadRequest(format!(
1731 0 : "tried to request a page version that was garbage collected. requested at {} gc cutoff {}",
1732 0 : request_lsn, **latest_gc_cutoff_lsn
1733 0 : ).into())
1734 0 : );
1735 0 : }
1736 0 : }
1737 :
1738 : // Wait for WAL up to 'not_modified_since' to arrive, if necessary
1739 0 : if not_modified_since > last_record_lsn {
1740 0 : timeline
1741 0 : .wait_lsn(
1742 0 : not_modified_since,
1743 0 : crate::tenant::timeline::WaitLsnWaiter::PageService,
1744 0 : timeline::WaitLsnTimeout::Default,
1745 0 : ctx,
1746 0 : )
1747 0 : .await?;
1748 : // Since we waited for 'not_modified_since' to arrive, that is now the last
1749 : // record LSN. (Or close enough for our purposes; the last-record LSN can
1750 : // advance immediately after we return anyway)
1751 0 : Ok(not_modified_since)
1752 : } else {
1753 : // It might be better to use max(not_modified_since, latest_gc_cutoff_lsn)
1754 : // here instead. That would give the same result, since we know that there
1755 : // haven't been any modifications since 'not_modified_since'. Using an older
1756 : // LSN might be faster, because that could allow skipping recent layers when
1757 : // finding the page. However, we have historically used 'last_record_lsn', so
1758 : // stick to that for now.
1759 0 : Ok(std::cmp::min(last_record_lsn, request_lsn))
1760 : }
1761 0 : }
1762 :
1763 : /// Handles the lsn lease request.
1764 : /// If a lease cannot be obtained, the client will receive NULL.
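 : /// On success, the reply is a single row with one text column, `valid_until`,
 : /// containing the lease expiry as milliseconds since the UNIX epoch
 : /// (for example a value like `1735689600000`; the concrete number here is
 : /// purely illustrative).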
1765 : #[instrument(skip_all, fields(shard_id, %lsn))]
1766 : async fn handle_make_lsn_lease<IO>(
1767 : &mut self,
1768 : pgb: &mut PostgresBackend<IO>,
1769 : tenant_shard_id: TenantShardId,
1770 : timeline_id: TimelineId,
1771 : lsn: Lsn,
1772 : ctx: &RequestContext,
1773 : ) -> Result<(), QueryError>
1774 : where
1775 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
1776 : {
1777 : let timeline = self
1778 : .timeline_handles
1779 : .as_mut()
1780 : .unwrap()
1781 : .get(
1782 : tenant_shard_id.tenant_id,
1783 : timeline_id,
1784 : ShardSelector::Known(tenant_shard_id.to_index()),
1785 : )
1786 : .await?;
1787 : set_tracing_field_shard_id(&timeline);
1788 :
1789 : let lease = timeline
1790 : .renew_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)
1791 0 : .inspect_err(|e| {
1792 0 : warn!("{e}");
1793 0 : })
1794 : .ok();
1795 0 : let valid_until_str = lease.map(|l| {
1796 0 : l.valid_until
1797 0 : .duration_since(SystemTime::UNIX_EPOCH)
1798 0 : .expect("valid_until is earlier than UNIX_EPOCH")
1799 0 : .as_millis()
1800 0 : .to_string()
1801 0 : });
1802 :
1803 : info!(
1804 : "acquired lease for {} until {}",
1805 : lsn,
1806 : valid_until_str.as_deref().unwrap_or("<unknown>")
1807 : );
1808 :
1809 0 : let bytes = valid_until_str.as_ref().map(|x| x.as_bytes());
1810 :
1811 : pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor::text_col(
1812 : b"valid_until",
1813 : )]))?
1814 : .write_message_noflush(&BeMessage::DataRow(&[bytes]))?;
1815 :
1816 : Ok(())
1817 : }
1818 :
1819 : #[instrument(skip_all, fields(shard_id))]
1820 : async fn handle_get_rel_exists_request(
1821 : &mut self,
1822 : timeline: &Timeline,
1823 : req: &PagestreamExistsRequest,
1824 : ctx: &RequestContext,
1825 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1826 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1827 : let lsn = Self::wait_or_get_last_lsn(
1828 : timeline,
1829 : req.hdr.request_lsn,
1830 : req.hdr.not_modified_since,
1831 : &latest_gc_cutoff_lsn,
1832 : ctx,
1833 : )
1834 : .await?;
1835 :
1836 : let exists = timeline
1837 : .get_rel_exists(req.rel, Version::Lsn(lsn), ctx)
1838 : .await?;
1839 :
1840 : Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
1841 : req: *req,
1842 : exists,
1843 : }))
1844 : }
1845 :
1846 : #[instrument(skip_all, fields(shard_id))]
1847 : async fn handle_get_nblocks_request(
1848 : &mut self,
1849 : timeline: &Timeline,
1850 : req: &PagestreamNblocksRequest,
1851 : ctx: &RequestContext,
1852 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1853 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1854 : let lsn = Self::wait_or_get_last_lsn(
1855 : timeline,
1856 : req.hdr.request_lsn,
1857 : req.hdr.not_modified_since,
1858 : &latest_gc_cutoff_lsn,
1859 : ctx,
1860 : )
1861 : .await?;
1862 :
1863 : let n_blocks = timeline
1864 : .get_rel_size(req.rel, Version::Lsn(lsn), ctx)
1865 : .await?;
1866 :
1867 : Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
1868 : req: *req,
1869 : n_blocks,
1870 : }))
1871 : }
1872 :
1873 : #[instrument(skip_all, fields(shard_id))]
1874 : async fn handle_db_size_request(
1875 : &mut self,
1876 : timeline: &Timeline,
1877 : req: &PagestreamDbSizeRequest,
1878 : ctx: &RequestContext,
1879 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1880 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1881 : let lsn = Self::wait_or_get_last_lsn(
1882 : timeline,
1883 : req.hdr.request_lsn,
1884 : req.hdr.not_modified_since,
1885 : &latest_gc_cutoff_lsn,
1886 : ctx,
1887 : )
1888 : .await?;
1889 :
1890 : let total_blocks = timeline
1891 : .get_db_size(DEFAULTTABLESPACE_OID, req.dbnode, Version::Lsn(lsn), ctx)
1892 : .await?;
1893 : let db_size = total_blocks as i64 * BLCKSZ as i64;
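 : // Illustrative: with the default 8 KiB Postgres block size (BLCKSZ = 8192),
 : // a database of 1000 blocks reports 1000 * 8192 = 8_192_000 bytes.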
1894 :
1895 : Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
1896 : req: *req,
1897 : db_size,
1898 : }))
1899 : }
1900 :
1901 : #[instrument(skip_all)]
1902 : async fn handle_get_page_at_lsn_request_batched(
1903 : &mut self,
1904 : timeline: &Timeline,
1905 : effective_lsn: Lsn,
1906 : requests: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
1907 : io_concurrency: IoConcurrency,
1908 : ctx: &RequestContext,
1909 : ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
1910 : debug_assert_current_span_has_tenant_and_timeline_id();
1911 :
1912 : timeline
1913 : .query_metrics
1914 : .observe_getpage_batch_start(requests.len());
1915 :
1916 : // If a page trace is running, submit an event for this request.
1917 : if let Some(page_trace) = timeline.page_trace.load().as_ref() {
1918 : let time = SystemTime::now();
1919 : for batch in &requests {
1920 : let key = rel_block_to_key(batch.req.rel, batch.req.blkno).to_compact();
1921 : // Ignore error (trace buffer may be full or tracer may have disconnected).
1922 : _ = page_trace.try_send(PageTraceEvent {
1923 : key,
1924 : effective_lsn,
1925 : time,
1926 : });
1927 : }
1928 : }
1929 :
1930 : let results = timeline
1931 : .get_rel_page_at_lsn_batched(
1932 0 : requests.iter().map(|p| (&p.req.rel, &p.req.blkno)),
1933 : effective_lsn,
1934 : io_concurrency,
1935 : ctx,
1936 : )
1937 : .await;
1938 : assert_eq!(results.len(), requests.len());
1939 :
1940 : // TODO: avoid creating the new Vec here
1941 : Vec::from_iter(
1942 : requests
1943 : .into_iter()
1944 : .zip(results.into_iter())
1945 0 : .map(|(req, res)| {
1946 0 : res.map(|page| {
1947 0 : (
1948 0 : PagestreamBeMessage::GetPage(models::PagestreamGetPageResponse {
1949 0 : req: req.req,
1950 0 : page,
1951 0 : }),
1952 0 : req.timer,
1953 0 : )
1954 0 : })
1955 0 : .map_err(|e| BatchedPageStreamError {
1956 0 : err: PageStreamError::from(e),
1957 0 : req: req.req.hdr,
1958 0 : })
1959 0 : }),
1960 : )
1961 : }
1962 :
1963 : #[instrument(skip_all, fields(shard_id))]
1964 : async fn handle_get_slru_segment_request(
1965 : &mut self,
1966 : timeline: &Timeline,
1967 : req: &PagestreamGetSlruSegmentRequest,
1968 : ctx: &RequestContext,
1969 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1970 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1971 : let lsn = Self::wait_or_get_last_lsn(
1972 : timeline,
1973 : req.hdr.request_lsn,
1974 : req.hdr.not_modified_since,
1975 : &latest_gc_cutoff_lsn,
1976 : ctx,
1977 : )
1978 : .await?;
1979 :
1980 : let kind = SlruKind::from_repr(req.kind)
1981 : .ok_or(PageStreamError::BadRequest("invalid SLRU kind".into()))?;
1982 : let segment = timeline.get_slru_segment(kind, req.segno, lsn, ctx).await?;
1983 :
1984 : Ok(PagestreamBeMessage::GetSlruSegment(
1985 : PagestreamGetSlruSegmentResponse { req: *req, segment },
1986 : ))
1987 : }
1988 :
1989 : // NB: this impl mimics what we do for batched getpage requests.
1990 : #[cfg(feature = "testing")]
1991 : #[instrument(skip_all, fields(shard_id))]
1992 : async fn handle_test_request_batch(
1993 : &mut self,
1994 : timeline: &Timeline,
1995 : requests: Vec<BatchedTestRequest>,
1996 : _ctx: &RequestContext,
1997 : ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
1998 : // real requests would do something with the timeline
1999 : let mut results = Vec::with_capacity(requests.len());
2000 : for _req in requests.iter() {
2001 : tokio::task::yield_now().await;
2002 :
2003 : results.push({
2004 : if timeline.cancel.is_cancelled() {
2005 : Err(PageReconstructError::Cancelled)
2006 : } else {
2007 : Ok(())
2008 : }
2009 : });
2010 : }
2011 :
2012 : // TODO: avoid creating the new Vec here
2013 : Vec::from_iter(
2014 : requests
2015 : .into_iter()
2016 : .zip(results.into_iter())
2017 0 : .map(|(req, res)| {
2018 0 : res.map(|()| {
2019 0 : (
2020 0 : PagestreamBeMessage::Test(models::PagestreamTestResponse {
2021 0 : req: req.req.clone(),
2022 0 : }),
2023 0 : req.timer,
2024 0 : )
2025 0 : })
2026 0 : .map_err(|e| BatchedPageStreamError {
2027 0 : err: PageStreamError::from(e),
2028 0 : req: req.req.hdr,
2029 0 : })
2030 0 : }),
2031 : )
2032 : }
2033 :
2034 : /// Note on "fullbackup":
2035 : /// Full basebackups should only be used for debugging purposes.
2036 : /// Originally, it was introduced to enable breaking storage format changes,
2037 : /// but that is not applicable anymore.
2038 : ///
2039 : /// # Coding Discipline
2040 : ///
2041 : /// Coding discipline within this function: all interaction with the `pgb` connection
2042 : /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
2043 : /// This is so that we can shutdown page_service quickly.
2044 : ///
2045 : /// TODO: wrap the pgb that we pass to the basebackup handler so that it's sensitive
2046 : /// to connection cancellation.
2047 : #[allow(clippy::too_many_arguments)]
2048 : #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))]
2049 : async fn handle_basebackup_request<IO>(
2050 : &mut self,
2051 : pgb: &mut PostgresBackend<IO>,
2052 : tenant_id: TenantId,
2053 : timeline_id: TimelineId,
2054 : lsn: Option<Lsn>,
2055 : prev_lsn: Option<Lsn>,
2056 : full_backup: bool,
2057 : gzip: bool,
2058 : replica: bool,
2059 : ctx: &RequestContext,
2060 : ) -> Result<(), QueryError>
2061 : where
2062 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
2063 : {
2064 0 : fn map_basebackup_error(err: BasebackupError) -> QueryError {
2065 0 : match err {
2066 : // TODO: pass through the error site to the final error message?
2067 0 : BasebackupError::Client(e, _) => QueryError::Disconnected(ConnectionError::Io(e)),
2068 0 : BasebackupError::Server(e) => QueryError::Other(e),
2069 : }
2070 0 : }
2071 :
2072 : let started = std::time::Instant::now();
2073 :
2074 : let timeline = self
2075 : .timeline_handles
2076 : .as_mut()
2077 : .unwrap()
2078 : .get(tenant_id, timeline_id, ShardSelector::Zero)
2079 : .await?;
2080 : set_tracing_field_shard_id(&timeline);
2081 :
2082 : if timeline.is_archived() == Some(true) {
2083 : // TODO after a grace period, turn this log line into a hard error
2084 : tracing::warn!("timeline {tenant_id}/{timeline_id} is archived, but got basebackup request for it.");
2085 : //return Err(QueryError::NotFound("timeline is archived".into()))
2086 : }
2087 :
2088 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
2089 : if let Some(lsn) = lsn {
2090 : // Backup was requested at a particular LSN. Wait for it to arrive.
2091 : info!("waiting for {}", lsn);
2092 : timeline
2093 : .wait_lsn(
2094 : lsn,
2095 : crate::tenant::timeline::WaitLsnWaiter::PageService,
2096 : crate::tenant::timeline::WaitLsnTimeout::Default,
2097 : ctx,
2098 : )
2099 : .await?;
2100 : timeline
2101 : .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)
2102 : .context("invalid basebackup lsn")?;
2103 : }
2104 :
2105 : let lsn_awaited_after = started.elapsed();
2106 :
2107 : // switch client to COPYOUT
2108 : pgb.write_message_noflush(&BeMessage::CopyOutResponse)
2109 : .map_err(QueryError::Disconnected)?;
2110 : self.flush_cancellable(pgb, &self.cancel).await?;
2111 :
2112 : // Send a tarball of the latest layer on the timeline. Compress if not
2113 : // fullbackup. TODO Compress in that case too (tests need to be updated)
2114 : if full_backup {
2115 : let mut writer = pgb.copyout_writer();
2116 : basebackup::send_basebackup_tarball(
2117 : &mut writer,
2118 : &timeline,
2119 : lsn,
2120 : prev_lsn,
2121 : full_backup,
2122 : replica,
2123 : ctx,
2124 : )
2125 : .await
2126 : .map_err(map_basebackup_error)?;
2127 : } else {
2128 : let mut writer = BufWriter::new(pgb.copyout_writer());
2129 : if gzip {
2130 : let mut encoder = GzipEncoder::with_quality(
2131 : &mut writer,
2132 : // NOTE using fast compression because it's on the critical path
2133 : // for compute startup. For an empty database, we get
2134 : // <100KB with this method. The Level::Best compression method
2135 : // gives us <20KB, but maybe we should add basebackup caching
2136 : // on compute shutdown first.
2137 : async_compression::Level::Fastest,
2138 : );
2139 : basebackup::send_basebackup_tarball(
2140 : &mut encoder,
2141 : &timeline,
2142 : lsn,
2143 : prev_lsn,
2144 : full_backup,
2145 : replica,
2146 : ctx,
2147 : )
2148 : .await
2149 : .map_err(map_basebackup_error)?;
2150 : // shutdown the encoder to ensure the gzip footer is written
2151 : encoder
2152 : .shutdown()
2153 : .await
2154 0 : .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?;
2155 : } else {
2156 : basebackup::send_basebackup_tarball(
2157 : &mut writer,
2158 : &timeline,
2159 : lsn,
2160 : prev_lsn,
2161 : full_backup,
2162 : replica,
2163 : ctx,
2164 : )
2165 : .await
2166 : .map_err(map_basebackup_error)?;
2167 : }
2168 0 : writer.flush().await.map_err(|e| {
2169 0 : map_basebackup_error(BasebackupError::Client(
2170 0 : e,
2171 0 : "handle_basebackup_request,flush",
2172 0 : ))
2173 0 : })?;
2174 : }
2175 :
2176 : pgb.write_message_noflush(&BeMessage::CopyDone)
2177 : .map_err(QueryError::Disconnected)?;
2178 : self.flush_cancellable(pgb, &timeline.cancel).await?;
2179 :
2180 : let basebackup_after = started
2181 : .elapsed()
2182 : .checked_sub(lsn_awaited_after)
2183 : .unwrap_or(Duration::ZERO);
2184 :
2185 : info!(
2186 : lsn_await_millis = lsn_awaited_after.as_millis(),
2187 : basebackup_millis = basebackup_after.as_millis(),
2188 : "basebackup complete"
2189 : );
2190 :
2191 : Ok(())
2192 : }
2193 :
2194 : // When accessing the management API, supply None as an argument.
2195 : // When authorizing access to a tenant, pass the corresponding tenant id.
2196 0 : fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<(), QueryError> {
2197 0 : if self.auth.is_none() {
2198 : // Auth is set to Trust, so there is nothing to check; just return Ok.
2199 0 : return Ok(());
2200 0 : }
2201 0 : // Auth is Some, as just checked above. When auth is Some,
2202 0 : // claims are always present because of checks during connection init,
2203 0 : // so this expect won't trigger.
2204 0 : let claims = self
2205 0 : .claims
2206 0 : .as_ref()
2207 0 : .expect("claims presence already checked");
2208 0 : check_permission(claims, tenant_id).map_err(|e| QueryError::Unauthorized(e.0))
2209 0 : }
2210 : }
2211 :
2212 : /// `basebackup tenant timeline [lsn] [--gzip] [--replica]`
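 : ///
 : /// In place of an explicit `lsn`, the literal `latest` is also accepted.
 : /// Illustrative invocation (placeholder ids): `basebackup <tenant_id> <timeline_id> 0/16ABCDE --gzip --replica`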
2213 : #[derive(Debug, Clone, Eq, PartialEq)]
2214 : struct BaseBackupCmd {
2215 : tenant_id: TenantId,
2216 : timeline_id: TimelineId,
2217 : lsn: Option<Lsn>,
2218 : gzip: bool,
2219 : replica: bool,
2220 : }
2221 :
2222 : /// `fullbackup tenant timeline [lsn] [prev_lsn]`
2223 : #[derive(Debug, Clone, Eq, PartialEq)]
2224 : struct FullBackupCmd {
2225 : tenant_id: TenantId,
2226 : timeline_id: TimelineId,
2227 : lsn: Option<Lsn>,
2228 : prev_lsn: Option<Lsn>,
2229 : }
2230 :
2231 : /// `pagestream_v2 tenant timeline`
2232 : #[derive(Debug, Clone, Eq, PartialEq)]
2233 : struct PageStreamCmd {
2234 : tenant_id: TenantId,
2235 : timeline_id: TimelineId,
2236 : protocol_version: PagestreamProtocolVersion,
2237 : }
2238 :
2239 : /// `lease lsn tenant timeline lsn`
2240 : #[derive(Debug, Clone, Eq, PartialEq)]
2241 : struct LeaseLsnCmd {
2242 : tenant_shard_id: TenantShardId,
2243 : timeline_id: TimelineId,
2244 : lsn: Lsn,
2245 : }
2246 :
2247 : #[derive(Debug, Clone, Eq, PartialEq)]
2248 : enum PageServiceCmd {
2249 : Set,
2250 : PageStream(PageStreamCmd),
2251 : BaseBackup(BaseBackupCmd),
2252 : FullBackup(FullBackupCmd),
2253 : LeaseLsn(LeaseLsnCmd),
2254 : }
2255 :
2256 : impl PageStreamCmd {
2257 12 : fn parse(query: &str, protocol_version: PagestreamProtocolVersion) -> anyhow::Result<Self> {
2258 12 : let parameters = query.split_whitespace().collect_vec();
2259 12 : if parameters.len() != 2 {
2260 4 : bail!(
2261 4 : "invalid number of parameters for pagestream command: {}",
2262 4 : query
2263 4 : );
2264 8 : }
2265 8 : let tenant_id = TenantId::from_str(parameters[0])
2266 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2267 4 : let timeline_id = TimelineId::from_str(parameters[1])
2268 4 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2269 4 : Ok(Self {
2270 4 : tenant_id,
2271 4 : timeline_id,
2272 4 : protocol_version,
2273 4 : })
2274 12 : }
2275 : }
2276 :
2277 : impl FullBackupCmd {
2278 8 : fn parse(query: &str) -> anyhow::Result<Self> {
2279 8 : let parameters = query.split_whitespace().collect_vec();
2280 8 : if parameters.len() < 2 || parameters.len() > 4 {
2281 0 : bail!(
2282 0 : "invalid number of parameters for basebackup command: {}",
2283 0 : query
2284 0 : );
2285 8 : }
2286 8 : let tenant_id = TenantId::from_str(parameters[0])
2287 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2288 8 : let timeline_id = TimelineId::from_str(parameters[1])
2289 8 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2290 : // The caller is responsible for providing correct lsn and prev_lsn.
2291 8 : let lsn = if let Some(lsn_str) = parameters.get(2) {
2292 : Some(
2293 4 : Lsn::from_str(lsn_str)
2294 4 : .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?,
2295 : )
2296 : } else {
2297 4 : None
2298 : };
2299 8 : let prev_lsn = if let Some(prev_lsn_str) = parameters.get(3) {
2300 : Some(
2301 4 : Lsn::from_str(prev_lsn_str)
2302 4 : .with_context(|| format!("Failed to parse Lsn from {prev_lsn_str}"))?,
2303 : )
2304 : } else {
2305 4 : None
2306 : };
2307 8 : Ok(Self {
2308 8 : tenant_id,
2309 8 : timeline_id,
2310 8 : lsn,
2311 8 : prev_lsn,
2312 8 : })
2313 8 : }
2314 : }
2315 :
2316 : impl BaseBackupCmd {
2317 36 : fn parse(query: &str) -> anyhow::Result<Self> {
2318 36 : let parameters = query.split_whitespace().collect_vec();
2319 36 : if parameters.len() < 2 {
2320 0 : bail!(
2321 0 : "invalid number of parameters for basebackup command: {}",
2322 0 : query
2323 0 : );
2324 36 : }
2325 36 : let tenant_id = TenantId::from_str(parameters[0])
2326 36 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2327 36 : let timeline_id = TimelineId::from_str(parameters[1])
2328 36 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2329 : let lsn;
2330 : let flags_parse_from;
2331 36 : if let Some(maybe_lsn) = parameters.get(2) {
2332 32 : if *maybe_lsn == "latest" {
2333 4 : lsn = None;
2334 4 : flags_parse_from = 3;
2335 28 : } else if maybe_lsn.starts_with("--") {
2336 20 : lsn = None;
2337 20 : flags_parse_from = 2;
2338 20 : } else {
2339 : lsn = Some(
2340 8 : Lsn::from_str(maybe_lsn)
2341 8 : .with_context(|| format!("Failed to parse lsn from {maybe_lsn}"))?,
2342 : );
2343 8 : flags_parse_from = 3;
2344 : }
2345 4 : } else {
2346 4 : lsn = None;
2347 4 : flags_parse_from = 2;
2348 4 : }
2349 :
2350 36 : let mut gzip = false;
2351 36 : let mut replica = false;
2352 :
2353 44 : for &param in &parameters[flags_parse_from..] {
2354 44 : match param {
2355 44 : "--gzip" => {
2356 28 : if gzip {
2357 4 : bail!("duplicate parameter for basebackup command: {param}")
2358 24 : }
2359 24 : gzip = true
2360 : }
2361 16 : "--replica" => {
2362 8 : if replica {
2363 0 : bail!("duplicate parameter for basebackup command: {param}")
2364 8 : }
2365 8 : replica = true
2366 : }
2367 8 : _ => bail!("invalid parameter for basebackup command: {param}"),
2368 : }
2369 : }
2370 24 : Ok(Self {
2371 24 : tenant_id,
2372 24 : timeline_id,
2373 24 : lsn,
2374 24 : gzip,
2375 24 : replica,
2376 24 : })
2377 36 : }
2378 : }
2379 :
2380 : impl LeaseLsnCmd {
2381 8 : fn parse(query: &str) -> anyhow::Result<Self> {
2382 8 : let parameters = query.split_whitespace().collect_vec();
2383 8 : if parameters.len() != 3 {
2384 0 : bail!(
2385 0 : "invalid number of parameters for lease lsn command: {}",
2386 0 : query
2387 0 : );
2388 8 : }
2389 8 : let tenant_shard_id = TenantShardId::from_str(parameters[0])
2390 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2391 8 : let timeline_id = TimelineId::from_str(parameters[1])
2392 8 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2393 8 : let lsn = Lsn::from_str(parameters[2])
2394 8 : .with_context(|| format!("Failed to parse lsn from {}", parameters[2]))?;
2395 8 : Ok(Self {
2396 8 : tenant_shard_id,
2397 8 : timeline_id,
2398 8 : lsn,
2399 8 : })
2400 8 : }
2401 : }
2402 :
2403 : impl PageServiceCmd {
2404 84 : fn parse(query: &str) -> anyhow::Result<Self> {
2405 84 : let query = query.trim();
2406 84 : let Some((cmd, other)) = query.split_once(' ') else {
2407 8 : bail!("cannot parse query: {query}")
2408 : };
2409 76 : match cmd.to_ascii_lowercase().as_str() {
2410 76 : "pagestream_v2" => Ok(Self::PageStream(PageStreamCmd::parse(
2411 12 : other,
2412 12 : PagestreamProtocolVersion::V2,
2413 12 : )?)),
2414 64 : "pagestream_v3" => Ok(Self::PageStream(PageStreamCmd::parse(
2415 0 : other,
2416 0 : PagestreamProtocolVersion::V3,
2417 0 : )?)),
2418 64 : "basebackup" => Ok(Self::BaseBackup(BaseBackupCmd::parse(other)?)),
2419 28 : "fullbackup" => Ok(Self::FullBackup(FullBackupCmd::parse(other)?)),
2420 20 : "lease" => {
2421 12 : let Some((cmd2, other)) = other.split_once(' ') else {
2422 0 : bail!("invalid lease command: {cmd}");
2423 : };
2424 12 : let cmd2 = cmd2.to_ascii_lowercase();
2425 12 : if cmd2 == "lsn" {
2426 8 : Ok(Self::LeaseLsn(LeaseLsnCmd::parse(other)?))
2427 : } else {
2428 4 : bail!("invalid lease command: {cmd}");
2429 : }
2430 : }
2431 8 : "set" => Ok(Self::Set),
2432 0 : _ => Err(anyhow::anyhow!("unsupported command {cmd} in {query}")),
2433 : }
2434 84 : }
2435 : }
2436 :
2437 : impl<IO> postgres_backend::Handler<IO> for PageServerHandler
2438 : where
2439 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
2440 : {
2441 0 : fn check_auth_jwt(
2442 0 : &mut self,
2443 0 : _pgb: &mut PostgresBackend<IO>,
2444 0 : jwt_response: &[u8],
2445 0 : ) -> Result<(), QueryError> {
2446 : // This unwrap is never triggered, because check_auth_jwt is only called when auth_type is NeonJWT,
2447 : // which requires auth to be present.
2448 0 : let data = self
2449 0 : .auth
2450 0 : .as_ref()
2451 0 : .unwrap()
2452 0 : .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)
2453 0 : .map_err(|e| QueryError::Unauthorized(e.0))?;
2454 :
2455 0 : if matches!(data.claims.scope, Scope::Tenant) && data.claims.tenant_id.is_none() {
2456 0 : return Err(QueryError::Unauthorized(
2457 0 : "jwt token scope is Tenant, but tenant id is missing".into(),
2458 0 : ));
2459 0 : }
2460 0 :
2461 0 : debug!(
2462 0 : "jwt scope check succeeded for scope: {:#?} by tenant id: {:?}",
2463 : data.claims.scope, data.claims.tenant_id,
2464 : );
2465 :
2466 0 : self.claims = Some(data.claims);
2467 0 : Ok(())
2468 0 : }
2469 :
2470 0 : fn startup(
2471 0 : &mut self,
2472 0 : _pgb: &mut PostgresBackend<IO>,
2473 0 : sm: &FeStartupPacket,
2474 0 : ) -> Result<(), QueryError> {
2475 0 : fail::fail_point!("ps::connection-start::startup-packet");
2476 :
2477 0 : if let FeStartupPacket::StartupMessage { params, .. } = sm {
2478 0 : if let Some(app_name) = params.get("application_name") {
2479 0 : Span::current().record("application_name", field::display(app_name));
2480 0 : }
2481 0 : };
2482 :
2483 0 : Ok(())
2484 0 : }
2485 :
2486 : #[instrument(skip_all, fields(tenant_id, timeline_id))]
2487 : async fn process_query(
2488 : &mut self,
2489 : pgb: &mut PostgresBackend<IO>,
2490 : query_string: &str,
2491 : ) -> Result<(), QueryError> {
2492 0 : fail::fail_point!("simulated-bad-compute-connection", |_| {
2493 0 : info!("Hit failpoint for bad connection");
2494 0 : Err(QueryError::SimulatedConnectionError)
2495 0 : });
2496 :
2497 : fail::fail_point!("ps::connection-start::process-query");
2498 :
2499 : let ctx = self.connection_ctx.attached_child();
2500 : debug!("process query {query_string}");
2501 : let query = PageServiceCmd::parse(query_string)?;
2502 : match query {
2503 : PageServiceCmd::PageStream(PageStreamCmd {
2504 : tenant_id,
2505 : timeline_id,
2506 : protocol_version,
2507 : }) => {
2508 : tracing::Span::current()
2509 : .record("tenant_id", field::display(tenant_id))
2510 : .record("timeline_id", field::display(timeline_id));
2511 :
2512 : self.check_permission(Some(tenant_id))?;
2513 : let command_kind = match protocol_version {
2514 : PagestreamProtocolVersion::V2 => ComputeCommandKind::PageStreamV2,
2515 : PagestreamProtocolVersion::V3 => ComputeCommandKind::PageStreamV3,
2516 : };
2517 : COMPUTE_COMMANDS_COUNTERS.for_command(command_kind).inc();
2518 :
2519 : self.handle_pagerequests(pgb, tenant_id, timeline_id, protocol_version, ctx)
2520 : .await?;
2521 : }
2522 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2523 : tenant_id,
2524 : timeline_id,
2525 : lsn,
2526 : gzip,
2527 : replica,
2528 : }) => {
2529 : tracing::Span::current()
2530 : .record("tenant_id", field::display(tenant_id))
2531 : .record("timeline_id", field::display(timeline_id));
2532 :
2533 : self.check_permission(Some(tenant_id))?;
2534 :
2535 : COMPUTE_COMMANDS_COUNTERS
2536 : .for_command(ComputeCommandKind::Basebackup)
2537 : .inc();
2538 : let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording();
2539 0 : let res = async {
2540 0 : self.handle_basebackup_request(
2541 0 : pgb,
2542 0 : tenant_id,
2543 0 : timeline_id,
2544 0 : lsn,
2545 0 : None,
2546 0 : false,
2547 0 : gzip,
2548 0 : replica,
2549 0 : &ctx,
2550 0 : )
2551 0 : .await?;
2552 0 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2553 0 : Result::<(), QueryError>::Ok(())
2554 0 : }
2555 : .await;
2556 : metric_recording.observe(&res);
2557 : res?;
2558 : }
2559 : // same as basebackup, but result includes relational data as well
2560 : PageServiceCmd::FullBackup(FullBackupCmd {
2561 : tenant_id,
2562 : timeline_id,
2563 : lsn,
2564 : prev_lsn,
2565 : }) => {
2566 : tracing::Span::current()
2567 : .record("tenant_id", field::display(tenant_id))
2568 : .record("timeline_id", field::display(timeline_id));
2569 :
2570 : self.check_permission(Some(tenant_id))?;
2571 :
2572 : COMPUTE_COMMANDS_COUNTERS
2573 : .for_command(ComputeCommandKind::Fullbackup)
2574 : .inc();
2575 :
2576 : // Check that the timeline exists
2577 : self.handle_basebackup_request(
2578 : pgb,
2579 : tenant_id,
2580 : timeline_id,
2581 : lsn,
2582 : prev_lsn,
2583 : true,
2584 : false,
2585 : false,
2586 : &ctx,
2587 : )
2588 : .await?;
2589 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2590 : }
2591 : PageServiceCmd::Set => {
2592 : // important because psycopg2 executes "SET datestyle TO 'ISO'"
2593 : // on connect
2594 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2595 : }
2596 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2597 : tenant_shard_id,
2598 : timeline_id,
2599 : lsn,
2600 : }) => {
2601 : tracing::Span::current()
2602 : .record("tenant_id", field::display(tenant_shard_id))
2603 : .record("timeline_id", field::display(timeline_id));
2604 :
2605 : self.check_permission(Some(tenant_shard_id.tenant_id))?;
2606 :
2607 : COMPUTE_COMMANDS_COUNTERS
2608 : .for_command(ComputeCommandKind::LeaseLsn)
2609 : .inc();
2610 :
2611 : match self
2612 : .handle_make_lsn_lease(pgb, tenant_shard_id, timeline_id, lsn, &ctx)
2613 : .await
2614 : {
2615 : Ok(()) => {
2616 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?
2617 : }
2618 : Err(e) => {
2619 : error!("error obtaining lsn lease for {lsn}: {e:?}");
2620 : pgb.write_message_noflush(&BeMessage::ErrorResponse(
2621 : &e.to_string(),
2622 : Some(e.pg_error_code()),
2623 : ))?
2624 : }
2625 : };
2626 : }
2627 : }
2628 :
2629 : Ok(())
2630 : }
2631 : }
2632 :
2633 : impl From<GetActiveTenantError> for QueryError {
2634 0 : fn from(e: GetActiveTenantError) -> Self {
2635 0 : match e {
2636 0 : GetActiveTenantError::WaitForActiveTimeout { .. } => QueryError::Disconnected(
2637 0 : ConnectionError::Io(io::Error::new(io::ErrorKind::TimedOut, e.to_string())),
2638 0 : ),
2639 : GetActiveTenantError::Cancelled
2640 : | GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
2641 0 : QueryError::Shutdown
2642 : }
2643 0 : e @ GetActiveTenantError::NotFound(_) => QueryError::NotFound(format!("{e}").into()),
2644 0 : e => QueryError::Other(anyhow::anyhow!(e)),
2645 : }
2646 0 : }
2647 : }
2648 :
2649 : #[derive(Debug, thiserror::Error)]
2650 : pub(crate) enum GetActiveTimelineError {
2651 : #[error(transparent)]
2652 : Tenant(GetActiveTenantError),
2653 : #[error(transparent)]
2654 : Timeline(#[from] GetTimelineError),
2655 : }
2656 :
2657 : impl From<GetActiveTimelineError> for QueryError {
2658 0 : fn from(e: GetActiveTimelineError) -> Self {
2659 0 : match e {
2660 0 : GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => QueryError::Shutdown,
2661 0 : GetActiveTimelineError::Tenant(e) => e.into(),
2662 0 : GetActiveTimelineError::Timeline(e) => QueryError::NotFound(format!("{e}").into()),
2663 : }
2664 0 : }
2665 : }
2666 :
2667 : impl From<crate::tenant::timeline::handle::HandleUpgradeError> for QueryError {
2668 0 : fn from(e: crate::tenant::timeline::handle::HandleUpgradeError) -> Self {
2669 0 : match e {
2670 0 : crate::tenant::timeline::handle::HandleUpgradeError::ShutDown => QueryError::Shutdown,
2671 0 : }
2672 0 : }
2673 : }
2674 :
2675 0 : fn set_tracing_field_shard_id(timeline: &Timeline) {
2676 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
2677 0 : tracing::Span::current().record(
2678 0 : "shard_id",
2679 0 : tracing::field::display(timeline.tenant_shard_id.shard_slug()),
2680 0 : );
2681 0 : debug_assert_current_span_has_tenant_and_timeline_id();
2682 0 : }
2683 :
2684 : struct WaitedForLsn(Lsn);
2685 : impl From<WaitedForLsn> for Lsn {
2686 0 : fn from(WaitedForLsn(lsn): WaitedForLsn) -> Self {
2687 0 : lsn
2688 0 : }
2689 : }
2690 :
2691 : #[cfg(test)]
2692 : mod tests {
2693 : use utils::shard::ShardCount;
2694 :
2695 : use super::*;
2696 :
2697 : #[test]
2698 4 : fn pageservice_cmd_parse() {
2699 4 : let tenant_id = TenantId::generate();
2700 4 : let timeline_id = TimelineId::generate();
2701 4 : let cmd =
2702 4 : PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id} {timeline_id}")).unwrap();
2703 4 : assert_eq!(
2704 4 : cmd,
2705 4 : PageServiceCmd::PageStream(PageStreamCmd {
2706 4 : tenant_id,
2707 4 : timeline_id,
2708 4 : protocol_version: PagestreamProtocolVersion::V2,
2709 4 : })
2710 4 : );
2711 4 : let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id}")).unwrap();
2712 4 : assert_eq!(
2713 4 : cmd,
2714 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2715 4 : tenant_id,
2716 4 : timeline_id,
2717 4 : lsn: None,
2718 4 : gzip: false,
2719 4 : replica: false
2720 4 : })
2721 4 : );
2722 4 : let cmd =
2723 4 : PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} --gzip")).unwrap();
2724 4 : assert_eq!(
2725 4 : cmd,
2726 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2727 4 : tenant_id,
2728 4 : timeline_id,
2729 4 : lsn: None,
2730 4 : gzip: true,
2731 4 : replica: false
2732 4 : })
2733 4 : );
2734 4 : let cmd =
2735 4 : PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} latest")).unwrap();
2736 4 : assert_eq!(
2737 4 : cmd,
2738 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2739 4 : tenant_id,
2740 4 : timeline_id,
2741 4 : lsn: None,
2742 4 : gzip: false,
2743 4 : replica: false
2744 4 : })
2745 4 : );
2746 4 : let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} 0/16ABCDE"))
2747 4 : .unwrap();
2748 4 : assert_eq!(
2749 4 : cmd,
2750 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2751 4 : tenant_id,
2752 4 : timeline_id,
2753 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2754 4 : gzip: false,
2755 4 : replica: false
2756 4 : })
2757 4 : );
2758 4 : let cmd = PageServiceCmd::parse(&format!(
2759 4 : "basebackup {tenant_id} {timeline_id} --replica --gzip"
2760 4 : ))
2761 4 : .unwrap();
2762 4 : assert_eq!(
2763 4 : cmd,
2764 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2765 4 : tenant_id,
2766 4 : timeline_id,
2767 4 : lsn: None,
2768 4 : gzip: true,
2769 4 : replica: true
2770 4 : })
2771 4 : );
2772 4 : let cmd = PageServiceCmd::parse(&format!(
2773 4 : "basebackup {tenant_id} {timeline_id} 0/16ABCDE --replica --gzip"
2774 4 : ))
2775 4 : .unwrap();
2776 4 : assert_eq!(
2777 4 : cmd,
2778 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2779 4 : tenant_id,
2780 4 : timeline_id,
2781 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2782 4 : gzip: true,
2783 4 : replica: true
2784 4 : })
2785 4 : );
2786 4 : let cmd = PageServiceCmd::parse(&format!("fullbackup {tenant_id} {timeline_id}")).unwrap();
2787 4 : assert_eq!(
2788 4 : cmd,
2789 4 : PageServiceCmd::FullBackup(FullBackupCmd {
2790 4 : tenant_id,
2791 4 : timeline_id,
2792 4 : lsn: None,
2793 4 : prev_lsn: None
2794 4 : })
2795 4 : );
2796 4 : let cmd = PageServiceCmd::parse(&format!(
2797 4 : "fullbackup {tenant_id} {timeline_id} 0/16ABCDE 0/16ABCDF"
2798 4 : ))
2799 4 : .unwrap();
2800 4 : assert_eq!(
2801 4 : cmd,
2802 4 : PageServiceCmd::FullBackup(FullBackupCmd {
2803 4 : tenant_id,
2804 4 : timeline_id,
2805 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2806 4 : prev_lsn: Some(Lsn::from_str("0/16ABCDF").unwrap()),
2807 4 : })
2808 4 : );
2809 4 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2810 4 : let cmd = PageServiceCmd::parse(&format!(
2811 4 : "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
2812 4 : ))
2813 4 : .unwrap();
2814 4 : assert_eq!(
2815 4 : cmd,
2816 4 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2817 4 : tenant_shard_id,
2818 4 : timeline_id,
2819 4 : lsn: Lsn::from_str("0/16ABCDE").unwrap(),
2820 4 : })
2821 4 : );
2822 4 : let tenant_shard_id = TenantShardId::split(&tenant_shard_id, ShardCount(8))[1];
2823 4 : let cmd = PageServiceCmd::parse(&format!(
2824 4 : "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
2825 4 : ))
2826 4 : .unwrap();
2827 4 : assert_eq!(
2828 4 : cmd,
2829 4 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2830 4 : tenant_shard_id,
2831 4 : timeline_id,
2832 4 : lsn: Lsn::from_str("0/16ABCDE").unwrap(),
2833 4 : })
2834 4 : );
2835 4 : let cmd = PageServiceCmd::parse("set a = b").unwrap();
2836 4 : assert_eq!(cmd, PageServiceCmd::Set);
2837 4 : let cmd = PageServiceCmd::parse("SET foo").unwrap();
2838 4 : assert_eq!(cmd, PageServiceCmd::Set);
2839 4 : }
2840 :
2841 : #[test]
2842 4 : fn pageservice_cmd_err_handling() {
2843 4 : let tenant_id = TenantId::generate();
2844 4 : let timeline_id = TimelineId::generate();
2845 4 : let cmd = PageServiceCmd::parse("unknown_command");
2846 4 : assert!(cmd.is_err());
2847 4 : let cmd = PageServiceCmd::parse("pagestream_v2");
2848 4 : assert!(cmd.is_err());
2849 4 : let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx"));
2850 4 : assert!(cmd.is_err());
2851 4 : let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx {timeline_id}xxx"));
2852 4 : assert!(cmd.is_err());
2853 4 : let cmd = PageServiceCmd::parse(&format!(
2854 4 : "basebackup {tenant_id} {timeline_id} --gzip --gzip"
2855 4 : ));
2856 4 : assert!(cmd.is_err());
2857 4 : let cmd = PageServiceCmd::parse(&format!(
2858 4 : "basebackup {tenant_id} {timeline_id} --gzip --unknown"
2859 4 : ));
2860 4 : assert!(cmd.is_err());
2861 4 : let cmd = PageServiceCmd::parse(&format!(
2862 4 : "basebackup {tenant_id} {timeline_id} --gzip 0/16ABCDE"
2863 4 : ));
2864 4 : assert!(cmd.is_err());
2865 4 : let cmd = PageServiceCmd::parse(&format!("lease {tenant_id} {timeline_id} gzip 0/16ABCDE"));
2866 4 : assert!(cmd.is_err());
2867 4 : }
2868 : }
|