Line data Source code
1 : //! The Page Service listens for client connections and serves their GetPage@LSN
2 : //! requests.
3 :
4 : use std::borrow::Cow;
5 : use std::num::NonZeroUsize;
6 : use std::os::fd::AsRawFd;
7 : use std::str::FromStr;
8 : use std::sync::Arc;
9 : use std::time::{Duration, Instant, SystemTime};
10 : use std::{io, str};
11 :
12 : use anyhow::{Context, bail};
13 : use async_compression::tokio::write::GzipEncoder;
14 : use bytes::Buf;
15 : use futures::FutureExt;
16 : use itertools::Itertools;
17 : use once_cell::sync::OnceCell;
18 : use pageserver_api::config::{
19 : PageServicePipeliningConfig, PageServicePipeliningConfigPipelined,
20 : PageServiceProtocolPipelinedExecutionStrategy,
21 : };
22 : use pageserver_api::key::rel_block_to_key;
23 : use pageserver_api::models::{
24 : self, PageTraceEvent, PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse,
25 : PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse,
26 : PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetSlruSegmentRequest,
27 : PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest, PagestreamNblocksResponse,
28 : PagestreamProtocolVersion, PagestreamRequest, TenantState,
29 : };
30 : use pageserver_api::reltag::SlruKind;
31 : use pageserver_api::shard::TenantShardId;
32 : use postgres_backend::{
33 : AuthType, PostgresBackend, PostgresBackendReader, QueryError, is_expected_io_error,
34 : };
35 : use postgres_ffi::BLCKSZ;
36 : use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID;
37 : use pq_proto::framed::ConnectionError;
38 : use pq_proto::{BeMessage, FeMessage, FeStartupPacket, RowDescriptor};
39 : use strum_macros::IntoStaticStr;
40 : use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, BufWriter};
41 : use tokio::task::JoinHandle;
42 : use tokio_util::sync::CancellationToken;
43 : use tracing::*;
44 : use utils::auth::{Claims, Scope, SwappableJwtAuth};
45 : use utils::failpoint_support;
46 : use utils::id::{TenantId, TimelineId};
47 : use utils::logging::log_slow;
48 : use utils::lsn::Lsn;
49 : use utils::simple_rcu::RcuReadGuard;
50 : use utils::sync::gate::{Gate, GateGuard};
51 : use utils::sync::spsc_fold;
52 :
53 : use crate::auth::check_permission;
54 : use crate::basebackup::BasebackupError;
55 : use crate::config::PageServerConf;
56 : use crate::context::{DownloadBehavior, RequestContext};
57 : use crate::metrics::{
58 : self, COMPUTE_COMMANDS_COUNTERS, ComputeCommandKind, LIVE_CONNECTIONS, SmgrOpTimer,
59 : TimelineMetrics,
60 : };
61 : use crate::pgdatadir_mapping::Version;
62 : use crate::span::{
63 : debug_assert_current_span_has_tenant_and_timeline_id,
64 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
65 : };
66 : use crate::task_mgr::{self, COMPUTE_REQUEST_RUNTIME, TaskKind};
67 : use crate::tenant::mgr::{
68 : GetActiveTenantError, GetTenantError, ShardResolveResult, ShardSelector, TenantManager,
69 : };
70 : use crate::tenant::storage_layer::IoConcurrency;
71 : use crate::tenant::timeline::{self, WaitLsnError};
72 : use crate::tenant::{GetTimelineError, PageReconstructError, Timeline};
73 : use crate::{basebackup, timed_after_cancellation};
74 :
75 : /// How long we may wait for a [`crate::tenant::mgr::TenantSlot::InProgress`] and/or a [`crate::tenant::Tenant`] which
76 : /// is not yet in state [`TenantState::Active`].
77 : ///
78 : /// NB: this is a different value than [`crate::http::routes::ACTIVE_TENANT_TIMEOUT`].
79 : const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);
80 :
81 : /// Threshold at which to log slow GetPage requests.
82 : const LOG_SLOW_GETPAGE_THRESHOLD: Duration = Duration::from_secs(30);
83 :
84 : ///////////////////////////////////////////////////////////////////////////////
85 :
86 : pub struct Listener {
87 : cancel: CancellationToken,
88 : /// Cancel the listener task through `cancel` to shut down the listener
89 : /// and get a handle on the existing connections.
90 : task: JoinHandle<Connections>,
91 : }
92 :
93 : pub struct Connections {
94 : cancel: CancellationToken,
95 : tasks: tokio::task::JoinSet<ConnectionHandlerResult>,
96 : gate: Gate,
97 : }
98 :
99 0 : pub fn spawn(
100 0 : conf: &'static PageServerConf,
101 0 : tenant_manager: Arc<TenantManager>,
102 0 : pg_auth: Option<Arc<SwappableJwtAuth>>,
103 0 : tcp_listener: tokio::net::TcpListener,
104 0 : ) -> Listener {
105 0 : let cancel = CancellationToken::new();
106 0 : let libpq_ctx = RequestContext::todo_child(
107 0 : TaskKind::LibpqEndpointListener,
108 0 : // listener task shouldn't need to download anything. (We will
109 0 : // create separate sub-contexts for each connection, with their
110 0 : // own download behavior. This context is used only to listen and
111 0 : // accept connections.)
112 0 : DownloadBehavior::Error,
113 0 : );
114 0 : let task = COMPUTE_REQUEST_RUNTIME.spawn(task_mgr::exit_on_panic_or_error(
115 0 : "libpq listener",
116 0 : libpq_listener_main(
117 0 : conf,
118 0 : tenant_manager,
119 0 : pg_auth,
120 0 : tcp_listener,
121 0 : conf.pg_auth_type,
122 0 : conf.page_service_pipelining.clone(),
123 0 : libpq_ctx,
124 0 : cancel.clone(),
125 0 : )
126 0 : .map(anyhow::Ok),
127 0 : ));
128 0 :
129 0 : Listener { cancel, task }
130 0 : }
131 :
132 : impl Listener {
133 0 : pub async fn stop_accepting(self) -> Connections {
134 0 : self.cancel.cancel();
135 0 : self.task
136 0 : .await
137 0 : .expect("unreachable: we wrap the listener task in task_mgr::exit_on_panic_or_error")
138 0 : }
139 : }
140 : impl Connections {
141 0 : pub(crate) async fn shutdown(self) {
142 0 : let Self {
143 0 : cancel,
144 0 : mut tasks,
145 0 : gate,
146 0 : } = self;
147 0 : cancel.cancel();
148 0 : while let Some(res) = tasks.join_next().await {
149 0 : Self::handle_connection_completion(res);
150 0 : }
151 0 : gate.close().await;
152 0 : }
153 :
154 0 : fn handle_connection_completion(res: Result<anyhow::Result<()>, tokio::task::JoinError>) {
155 0 : match res {
156 0 : Ok(Ok(())) => {}
157 0 : Ok(Err(e)) => error!("error in page_service connection task: {:?}", e),
158 0 : Err(e) => error!("page_service connection task panicked: {:?}", e),
159 : }
160 0 : }
161 : }
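 : 
 : // Illustrative sketch: how a caller might drive the listener lifecycle above,
 : // assuming it owns the TCP listener and the shutdown sequence. `spawn` starts
 : // accepting connections, `stop_accepting` stops the accept loop and hands back
 : // the live `Connections`, and `Connections::shutdown` cancels and drains them.
 : //
 : //     let listener = spawn(conf, tenant_manager, pg_auth, tcp_listener);
 : //     // ... later, during pageserver shutdown:
 : //     let connections = listener.stop_accepting().await;
 : //     connections.shutdown().await;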
162 :
163 : ///
164 : /// Main loop of the page service.
165 : ///
166 : /// Listens for connections, and launches a new handler task for each.
167 : ///
168 : /// Returns Ok(()) upon cancellation via `cancel`, returning the set of
169 : /// open connections.
170 : ///
171 : #[allow(clippy::too_many_arguments)]
172 0 : pub async fn libpq_listener_main(
173 0 : conf: &'static PageServerConf,
174 0 : tenant_manager: Arc<TenantManager>,
175 0 : auth: Option<Arc<SwappableJwtAuth>>,
176 0 : listener: tokio::net::TcpListener,
177 0 : auth_type: AuthType,
178 0 : pipelining_config: PageServicePipeliningConfig,
179 0 : listener_ctx: RequestContext,
180 0 : listener_cancel: CancellationToken,
181 0 : ) -> Connections {
182 0 : let connections_cancel = CancellationToken::new();
183 0 : let connections_gate = Gate::default();
184 0 : let mut connection_handler_tasks = tokio::task::JoinSet::default();
185 :
186 : loop {
187 0 : let gate_guard = match connections_gate.enter() {
188 0 : Ok(guard) => guard,
189 0 : Err(_) => break,
190 : };
191 :
192 0 : let accepted = tokio::select! {
193 : biased;
194 0 : _ = listener_cancel.cancelled() => break,
195 0 : next = connection_handler_tasks.join_next(), if !connection_handler_tasks.is_empty() => {
196 0 : let res = next.expect("we dont poll while empty");
197 0 : Connections::handle_connection_completion(res);
198 0 : continue;
199 : }
200 0 : accepted = listener.accept() => accepted,
201 0 : };
202 0 :
203 0 : match accepted {
204 0 : Ok((socket, peer_addr)) => {
205 0 : // Connection established. Spawn a new task to handle it.
206 0 : debug!("accepted connection from {}", peer_addr);
207 0 : let local_auth = auth.clone();
208 0 : let connection_ctx = listener_ctx
209 0 : .detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);
210 0 : connection_handler_tasks.spawn(page_service_conn_main(
211 0 : conf,
212 0 : tenant_manager.clone(),
213 0 : local_auth,
214 0 : socket,
215 0 : auth_type,
216 0 : pipelining_config.clone(),
217 0 : connection_ctx,
218 0 : connections_cancel.child_token(),
219 0 : gate_guard,
220 0 : ));
221 : }
222 0 : Err(err) => {
223 0 : // accept() failed. Log the error, and loop back to retry on the next connection.
224 0 : error!("accept() failed: {:?}", err);
225 : }
226 : }
227 : }
228 :
229 0 : debug!("page_service listener loop terminated");
230 :
231 0 : Connections {
232 0 : cancel: connections_cancel,
233 0 : tasks: connection_handler_tasks,
234 0 : gate: connections_gate,
235 0 : }
236 0 : }
237 :
238 : type ConnectionHandlerResult = anyhow::Result<()>;
239 :
240 : #[instrument(skip_all, fields(peer_addr, application_name))]
241 : #[allow(clippy::too_many_arguments)]
242 : async fn page_service_conn_main(
243 : conf: &'static PageServerConf,
244 : tenant_manager: Arc<TenantManager>,
245 : auth: Option<Arc<SwappableJwtAuth>>,
246 : socket: tokio::net::TcpStream,
247 : auth_type: AuthType,
248 : pipelining_config: PageServicePipeliningConfig,
249 : connection_ctx: RequestContext,
250 : cancel: CancellationToken,
251 : gate_guard: GateGuard,
252 : ) -> ConnectionHandlerResult {
253 : let _guard = LIVE_CONNECTIONS
254 : .with_label_values(&["page_service"])
255 : .guard();
256 :
257 : socket
258 : .set_nodelay(true)
259 : .context("could not set TCP_NODELAY")?;
260 :
261 : let socket_fd = socket.as_raw_fd();
262 :
263 : let peer_addr = socket.peer_addr().context("get peer address")?;
264 : tracing::Span::current().record("peer_addr", field::display(peer_addr));
265 :
266 : // Set up a read timeout of 10 minutes. The exact value is somewhat arbitrary; the requirements are:
267 : // - long enough for most valid compute connections
268 : // - less than infinite to stop us from "leaking" connections to long-gone computes
269 : //
270 : // no write timeout is used, because the kernel is assumed to error writes after some time.
271 : let mut socket = tokio_io_timeout::TimeoutReader::new(socket);
272 :
273 : let default_timeout_ms = 10 * 60 * 1000; // 10 minutes by default
274 0 : let socket_timeout_ms = (|| {
275 0 : fail::fail_point!("simulated-bad-compute-connection", |avg_timeout_ms| {
276 : // Exponential distribution for simulating
277 : // poor network conditions; in tests we expect avg_timeout_ms to be
278 : // around 15.
279 0 : if let Some(avg_timeout_ms) = avg_timeout_ms {
280 0 : let avg = avg_timeout_ms.parse::<i64>().unwrap() as f32;
281 0 : let u = rand::random::<f32>();
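 : // Inverse-transform sampling: for u ~ Uniform(0, 1), -ln(1 - u) / rate is
 : // exponentially distributed with the given rate; the expression below
 : // computes exactly that, using `avg` as the rate.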
282 0 : ((1.0 - u).ln() / (-avg)) as u64
283 : } else {
284 0 : default_timeout_ms
285 : }
286 0 : });
287 0 : default_timeout_ms
288 : })();
289 :
290 : // A timeout here does not mean the client died; it can happen if it's just idle for
291 : // a while: we will tear down this PageServerHandler and instantiate a new one if/when
292 : // they reconnect.
293 : socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms)));
294 : let socket = Box::pin(socket);
295 :
296 : fail::fail_point!("ps::connection-start::pre-login");
297 :
298 : // XXX: pgbackend.run() should take the connection_ctx,
299 : // and create a child per-query context when it invokes process_query.
300 : // But it's in a shared crate, so we store connection_ctx inside PageServerHandler
301 : // and create the per-query context in process_query ourselves.
302 : let mut conn_handler = PageServerHandler::new(
303 : conf,
304 : tenant_manager,
305 : auth,
306 : pipelining_config,
307 : connection_ctx,
308 : cancel.clone(),
309 : gate_guard,
310 : );
311 : let pgbackend = PostgresBackend::new_from_io(socket_fd, socket, peer_addr, auth_type, None)?;
312 :
313 : match pgbackend.run(&mut conn_handler, &cancel).await {
314 : Ok(()) => {
315 : // we've been requested to shut down
316 : Ok(())
317 : }
318 : Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
319 : if is_expected_io_error(&io_error) {
320 : info!("Postgres client disconnected ({io_error})");
321 : Ok(())
322 : } else {
323 : let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
324 : Err(io_error).context(format!(
325 : "Postgres connection error for tenant_id={:?} client at peer_addr={}",
326 : tenant_id, peer_addr
327 : ))
328 : }
329 : }
330 : other => {
331 : let tenant_id = conn_handler.timeline_handles.as_ref().unwrap().tenant_id();
332 : other.context(format!(
333 : "Postgres query error for tenant_id={:?} client peer_addr={}",
334 : tenant_id, peer_addr
335 : ))
336 : }
337 : }
338 : }
339 :
340 : struct PageServerHandler {
341 : conf: &'static PageServerConf,
342 : auth: Option<Arc<SwappableJwtAuth>>,
343 : claims: Option<Claims>,
344 :
345 : /// The context created for the lifetime of the connection
346 : /// serviced by this PageServerHandler.
347 : /// For each query received over the connection,
348 : /// `process_query` creates a child context from this one.
349 : connection_ctx: RequestContext,
350 :
351 : cancel: CancellationToken,
352 :
353 : /// None only while pagestream protocol is being processed.
354 : timeline_handles: Option<TimelineHandles>,
355 :
356 : pipelining_config: PageServicePipeliningConfig,
357 :
358 : gate_guard: GateGuard,
359 : }
360 :
361 : struct TimelineHandles {
362 : wrapper: TenantManagerWrapper,
363 : /// Note on size: the typical size of this map is 1. The largest size we expect
364 : /// to see is the number of shards divided by the number of pageservers (typically < 2),
365 : /// or the ratio used when splitting shards (i.e. how many children created from one)
366 : /// parent shard, where a "large" number might be ~8.
367 : handles: timeline::handle::Cache<TenantManagerTypes>,
368 : }
369 :
370 : impl TimelineHandles {
371 0 : fn new(tenant_manager: Arc<TenantManager>) -> Self {
372 0 : Self {
373 0 : wrapper: TenantManagerWrapper {
374 0 : tenant_manager,
375 0 : tenant_id: OnceCell::new(),
376 0 : },
377 0 : handles: Default::default(),
378 0 : }
379 0 : }
380 0 : async fn get(
381 0 : &mut self,
382 0 : tenant_id: TenantId,
383 0 : timeline_id: TimelineId,
384 0 : shard_selector: ShardSelector,
385 0 : ) -> Result<timeline::handle::Handle<TenantManagerTypes>, GetActiveTimelineError> {
386 0 : if *self.wrapper.tenant_id.get_or_init(|| tenant_id) != tenant_id {
387 0 : return Err(GetActiveTimelineError::Tenant(
388 0 : GetActiveTenantError::SwitchedTenant,
389 0 : ));
390 0 : }
391 0 : self.handles
392 0 : .get(timeline_id, shard_selector, &self.wrapper)
393 0 : .await
394 0 : .map_err(|e| match e {
395 0 : timeline::handle::GetError::TenantManager(e) => e,
396 : timeline::handle::GetError::PerTimelineStateShutDown => {
397 0 : trace!("per-timeline state shut down");
398 0 : GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown)
399 : }
400 0 : })
401 0 : }
402 :
403 0 : fn tenant_id(&self) -> Option<TenantId> {
404 0 : self.wrapper.tenant_id.get().copied()
405 0 : }
406 : }
407 :
408 : pub(crate) struct TenantManagerWrapper {
409 : tenant_manager: Arc<TenantManager>,
410 : // We do not support switching tenant_id on a connection at this point.
411 : // We can add support for this later if needed without changing
412 : // the protocol.
413 : tenant_id: once_cell::sync::OnceCell<TenantId>,
414 : }
415 :
416 : #[derive(Debug)]
417 : pub(crate) struct TenantManagerTypes;
418 :
419 : impl timeline::handle::Types for TenantManagerTypes {
420 : type TenantManagerError = GetActiveTimelineError;
421 : type TenantManager = TenantManagerWrapper;
422 : type Timeline = TenantManagerCacheItem;
423 : }
424 :
425 : pub(crate) struct TenantManagerCacheItem {
426 : pub(crate) timeline: Arc<Timeline>,
427 : // allow() for cheap propagation through RequestContext inside a task
428 : #[allow(clippy::redundant_allocation)]
429 : pub(crate) metrics: Arc<Arc<TimelineMetrics>>,
430 : #[allow(dead_code)] // we store it to keep the gate open
431 : pub(crate) gate_guard: GateGuard,
432 : }
433 :
434 : impl std::ops::Deref for TenantManagerCacheItem {
435 : type Target = Arc<Timeline>;
436 0 : fn deref(&self) -> &Self::Target {
437 0 : &self.timeline
438 0 : }
439 : }
440 :
441 : impl timeline::handle::Timeline<TenantManagerTypes> for TenantManagerCacheItem {
442 0 : fn shard_timeline_id(&self) -> timeline::handle::ShardTimelineId {
443 0 : Timeline::shard_timeline_id(&self.timeline)
444 0 : }
445 :
446 0 : fn per_timeline_state(&self) -> &timeline::handle::PerTimelineState<TenantManagerTypes> {
447 0 : &self.timeline.handles
448 0 : }
449 :
450 0 : fn get_shard_identity(&self) -> &pageserver_api::shard::ShardIdentity {
451 0 : Timeline::get_shard_identity(&self.timeline)
452 0 : }
453 : }
454 :
455 : impl timeline::handle::TenantManager<TenantManagerTypes> for TenantManagerWrapper {
456 0 : async fn resolve(
457 0 : &self,
458 0 : timeline_id: TimelineId,
459 0 : shard_selector: ShardSelector,
460 0 : ) -> Result<TenantManagerCacheItem, GetActiveTimelineError> {
461 0 : let tenant_id = self.tenant_id.get().expect("we set this in get()");
462 0 : let timeout = ACTIVE_TENANT_TIMEOUT;
463 0 : let wait_start = Instant::now();
464 0 : let deadline = wait_start + timeout;
465 0 : let tenant_shard = loop {
466 0 : let resolved = self
467 0 : .tenant_manager
468 0 : .resolve_attached_shard(tenant_id, shard_selector);
469 0 : match resolved {
470 0 : ShardResolveResult::Found(tenant_shard) => break tenant_shard,
471 : ShardResolveResult::NotFound => {
472 0 : return Err(GetActiveTimelineError::Tenant(
473 0 : GetActiveTenantError::NotFound(GetTenantError::NotFound(*tenant_id)),
474 0 : ));
475 : }
476 0 : ShardResolveResult::InProgress(barrier) => {
477 0 : // We can't authoritatively answer right now: wait for InProgress state
478 0 : // to end, then try again
479 0 : tokio::select! {
480 0 : _ = barrier.wait() => {
481 0 : // The barrier completed: proceed around the loop to try looking up again
482 0 : },
483 0 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
484 0 : return Err(GetActiveTimelineError::Tenant(GetActiveTenantError::WaitForActiveTimeout {
485 0 : latest_state: None,
486 0 : wait_time: timeout,
487 0 : }));
488 : }
489 : }
490 : }
491 : };
492 : };
493 :
494 0 : tracing::debug!("Waiting for tenant to enter active state...");
495 0 : tenant_shard
496 0 : .wait_to_become_active(deadline.duration_since(Instant::now()))
497 0 : .await
498 0 : .map_err(GetActiveTimelineError::Tenant)?;
499 :
500 0 : let timeline = tenant_shard
501 0 : .get_timeline(timeline_id, true)
502 0 : .map_err(GetActiveTimelineError::Timeline)?;
503 :
504 0 : let gate_guard = match timeline.gate.enter() {
505 0 : Ok(guard) => guard,
506 : Err(_) => {
507 0 : return Err(GetActiveTimelineError::Timeline(
508 0 : GetTimelineError::ShuttingDown,
509 0 : ));
510 : }
511 : };
512 :
513 0 : let metrics = Arc::new(Arc::clone(&timeline.metrics));
514 0 :
515 0 : Ok(TenantManagerCacheItem {
516 0 : timeline,
517 0 : metrics,
518 0 : gate_guard,
519 0 : })
520 0 : }
521 : }
522 :
523 : #[derive(thiserror::Error, Debug)]
524 : enum PageStreamError {
525 : /// We encountered an error that should prompt the client to reconnect:
526 : /// in practice this means we drop the connection without sending a response.
527 : #[error("Reconnect required: {0}")]
528 : Reconnect(Cow<'static, str>),
529 :
530 : /// We were instructed to shutdown while processing the query
531 : #[error("Shutting down")]
532 : Shutdown,
533 :
534 : /// Something went wrong reading a page: this likely indicates a pageserver bug
535 : #[error("Read error")]
536 : Read(#[source] PageReconstructError),
537 :
538 : /// Ran out of time waiting for an LSN
539 : #[error("LSN timeout: {0}")]
540 : LsnTimeout(WaitLsnError),
541 :
542 : /// The entity required to serve the request (tenant or timeline) is not found,
543 : /// or is not found in a suitable state to serve a request.
544 : #[error("Not found: {0}")]
545 : NotFound(Cow<'static, str>),
546 :
547 : /// Request asked for something that doesn't make sense, like an invalid LSN
548 : #[error("Bad request: {0}")]
549 : BadRequest(Cow<'static, str>),
550 : }
551 :
552 : impl From<PageReconstructError> for PageStreamError {
553 0 : fn from(value: PageReconstructError) -> Self {
554 0 : match value {
555 0 : PageReconstructError::Cancelled => Self::Shutdown,
556 0 : e => Self::Read(e),
557 : }
558 0 : }
559 : }
560 :
561 : impl From<GetActiveTimelineError> for PageStreamError {
562 0 : fn from(value: GetActiveTimelineError) -> Self {
563 0 : match value {
564 : GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled)
565 : | GetActiveTimelineError::Tenant(GetActiveTenantError::WillNotBecomeActive(
566 : TenantState::Stopping { .. },
567 : ))
568 0 : | GetActiveTimelineError::Timeline(GetTimelineError::ShuttingDown) => Self::Shutdown,
569 0 : GetActiveTimelineError::Tenant(e) => Self::NotFound(format!("{e}").into()),
570 0 : GetActiveTimelineError::Timeline(e) => Self::NotFound(format!("{e}").into()),
571 : }
572 0 : }
573 : }
574 :
575 : impl From<WaitLsnError> for PageStreamError {
576 0 : fn from(value: WaitLsnError) -> Self {
577 0 : match value {
578 0 : e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e),
579 0 : WaitLsnError::Shutdown => Self::Shutdown,
580 0 : e @ WaitLsnError::BadState { .. } => Self::Reconnect(format!("{e}").into()),
581 : }
582 0 : }
583 : }
584 :
585 : impl From<WaitLsnError> for QueryError {
586 0 : fn from(value: WaitLsnError) -> Self {
587 0 : match value {
588 0 : e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)),
589 0 : WaitLsnError::Shutdown => Self::Shutdown,
590 0 : WaitLsnError::BadState { .. } => Self::Reconnect,
591 : }
592 0 : }
593 : }
594 :
595 : #[derive(thiserror::Error, Debug)]
596 : struct BatchedPageStreamError {
597 : req: PagestreamRequest,
598 : err: PageStreamError,
599 : }
600 :
601 : impl std::fmt::Display for BatchedPageStreamError {
602 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
603 0 : self.err.fmt(f)
604 0 : }
605 : }
606 :
607 : struct BatchedGetPageRequest {
608 : req: PagestreamGetPageRequest,
609 : timer: SmgrOpTimer,
610 : }
611 :
612 : #[cfg(feature = "testing")]
613 : struct BatchedTestRequest {
614 : req: models::PagestreamTestRequest,
615 : timer: SmgrOpTimer,
616 : }
617 :
618 : /// NB: we only hold [`timeline::handle::WeakHandle`] inside this enum,
619 : /// so that we don't keep the [`Timeline::gate`] open while the batch
620 : /// is being built up inside the [`spsc_fold`] (pagestream pipelining).
621 : #[derive(IntoStaticStr)]
622 : enum BatchedFeMessage {
623 : Exists {
624 : span: Span,
625 : timer: SmgrOpTimer,
626 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
627 : req: models::PagestreamExistsRequest,
628 : },
629 : Nblocks {
630 : span: Span,
631 : timer: SmgrOpTimer,
632 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
633 : req: models::PagestreamNblocksRequest,
634 : },
635 : GetPage {
636 : span: Span,
637 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
638 : effective_request_lsn: Lsn,
639 : pages: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
640 : },
641 : DbSize {
642 : span: Span,
643 : timer: SmgrOpTimer,
644 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
645 : req: models::PagestreamDbSizeRequest,
646 : },
647 : GetSlruSegment {
648 : span: Span,
649 : timer: SmgrOpTimer,
650 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
651 : req: models::PagestreamGetSlruSegmentRequest,
652 : },
653 : #[cfg(feature = "testing")]
654 : Test {
655 : span: Span,
656 : shard: timeline::handle::WeakHandle<TenantManagerTypes>,
657 : requests: Vec<BatchedTestRequest>,
658 : },
659 : RespondError {
660 : span: Span,
661 : error: BatchedPageStreamError,
662 : },
663 : }
664 :
665 : impl BatchedFeMessage {
666 0 : fn as_static_str(&self) -> &'static str {
667 0 : self.into()
668 0 : }
669 :
670 0 : fn observe_execution_start(&mut self, at: Instant) {
671 0 : match self {
672 0 : BatchedFeMessage::Exists { timer, .. }
673 0 : | BatchedFeMessage::Nblocks { timer, .. }
674 0 : | BatchedFeMessage::DbSize { timer, .. }
675 0 : | BatchedFeMessage::GetSlruSegment { timer, .. } => {
676 0 : timer.observe_execution_start(at);
677 0 : }
678 0 : BatchedFeMessage::GetPage { pages, .. } => {
679 0 : for page in pages {
680 0 : page.timer.observe_execution_start(at);
681 0 : }
682 : }
683 : #[cfg(feature = "testing")]
684 0 : BatchedFeMessage::Test { requests, .. } => {
685 0 : for req in requests {
686 0 : req.timer.observe_execution_start(at);
687 0 : }
688 : }
689 0 : BatchedFeMessage::RespondError { .. } => {}
690 : }
691 0 : }
692 : }
693 :
694 : impl PageServerHandler {
695 0 : pub fn new(
696 0 : conf: &'static PageServerConf,
697 0 : tenant_manager: Arc<TenantManager>,
698 0 : auth: Option<Arc<SwappableJwtAuth>>,
699 0 : pipelining_config: PageServicePipeliningConfig,
700 0 : connection_ctx: RequestContext,
701 0 : cancel: CancellationToken,
702 0 : gate_guard: GateGuard,
703 0 : ) -> Self {
704 0 : PageServerHandler {
705 0 : conf,
706 0 : auth,
707 0 : claims: None,
708 0 : connection_ctx,
709 0 : timeline_handles: Some(TimelineHandles::new(tenant_manager)),
710 0 : cancel,
711 0 : pipelining_config,
712 0 : gate_guard,
713 0 : }
714 0 : }
715 :
716 : /// This function always respects cancellation of any timeline in [`Self::shard_timelines`]. Pass in
717 : /// a cancellation token at the next scope up (such as a tenant cancellation token) to ensure we respect
718 : /// cancellation if there aren't any timelines in the cache.
719 : ///
720 : /// If calling from a function that doesn't use the [`Self::shard_timelines`] cache, then pass in the
721 : /// timeline cancellation token.
722 0 : async fn flush_cancellable<IO>(
723 0 : &self,
724 0 : pgb: &mut PostgresBackend<IO>,
725 0 : cancel: &CancellationToken,
726 0 : ) -> Result<(), QueryError>
727 0 : where
728 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
729 0 : {
730 0 : tokio::select!(
731 0 : flush_r = pgb.flush() => {
732 0 : Ok(flush_r?)
733 : },
734 0 : _ = cancel.cancelled() => {
735 0 : Err(QueryError::Shutdown)
736 : }
737 : )
738 0 : }
739 :
740 : #[allow(clippy::too_many_arguments)]
741 0 : async fn pagestream_read_message<IO>(
742 0 : pgb: &mut PostgresBackendReader<IO>,
743 0 : tenant_id: TenantId,
744 0 : timeline_id: TimelineId,
745 0 : timeline_handles: &mut TimelineHandles,
746 0 : cancel: &CancellationToken,
747 0 : ctx: &RequestContext,
748 0 : protocol_version: PagestreamProtocolVersion,
749 0 : parent_span: Span,
750 0 : ) -> Result<Option<BatchedFeMessage>, QueryError>
751 0 : where
752 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
753 0 : {
754 0 : let msg = tokio::select! {
755 : biased;
756 0 : _ = cancel.cancelled() => {
757 0 : return Err(QueryError::Shutdown)
758 : }
759 0 : msg = pgb.read_message() => { msg }
760 0 : };
761 0 :
762 0 : let received_at = Instant::now();
763 :
764 0 : let copy_data_bytes = match msg? {
765 0 : Some(FeMessage::CopyData(bytes)) => bytes,
766 : Some(FeMessage::Terminate) => {
767 0 : return Ok(None);
768 : }
769 0 : Some(m) => {
770 0 : return Err(QueryError::Other(anyhow::anyhow!(
771 0 : "unexpected message: {m:?} during COPY"
772 0 : )));
773 : }
774 : None => {
775 0 : return Ok(None);
776 : } // client disconnected
777 : };
778 0 : trace!("query: {copy_data_bytes:?}");
779 :
780 0 : fail::fail_point!("ps::handle-pagerequest-message");
781 :
782 : // parse request
783 0 : let neon_fe_msg =
784 0 : PagestreamFeMessage::parse(&mut copy_data_bytes.reader(), protocol_version)?;
785 :
786 : // TODO: turn into an async closure once available to avoid repeating received_at
787 0 : async fn record_op_start_and_throttle(
788 0 : shard: &timeline::handle::Handle<TenantManagerTypes>,
789 0 : op: metrics::SmgrQueryType,
790 0 : received_at: Instant,
791 0 : ) -> Result<SmgrOpTimer, QueryError> {
792 0 : // It's important to start the smgr op metric recorder as early as possible
793 0 : // so that the _started counters are incremented before we do
794 0 : // any serious waiting, e.g., for throttle, batching, or actual request handling.
795 0 : let mut timer = shard.query_metrics.start_smgr_op(op, received_at);
796 0 : let now = Instant::now();
797 0 : timer.observe_throttle_start(now);
798 0 : let throttled = tokio::select! {
799 0 : res = shard.pagestream_throttle.throttle(1, now) => res,
800 0 : _ = shard.cancel.cancelled() => return Err(QueryError::Shutdown),
801 : };
802 0 : timer.observe_throttle_done(throttled);
803 0 : Ok(timer)
804 0 : }
805 :
806 0 : let batched_msg = match neon_fe_msg {
807 0 : PagestreamFeMessage::Exists(req) => {
808 0 : let shard = timeline_handles
809 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
810 0 : .await?;
811 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
812 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
813 0 : let timer = record_op_start_and_throttle(
814 0 : &shard,
815 0 : metrics::SmgrQueryType::GetRelExists,
816 0 : received_at,
817 0 : )
818 0 : .await?;
819 0 : BatchedFeMessage::Exists {
820 0 : span,
821 0 : timer,
822 0 : shard: shard.downgrade(),
823 0 : req,
824 0 : }
825 : }
826 0 : PagestreamFeMessage::Nblocks(req) => {
827 0 : let shard = timeline_handles
828 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
829 0 : .await?;
830 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
831 0 : let timer = record_op_start_and_throttle(
832 0 : &shard,
833 0 : metrics::SmgrQueryType::GetRelSize,
834 0 : received_at,
835 0 : )
836 0 : .await?;
837 0 : BatchedFeMessage::Nblocks {
838 0 : span,
839 0 : timer,
840 0 : shard: shard.downgrade(),
841 0 : req,
842 0 : }
843 : }
844 0 : PagestreamFeMessage::DbSize(req) => {
845 0 : let shard = timeline_handles
846 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
847 0 : .await?;
848 0 : let span = tracing::info_span!(parent: &parent_span, "handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
849 0 : let timer = record_op_start_and_throttle(
850 0 : &shard,
851 0 : metrics::SmgrQueryType::GetDbSize,
852 0 : received_at,
853 0 : )
854 0 : .await?;
855 0 : BatchedFeMessage::DbSize {
856 0 : span,
857 0 : timer,
858 0 : shard: shard.downgrade(),
859 0 : req,
860 0 : }
861 : }
862 0 : PagestreamFeMessage::GetSlruSegment(req) => {
863 0 : let shard = timeline_handles
864 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
865 0 : .await?;
866 0 : let span = tracing::info_span!(parent: &parent_span, "handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.hdr.request_lsn, shard_id = %shard.tenant_shard_id.shard_slug());
867 0 : let timer = record_op_start_and_throttle(
868 0 : &shard,
869 0 : metrics::SmgrQueryType::GetSlruSegment,
870 0 : received_at,
871 0 : )
872 0 : .await?;
873 0 : BatchedFeMessage::GetSlruSegment {
874 0 : span,
875 0 : timer,
876 0 : shard: shard.downgrade(),
877 0 : req,
878 0 : }
879 : }
880 0 : PagestreamFeMessage::GetPage(req) => {
881 : // avoid a somewhat costly Span::record() by constructing the entire span in one go.
882 : macro_rules! mkspan {
883 : (before shard routing) => {{
884 : tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn)
885 : }};
886 : ($shard_id:expr) => {{
887 : tracing::info_span!(parent: &parent_span, "handle_get_page_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.hdr.request_lsn, shard_id = %$shard_id)
888 : }};
889 : }
890 :
891 : macro_rules! respond_error {
892 : ($span:expr, $error:expr) => {{
893 : let error = BatchedFeMessage::RespondError {
894 : span: $span,
895 : error: BatchedPageStreamError {
896 : req: req.hdr,
897 : err: $error,
898 : },
899 : };
900 : Ok(Some(error))
901 : }};
902 : }
903 :
904 0 : let key = rel_block_to_key(req.rel, req.blkno);
905 0 : let shard = match timeline_handles
906 0 : .get(tenant_id, timeline_id, ShardSelector::Page(key))
907 0 : .await
908 : {
909 0 : Ok(tl) => tl,
910 0 : Err(e) => {
911 0 : let span = mkspan!(before shard routing);
912 0 : match e {
913 : GetActiveTimelineError::Tenant(GetActiveTenantError::NotFound(_)) => {
914 : // We already know this tenant exists in general, because we resolved it at
915 : // start of connection. Getting a NotFound here indicates that the shard containing
916 : // the requested page is not present on this node: the client's knowledge of shard->pageserver
917 : // mapping is out of date.
918 : //
919 : // Closing the connection by returning `PageStreamError::Reconnect` has the side effect of rate-limiting the above message, via
920 : // the client's reconnect backoff, as well as hopefully prompting the client to load its updated configuration
921 : // and talk to a different pageserver.
922 0 : return respond_error!(
923 0 : span,
924 0 : PageStreamError::Reconnect(
925 0 : "getpage@lsn request routed to wrong shard".into()
926 0 : )
927 0 : );
928 : }
929 0 : e => {
930 0 : return respond_error!(span, e.into());
931 : }
932 : }
933 : }
934 : };
935 0 : let span = mkspan!(shard.tenant_shard_id.shard_slug());
936 :
937 0 : let timer = record_op_start_and_throttle(
938 0 : &shard,
939 0 : metrics::SmgrQueryType::GetPageAtLsn,
940 0 : received_at,
941 0 : )
942 0 : .await?;
943 :
944 : // We're holding the Handle
945 0 : let effective_request_lsn = match Self::wait_or_get_last_lsn(
946 0 : &shard,
947 0 : req.hdr.request_lsn,
948 0 : req.hdr.not_modified_since,
949 0 : &shard.get_applied_gc_cutoff_lsn(),
950 0 : ctx,
951 0 : )
952 0 : // TODO: if we actually need to wait for lsn here, it delays the entire batch which doesn't need to wait
953 0 : .await
954 : {
955 0 : Ok(lsn) => lsn,
956 0 : Err(e) => {
957 0 : return respond_error!(span, e);
958 : }
959 : };
960 : BatchedFeMessage::GetPage {
961 0 : span,
962 0 : shard: shard.downgrade(),
963 0 : effective_request_lsn,
964 0 : pages: smallvec::smallvec![BatchedGetPageRequest { req, timer }],
965 : }
966 : }
967 : #[cfg(feature = "testing")]
968 0 : PagestreamFeMessage::Test(req) => {
969 0 : let shard = timeline_handles
970 0 : .get(tenant_id, timeline_id, ShardSelector::Zero)
971 0 : .await?;
972 0 : let span = tracing::info_span!(parent: &parent_span, "handle_test_request", shard_id = %shard.tenant_shard_id.shard_slug());
973 0 : let timer =
974 0 : record_op_start_and_throttle(&shard, metrics::SmgrQueryType::Test, received_at)
975 0 : .await?;
976 0 : BatchedFeMessage::Test {
977 0 : span,
978 0 : shard: shard.downgrade(),
979 0 : requests: vec![BatchedTestRequest { req, timer }],
980 0 : }
981 : }
982 : };
983 0 : Ok(Some(batched_msg))
984 0 : }
985 :
986 : /// Post-condition: `batch` is Some()
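 : ///
 : /// In short: two GetPage messages are folded into the same batch only if they
 : /// resolve to the same shard handle and the same effective request LSN, and the
 : /// batch is still under `max_batch_size`. (Test requests, under
 : /// `cfg(feature = "testing")`, additionally require a matching `batch_key`.)
 : /// Any other combination ends the current batch.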
987 : #[instrument(skip_all, level = tracing::Level::TRACE)]
988 : #[allow(clippy::boxed_local)]
989 : fn pagestream_do_batch(
990 : max_batch_size: NonZeroUsize,
991 : batch: &mut Result<BatchedFeMessage, QueryError>,
992 : this_msg: Result<BatchedFeMessage, QueryError>,
993 : ) -> Result<(), Result<BatchedFeMessage, QueryError>> {
994 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
995 :
996 : let this_msg = match this_msg {
997 : Ok(this_msg) => this_msg,
998 : Err(e) => return Err(Err(e)),
999 : };
1000 :
1001 : match (&mut *batch, this_msg) {
1002 : // something batched already, let's see if we can add this message to the batch
1003 : (
1004 : Ok(BatchedFeMessage::GetPage {
1005 : span: _,
1006 : shard: accum_shard,
1007 : pages: accum_pages,
1008 : effective_request_lsn: accum_lsn,
1009 : }),
1010 : BatchedFeMessage::GetPage {
1011 : span: _,
1012 : shard: this_shard,
1013 : pages: this_pages,
1014 : effective_request_lsn: this_lsn,
1015 : },
1016 0 : ) if (|| {
1017 0 : assert_eq!(this_pages.len(), 1);
1018 0 : if accum_pages.len() >= max_batch_size.get() {
1019 0 : trace!(%accum_lsn, %this_lsn, %max_batch_size, "stopping batching because of batch size");
1020 0 : assert_eq!(accum_pages.len(), max_batch_size.get());
1021 0 : return false;
1022 0 : }
1023 0 : if !accum_shard.is_same_handle_as(&this_shard) {
1024 0 : trace!(%accum_lsn, %this_lsn, "stopping batching because timeline object mismatch");
1025 0 : // TODO: we _could_ batch & execute each shard separately (and in parallel).
1026 : // But the current logic for keeping responses in order does not support that.
1027 0 : return false;
1028 0 : }
1029 0 : // the vectored get currently only supports a single LSN, so bounce as soon
1030 0 : // as the effective request_lsn changes
1031 0 : if *accum_lsn != this_lsn {
1032 0 : trace!(%accum_lsn, %this_lsn, "stopping batching because LSN changed");
1033 0 : return false;
1034 0 : }
1035 0 : true
1036 : })() =>
1037 : {
1038 : // ok to batch
1039 : accum_pages.extend(this_pages);
1040 : Ok(())
1041 : }
1042 : #[cfg(feature = "testing")]
1043 : (
1044 : Ok(BatchedFeMessage::Test {
1045 : shard: accum_shard,
1046 : requests: accum_requests,
1047 : ..
1048 : }),
1049 : BatchedFeMessage::Test {
1050 : shard: this_shard,
1051 : requests: this_requests,
1052 : ..
1053 : },
1054 0 : ) if (|| {
1055 0 : assert!(this_requests.len() == 1);
1056 0 : if accum_requests.len() >= max_batch_size.get() {
1057 0 : trace!(%max_batch_size, "stopping batching because of batch size");
1058 0 : assert_eq!(accum_requests.len(), max_batch_size.get());
1059 0 : return false;
1060 0 : }
1061 0 : if !accum_shard.is_same_handle_as(&this_shard) {
1062 0 : trace!("stopping batching because timeline object mismatch");
1063 : // TODO: we _could_ batch & execute each shard separately (and in parallel).
1064 : // But the current logic for keeping responses in order does not support that.
1065 0 : return false;
1066 0 : }
1067 0 : let this_batch_key = this_requests[0].req.batch_key;
1068 0 : let accum_batch_key = accum_requests[0].req.batch_key;
1069 0 : if this_requests[0].req.batch_key != accum_requests[0].req.batch_key {
1070 0 : trace!(%accum_batch_key, %this_batch_key, "stopping batching because batch key changed");
1071 0 : return false;
1072 0 : }
1073 0 : true
1074 : })() =>
1075 : {
1076 : // ok to batch
1077 : accum_requests.extend(this_requests);
1078 : Ok(())
1079 : }
1080 : // something batched already but this message is unbatchable
1081 : (_, this_msg) => {
1082 : // by default, don't continue batching
1083 : Err(Ok(this_msg))
1084 : }
1085 : }
1086 : }
1087 :
1088 0 : #[instrument(level = tracing::Level::DEBUG, skip_all)]
1089 : async fn pagesteam_handle_batched_message<IO>(
1090 : &mut self,
1091 : pgb_writer: &mut PostgresBackend<IO>,
1092 : batch: BatchedFeMessage,
1093 : io_concurrency: IoConcurrency,
1094 : cancel: &CancellationToken,
1095 : protocol_version: PagestreamProtocolVersion,
1096 : ctx: &RequestContext,
1097 : ) -> Result<(), QueryError>
1098 : where
1099 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
1100 : {
1101 : let started_at = Instant::now();
1102 : let batch = {
1103 : let mut batch = batch;
1104 : batch.observe_execution_start(started_at);
1105 : batch
1106 : };
1107 :
1108 : // Dispatch the batch to the appropriate request handler.
1109 : let log_slow_name = batch.as_static_str();
1110 : let (mut handler_results, span) = {
1111 : // TODO: we unfortunately have to pin the future on the heap, since GetPage futures are huge and
1112 : // won't fit on the stack.
1113 : let mut boxpinned =
1114 : Box::pin(self.pagestream_dispatch_batched_message(batch, io_concurrency, ctx));
1115 : log_slow(
1116 : log_slow_name,
1117 : LOG_SLOW_GETPAGE_THRESHOLD,
1118 : boxpinned.as_mut(),
1119 : )
1120 : .await?
1121 : };
1122 :
1123 : // We purposefully don't count flush time into the smgr operation timer.
1124 : //
1125 : // The reason is that the current compute client will not perform protocol processing
1126 : // if the postgres backend process is doing things other than `->smgr_read()`.
1127 : // This is especially the case for prefetch.
1128 : //
1129 : // If the compute doesn't read from the connection, eventually TCP will backpressure
1130 : // all the way into our flush call below.
1131 : //
1132 : // The timer's underlying metric is used for a storage-internal latency SLO and
1133 : // we don't want to include latency in it that we can't control.
1134 : // And as pointed out above, in this case, we don't control the time that flush will take.
1135 : //
1136 : // We put each response in the batch onto the wire in a separate pgb_writer.flush()
1137 : // call, which (all unmeasured) adds syscall overhead but reduces time to first byte
1138 : // and avoids building up a "giant" contiguous userspace buffer to hold the entire response.
1139 : // TODO: vectored socket IO would be great, but pgb_writer doesn't support that.
1140 : let flush_timers = {
1141 : let flushing_start_time = Instant::now();
1142 : let mut flush_timers = Vec::with_capacity(handler_results.len());
1143 : for handler_result in &mut handler_results {
1144 : let flush_timer = match handler_result {
1145 : Ok((_, timer)) => Some(
1146 : timer
1147 : .observe_execution_end(flushing_start_time)
1148 : .expect("we are the first caller"),
1149 : ),
1150 : Err(_) => {
1151 : // TODO: measure errors
1152 : None
1153 : }
1154 : };
1155 : flush_timers.push(flush_timer);
1156 : }
1157 : assert_eq!(flush_timers.len(), handler_results.len());
1158 : flush_timers
1159 : };
1160 :
1161 : // Map handler result to protocol behavior.
1162 : // Some handler errors cause exit from pagestream protocol.
1163 : // Other handler errors are sent back as an error message and we stay in pagestream protocol.
1164 : for (handler_result, flushing_timer) in handler_results.into_iter().zip(flush_timers) {
1165 : let response_msg = match handler_result {
1166 : Err(e) => match &e.err {
1167 : PageStreamError::Shutdown => {
1168 : // If we fail to fulfil a request during shutdown, which may be _because_ of
1169 : // shutdown, then do not send the error to the client. Instead just drop the
1170 : // connection.
1171 0 : span.in_scope(|| info!("dropping connection due to shutdown"));
1172 : return Err(QueryError::Shutdown);
1173 : }
1174 : PageStreamError::Reconnect(reason) => {
1175 0 : span.in_scope(|| info!("handler requested reconnect: {reason}"));
1176 : return Err(QueryError::Reconnect);
1177 : }
1178 : PageStreamError::Read(_)
1179 : | PageStreamError::LsnTimeout(_)
1180 : | PageStreamError::NotFound(_)
1181 : | PageStreamError::BadRequest(_) => {
1182 : // print all the details to the log with {:#}, but for the client the
1183 : // error message is enough. Do not log if shutting down, as the anyhow::Error
1184 : // here includes cancellation which is not an error.
1185 : let full = utils::error::report_compact_sources(&e.err);
1186 0 : span.in_scope(|| {
1187 0 : error!("error reading relation or page version: {full:#}")
1188 0 : });
1189 :
1190 : PagestreamBeMessage::Error(PagestreamErrorResponse {
1191 : req: e.req,
1192 : message: e.err.to_string(),
1193 : })
1194 : }
1195 : },
1196 : Ok((response_msg, _op_timer_already_observed)) => response_msg,
1197 : };
1198 :
1199 : //
1200 : // marshal & transmit response message
1201 : //
1202 :
1203 : pgb_writer.write_message_noflush(&BeMessage::CopyData(
1204 : &response_msg.serialize(protocol_version),
1205 : ))?;
1206 :
1207 : failpoint_support::sleep_millis_async!("before-pagestream-msg-flush", cancel);
1208 :
1209 : // what we want to do
1210 : let socket_fd = pgb_writer.socket_fd;
1211 : let flush_fut = pgb_writer.flush();
1212 : // metric for how long flushing takes
1213 : let flush_fut = match flushing_timer {
1214 : Some(flushing_timer) => futures::future::Either::Left(flushing_timer.measure(
1215 : Instant::now(),
1216 : flush_fut,
1217 : socket_fd,
1218 : )),
1219 : None => futures::future::Either::Right(flush_fut),
1220 : };
1221 : // do it while respecting cancellation
1222 0 : let _: () = async move {
1223 0 : tokio::select! {
1224 : biased;
1225 0 : _ = cancel.cancelled() => {
1226 : // We were requested to shut down.
1227 0 : info!("shutdown request received in page handler");
1228 0 : return Err(QueryError::Shutdown)
1229 : }
1230 0 : res = flush_fut => {
1231 0 : res?;
1232 : }
1233 : }
1234 0 : Ok(())
1235 0 : }
1236 : .await?;
1237 : }
1238 : Ok(())
1239 : }
1240 :
1241 : /// Helper which dispatches a batched message to the appropriate handler.
1242 : /// Returns a vec of results, along with the extracted trace span.
1243 0 : async fn pagestream_dispatch_batched_message(
1244 0 : &mut self,
1245 0 : batch: BatchedFeMessage,
1246 0 : io_concurrency: IoConcurrency,
1247 0 : ctx: &RequestContext,
1248 0 : ) -> Result<
1249 0 : (
1250 0 : Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>>,
1251 0 : Span,
1252 0 : ),
1253 0 : QueryError,
1254 0 : > {
1255 : macro_rules! upgrade_handle_and_set_context {
1256 : ($shard:ident) => {{
1257 : let weak_handle = &$shard;
1258 : let handle = weak_handle.upgrade()?;
1259 : let ctx = ctx.with_scope_page_service_pagestream(&handle);
1260 : (handle, ctx)
1261 : }};
1262 : }
1263 0 : Ok(match batch {
1264 : BatchedFeMessage::Exists {
1265 0 : span,
1266 0 : timer,
1267 0 : shard,
1268 0 : req,
1269 0 : } => {
1270 0 : fail::fail_point!("ps::handle-pagerequest-message::exists");
1271 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1272 : (
1273 0 : vec![
1274 0 : self.handle_get_rel_exists_request(&shard, &req, &ctx)
1275 0 : .instrument(span.clone())
1276 0 : .await
1277 0 : .map(|msg| (msg, timer))
1278 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
1279 0 : ],
1280 0 : span,
1281 : )
1282 : }
1283 : BatchedFeMessage::Nblocks {
1284 0 : span,
1285 0 : timer,
1286 0 : shard,
1287 0 : req,
1288 0 : } => {
1289 0 : fail::fail_point!("ps::handle-pagerequest-message::nblocks");
1290 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1291 : (
1292 0 : vec![
1293 0 : self.handle_get_nblocks_request(&shard, &req, &ctx)
1294 0 : .instrument(span.clone())
1295 0 : .await
1296 0 : .map(|msg| (msg, timer))
1297 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
1298 0 : ],
1299 0 : span,
1300 : )
1301 : }
1302 : BatchedFeMessage::GetPage {
1303 0 : span,
1304 0 : shard,
1305 0 : effective_request_lsn,
1306 0 : pages,
1307 0 : } => {
1308 0 : fail::fail_point!("ps::handle-pagerequest-message::getpage");
1309 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1310 : (
1311 : {
1312 0 : let npages = pages.len();
1313 0 : trace!(npages, "handling getpage request");
1314 0 : let res = self
1315 0 : .handle_get_page_at_lsn_request_batched(
1316 0 : &shard,
1317 0 : effective_request_lsn,
1318 0 : pages,
1319 0 : io_concurrency,
1320 0 : &ctx,
1321 0 : )
1322 0 : .instrument(span.clone())
1323 0 : .await;
1324 0 : assert_eq!(res.len(), npages);
1325 0 : res
1326 0 : },
1327 0 : span,
1328 : )
1329 : }
1330 : BatchedFeMessage::DbSize {
1331 0 : span,
1332 0 : timer,
1333 0 : shard,
1334 0 : req,
1335 0 : } => {
1336 0 : fail::fail_point!("ps::handle-pagerequest-message::dbsize");
1337 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1338 : (
1339 0 : vec![
1340 0 : self.handle_db_size_request(&shard, &req, &ctx)
1341 0 : .instrument(span.clone())
1342 0 : .await
1343 0 : .map(|msg| (msg, timer))
1344 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
1345 0 : ],
1346 0 : span,
1347 : )
1348 : }
1349 : BatchedFeMessage::GetSlruSegment {
1350 0 : span,
1351 0 : timer,
1352 0 : shard,
1353 0 : req,
1354 0 : } => {
1355 0 : fail::fail_point!("ps::handle-pagerequest-message::slrusegment");
1356 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1357 : (
1358 0 : vec![
1359 0 : self.handle_get_slru_segment_request(&shard, &req, &ctx)
1360 0 : .instrument(span.clone())
1361 0 : .await
1362 0 : .map(|msg| (msg, timer))
1363 0 : .map_err(|err| BatchedPageStreamError { err, req: req.hdr }),
1364 0 : ],
1365 0 : span,
1366 : )
1367 : }
1368 : #[cfg(feature = "testing")]
1369 : BatchedFeMessage::Test {
1370 0 : span,
1371 0 : shard,
1372 0 : requests,
1373 0 : } => {
1374 0 : fail::fail_point!("ps::handle-pagerequest-message::test");
1375 0 : let (shard, ctx) = upgrade_handle_and_set_context!(shard);
1376 : (
1377 : {
1378 0 : let npages = requests.len();
1379 0 : trace!(npages, "handling getpage request");
1380 0 : let res = self
1381 0 : .handle_test_request_batch(&shard, requests, &ctx)
1382 0 : .instrument(span.clone())
1383 0 : .await;
1384 0 : assert_eq!(res.len(), npages);
1385 0 : res
1386 0 : },
1387 0 : span,
1388 : )
1389 : }
1390 0 : BatchedFeMessage::RespondError { span, error } => {
1391 0 : // We've already decided to respond with an error, so we don't need to
1392 0 : // call the handler.
1393 0 : (vec![Err(error)], span)
1394 : }
1395 : })
1396 0 : }
1397 :
1398 : /// Pagestream sub-protocol handler.
1399 : ///
1400 : /// It is a simple request-response protocol inside a COPYBOTH session.
1401 : ///
1402 : /// # Coding Discipline
1403 : ///
1404 : /// Coding discipline within this function: all interaction with the `pgb` connection
1405 : /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
1406 : /// This is so that we can shutdown page_service quickly.
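 : ///
 : /// # Protocol sketch (informal)
 : ///
 : /// After the `CopyBothResponse`, each client request arrives as a `CopyData`
 : /// message carrying a [`PagestreamFeMessage`]; each response is written back as
 : /// a `CopyData` message carrying a serialized [`PagestreamBeMessage`]. A
 : /// `Terminate` message (or client disconnect) ends the sub-protocol.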
1407 : #[instrument(skip_all)]
1408 : async fn handle_pagerequests<IO>(
1409 : &mut self,
1410 : pgb: &mut PostgresBackend<IO>,
1411 : tenant_id: TenantId,
1412 : timeline_id: TimelineId,
1413 : protocol_version: PagestreamProtocolVersion,
1414 : ctx: RequestContext,
1415 : ) -> Result<(), QueryError>
1416 : where
1417 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1418 : {
1419 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
1420 :
1421 : // switch client to COPYBOTH
1422 : pgb.write_message_noflush(&BeMessage::CopyBothResponse)?;
1423 : tokio::select! {
1424 : biased;
1425 : _ = self.cancel.cancelled() => {
1426 : return Err(QueryError::Shutdown)
1427 : }
1428 : res = pgb.flush() => {
1429 : res?;
1430 : }
1431 : }
1432 :
1433 : let io_concurrency = IoConcurrency::spawn_from_conf(
1434 : self.conf,
1435 : match self.gate_guard.try_clone() {
1436 : Ok(guard) => guard,
1437 : Err(_) => {
1438 : info!("shutdown request received in page handler");
1439 : return Err(QueryError::Shutdown);
1440 : }
1441 : },
1442 : );
1443 :
1444 : let pgb_reader = pgb
1445 : .split()
1446 : .context("implementation error: split pgb into reader and writer")?;
1447 :
1448 : let timeline_handles = self
1449 : .timeline_handles
1450 : .take()
1451 : .expect("implementation error: timeline_handles should not be locked");
1452 :
1453 : let request_span = info_span!("request");
1454 : let ((pgb_reader, timeline_handles), result) = match self.pipelining_config.clone() {
1455 : PageServicePipeliningConfig::Pipelined(pipelining_config) => {
1456 : self.handle_pagerequests_pipelined(
1457 : pgb,
1458 : pgb_reader,
1459 : tenant_id,
1460 : timeline_id,
1461 : timeline_handles,
1462 : request_span,
1463 : pipelining_config,
1464 : protocol_version,
1465 : io_concurrency,
1466 : &ctx,
1467 : )
1468 : .await
1469 : }
1470 : PageServicePipeliningConfig::Serial => {
1471 : self.handle_pagerequests_serial(
1472 : pgb,
1473 : pgb_reader,
1474 : tenant_id,
1475 : timeline_id,
1476 : timeline_handles,
1477 : request_span,
1478 : protocol_version,
1479 : io_concurrency,
1480 : &ctx,
1481 : )
1482 : .await
1483 : }
1484 : };
1485 :
1486 : debug!("pagestream subprotocol shut down cleanly");
1487 :
1488 : pgb.unsplit(pgb_reader)
1489 : .context("implementation error: unsplit pgb")?;
1490 :
1491 : let replaced = self.timeline_handles.replace(timeline_handles);
1492 : assert!(replaced.is_none());
1493 :
1494 : result
1495 : }
1496 :
1497 : #[allow(clippy::too_many_arguments)]
1498 0 : async fn handle_pagerequests_serial<IO>(
1499 0 : &mut self,
1500 0 : pgb_writer: &mut PostgresBackend<IO>,
1501 0 : mut pgb_reader: PostgresBackendReader<IO>,
1502 0 : tenant_id: TenantId,
1503 0 : timeline_id: TimelineId,
1504 0 : mut timeline_handles: TimelineHandles,
1505 0 : request_span: Span,
1506 0 : protocol_version: PagestreamProtocolVersion,
1507 0 : io_concurrency: IoConcurrency,
1508 0 : ctx: &RequestContext,
1509 0 : ) -> (
1510 0 : (PostgresBackendReader<IO>, TimelineHandles),
1511 0 : Result<(), QueryError>,
1512 0 : )
1513 0 : where
1514 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1515 0 : {
1516 0 : let cancel = self.cancel.clone();
1517 0 : let err = loop {
1518 0 : let msg = Self::pagestream_read_message(
1519 0 : &mut pgb_reader,
1520 0 : tenant_id,
1521 0 : timeline_id,
1522 0 : &mut timeline_handles,
1523 0 : &cancel,
1524 0 : ctx,
1525 0 : protocol_version,
1526 0 : request_span.clone(),
1527 0 : )
1528 0 : .await;
1529 0 : let msg = match msg {
1530 0 : Ok(msg) => msg,
1531 0 : Err(e) => break e,
1532 : };
1533 0 : let msg = match msg {
1534 0 : Some(msg) => msg,
1535 : None => {
1536 0 : debug!("pagestream subprotocol end observed");
1537 0 : return ((pgb_reader, timeline_handles), Ok(()));
1538 : }
1539 : };
1540 :
1541 0 : let result = self
1542 0 : .pagesteam_handle_batched_message(
1543 0 : pgb_writer,
1544 0 : msg,
1545 0 : io_concurrency.clone(),
1546 0 : &cancel,
1547 0 : protocol_version,
1548 0 : ctx,
1549 0 : )
1550 0 : .await;
1551 0 : match result {
1552 0 : Ok(()) => {}
1553 0 : Err(e) => break e,
1554 : }
1555 : };
1556 0 : ((pgb_reader, timeline_handles), Err(err))
1557 0 : }
1558 :
1559 : /// # Cancel-Safety
1560 : ///
1561 : /// May leak tokio tasks if not polled to completion.
1562 : #[allow(clippy::too_many_arguments)]
1563 0 : async fn handle_pagerequests_pipelined<IO>(
1564 0 : &mut self,
1565 0 : pgb_writer: &mut PostgresBackend<IO>,
1566 0 : pgb_reader: PostgresBackendReader<IO>,
1567 0 : tenant_id: TenantId,
1568 0 : timeline_id: TimelineId,
1569 0 : mut timeline_handles: TimelineHandles,
1570 0 : request_span: Span,
1571 0 : pipelining_config: PageServicePipeliningConfigPipelined,
1572 0 : protocol_version: PagestreamProtocolVersion,
1573 0 : io_concurrency: IoConcurrency,
1574 0 : ctx: &RequestContext,
1575 0 : ) -> (
1576 0 : (PostgresBackendReader<IO>, TimelineHandles),
1577 0 : Result<(), QueryError>,
1578 0 : )
1579 0 : where
1580 0 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
1581 0 : {
1582 0 : //
1583 0 : // Pipelined pagestream handling consists of
1584 0 : // - a Batcher that reads requests off the wire
1585 0 : // and batches them if possible,
1586 0 : // - an Executor that processes the batched requests.
1587 0 : //
1588 0 : // The batch is built up inside an `spsc_fold` channel,
1589 0 : // shared between Batcher (Sender) and Executor (Receiver).
1590 0 : //
1591 0 : // The Batcher continuously folds client requests into the batch,
1592 0 : // while the Executor can at any time take out what's in the batch
1593 0 : // in order to process it.
1594 0 : // This means the next batch builds up while the Executor
1595 0 : // executes the last batch.
1596 0 : //
1597 0 : // CANCELLATION
1598 0 : //
1599 0 : // We run both Batcher and Executor futures to completion before
1600 0 : // returning from this function.
1601 0 : //
1602 0 : // If Executor exits first, it signals cancellation to the Batcher
1603 0 : // via a CancellationToken that is a child of `self.cancel`.
1604 0 : // If Batcher exits first, it signals cancellation to the Executor
1605 0 : // by dropping the spsc_fold channel Sender.
1606 0 : //
1607 0 : // CLEAN SHUTDOWN
1608 0 : //
1609 0 : // Clean shutdown means that the client ends the COPYBOTH session.
1610 0 : // In response to such a client message, the Batcher exits.
1611 0 : // The Executor continues to run, draining the spsc_fold channel.
1612 0 : // Once drained, the spsc_fold recv will fail with a distinct error
1613 0 : // indicating that the sender disconnected.
1614 0 : // The Executor exits with Ok(()) in response to that error.
1615 0 : //
1616 0 : // Server initiated shutdown is not clean shutdown, but instead
1617 0 : // is an error Err(QueryError::Shutdown) that is propagated through
1618 0 : // error propagation.
1619 0 : //
1620 0 : // ERROR PROPAGATION
1621 0 : //
1622 0 : // When the Batcher encounters an error, it sends it as a value
1623 0 : // through the spsc_fold channel and exits afterwards.
1624 0 : // When the Executor observes such an error in the channel,
1625 0 : // it exits returning that error value.
1626 0 : //
1627 0 : // This design ensures that the Executor stage will still process
1628 0 : // the batch that was in flight when the Batcher encountered an error,
1629 0 : // thereby behaving identically to a serial implementation.
1630 0 :
1631 0 : let PageServicePipeliningConfigPipelined {
1632 0 : max_batch_size,
1633 0 : execution,
1634 0 : } = pipelining_config;
1635 :
1636 : // Macro to _define_ a pipeline stage.
1637 : macro_rules! pipeline_stage {
1638 : ($name:literal, $cancel:expr, $make_fut:expr) => {{
1639 : let cancel: CancellationToken = $cancel;
1640 : let stage_fut = $make_fut(cancel.clone());
1641 0 : async move {
1642 0 : scopeguard::defer! {
1643 0 : debug!("exiting");
1644 0 : }
1645 0 : timed_after_cancellation(stage_fut, $name, Duration::from_millis(100), &cancel)
1646 0 : .await
1647 0 : }
1648 : .instrument(tracing::info_span!($name))
1649 : }};
1650 : }
1651 :
1652 : //
1653 : // Batcher
1654 : //
1655 :
1656 0 : let cancel_batcher = self.cancel.child_token();
1657 0 : let (mut batch_tx, mut batch_rx) = spsc_fold::channel();
1658 0 : let batcher = pipeline_stage!("batcher", cancel_batcher.clone(), move |cancel_batcher| {
1659 0 : let ctx = ctx.attached_child();
1660 0 : async move {
1661 0 : let mut pgb_reader = pgb_reader;
1662 0 : let mut exit = false;
1663 0 : while !exit {
1664 0 : let read_res = Self::pagestream_read_message(
1665 0 : &mut pgb_reader,
1666 0 : tenant_id,
1667 0 : timeline_id,
1668 0 : &mut timeline_handles,
1669 0 : &cancel_batcher,
1670 0 : &ctx,
1671 0 : protocol_version,
1672 0 : request_span.clone(),
1673 0 : )
1674 0 : .await;
1675 0 : let Some(read_res) = read_res.transpose() else {
1676 0 : debug!("client-initiated shutdown");
1677 0 : break;
1678 : };
1679 0 : exit |= read_res.is_err();
1680 0 : let could_send = batch_tx
1681 0 : .send(read_res, |batch, res| {
1682 0 : Self::pagestream_do_batch(max_batch_size, batch, res)
1683 0 : })
1684 0 : .await;
1685 0 : exit |= could_send.is_err();
1686 : }
1687 0 : (pgb_reader, timeline_handles)
1688 0 : }
1689 0 : });
1690 :
1691 : //
1692 : // Executor
1693 : //
1694 :
1695 0 : let executor = pipeline_stage!("executor", self.cancel.clone(), move |cancel| {
1696 0 : let ctx = ctx.attached_child();
1697 0 : async move {
1698 0 : let _cancel_batcher = cancel_batcher.drop_guard();
1699 : loop {
1700 0 : let maybe_batch = batch_rx.recv().await;
1701 0 : let batch = match maybe_batch {
1702 0 : Ok(batch) => batch,
1703 : Err(spsc_fold::RecvError::SenderGone) => {
1704 0 : debug!("upstream gone");
1705 0 : return Ok(());
1706 : }
1707 : };
1708 0 : let batch = match batch {
1709 0 : Ok(batch) => batch,
1710 0 : Err(e) => {
1711 0 : return Err(e);
1712 : }
1713 : };
1714 0 : self.pagesteam_handle_batched_message(
1715 0 : pgb_writer,
1716 0 : batch,
1717 0 : io_concurrency.clone(),
1718 0 : &cancel,
1719 0 : protocol_version,
1720 0 : &ctx,
1721 0 : )
1722 0 : .await?;
1723 : }
1724 0 : }
1725 0 : });
1726 :
1727 : //
1728 : // Execute the stages.
1729 : //
1730 :
1731 0 : match execution {
1732 : PageServiceProtocolPipelinedExecutionStrategy::ConcurrentFutures => {
1733 0 : tokio::join!(batcher, executor)
1734 : }
1735 : PageServiceProtocolPipelinedExecutionStrategy::Tasks => {
1736 : // These tasks are not tracked anywhere.
1737 0 : let read_messages_task = tokio::spawn(batcher);
1738 0 : let (read_messages_task_res, executor_res_) =
1739 0 : tokio::join!(read_messages_task, executor,);
1740 0 : (
1741 0 : read_messages_task_res.expect("propagated panic from read_messages"),
1742 0 : executor_res_,
1743 0 : )
1744 : }
1745 : }
1746 0 : }
1747 :
1748 :     /// Helper function to handle the LSN from a client request.
1749 : ///
1750 : /// Each GetPage (and Exists and Nblocks) request includes information about
1751 : /// which version of the page is being requested. The primary compute node
1752 : /// will always request the latest page version, by setting 'request_lsn' to
1753 : /// the last inserted or flushed WAL position, while a standby will request
1754 : /// a version at the LSN that it's currently caught up to.
1755 : ///
1756 : /// In either case, if the page server hasn't received the WAL up to the
1757 : /// requested LSN yet, we will wait for it to arrive. The return value is
1758 : /// the LSN that should be used to look up the page versions.
1759 : ///
1760 : /// In addition to the request LSN, each request carries another LSN,
1761 : /// 'not_modified_since', which is a hint to the pageserver that the client
1762 : /// knows that the page has not been modified between 'not_modified_since'
1763 : /// and the request LSN. This allows skipping the wait, as long as the WAL
1764 : /// up to 'not_modified_since' has arrived. If the client doesn't have any
1765 : /// information about when the page was modified, it will use
1766 :     /// not_modified_since == request_lsn. If the client lies and sends a too-low
1767 :     /// not_modified_since such that there are in fact later page versions, the
1768 : /// behavior is undefined: the pageserver may return any of the page versions
1769 : /// or an error.
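    ///
    /// # Worked example (illustrative LSNs)
    ///
    /// Suppose the last record LSN is 0/200 and a request arrives with
    /// request_lsn = 0/300 and not_modified_since = 0/180. WAL up to
    /// 'not_modified_since' has already arrived, so no wait is needed and the
    /// lookup LSN is min(last_record_lsn, request_lsn) = 0/200. If instead
    /// not_modified_since were 0/250, we would first wait for WAL up to 0/250
    /// and then use 0/250 for the lookup.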
1770 0 : async fn wait_or_get_last_lsn(
1771 0 : timeline: &Timeline,
1772 0 : request_lsn: Lsn,
1773 0 : not_modified_since: Lsn,
1774 0 : latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
1775 0 : ctx: &RequestContext,
1776 0 : ) -> Result<Lsn, PageStreamError> {
1777 0 : let last_record_lsn = timeline.get_last_record_lsn();
1778 0 :
1779 0 : // Sanity check the request
1780 0 : if request_lsn < not_modified_since {
1781 0 : return Err(PageStreamError::BadRequest(
1782 0 : format!(
1783 0 : "invalid request with request LSN {} and not_modified_since {}",
1784 0 : request_lsn, not_modified_since,
1785 0 : )
1786 0 : .into(),
1787 0 : ));
1788 0 : }
1789 0 :
1790 0 : // Check explicitly for INVALID just to get a less scary error message if the request is obviously bogus
1791 0 : if request_lsn == Lsn::INVALID {
1792 0 : return Err(PageStreamError::BadRequest(
1793 0 : "invalid LSN(0) in request".into(),
1794 0 : ));
1795 0 : }
1796 0 :
1797 0 : // Clients should only read from recent LSNs on their timeline, or from locations holding an LSN lease.
1798 0 : //
1799 0 : // We may have older data available, but we make a best effort to detect this case and return an error,
1800 0 : // to distinguish a misbehaving client (asking for old LSN) from a storage issue (data missing at a legitimate LSN).
1801 0 : if request_lsn < **latest_gc_cutoff_lsn && !timeline.is_gc_blocked_by_lsn_lease_deadline() {
1802 0 : let gc_info = &timeline.gc_info.read().unwrap();
1803 0 : if !gc_info.lsn_covered_by_lease(request_lsn) {
1804 0 : return Err(
1805 0 : PageStreamError::BadRequest(format!(
1806 0 : "tried to request a page version that was garbage collected. requested at {} gc cutoff {}",
1807 0 : request_lsn, **latest_gc_cutoff_lsn
1808 0 : ).into())
1809 0 : );
1810 0 : }
1811 0 : }
1812 :
1813 : // Wait for WAL up to 'not_modified_since' to arrive, if necessary
1814 0 : if not_modified_since > last_record_lsn {
1815 0 : timeline
1816 0 : .wait_lsn(
1817 0 : not_modified_since,
1818 0 : crate::tenant::timeline::WaitLsnWaiter::PageService,
1819 0 : timeline::WaitLsnTimeout::Default,
1820 0 : ctx,
1821 0 : )
1822 0 : .await?;
1823 : // Since we waited for 'not_modified_since' to arrive, that is now the last
1824 : // record LSN. (Or close enough for our purposes; the last-record LSN can
1825 : // advance immediately after we return anyway)
1826 0 : Ok(not_modified_since)
1827 : } else {
1828 : // It might be better to use max(not_modified_since, latest_gc_cutoff_lsn)
1829 : // here instead. That would give the same result, since we know that there
1830 : // haven't been any modifications since 'not_modified_since'. Using an older
1831 : // LSN might be faster, because that could allow skipping recent layers when
1832 : // finding the page. However, we have historically used 'last_record_lsn', so
1833 : // stick to that for now.
1834 0 : Ok(std::cmp::min(last_record_lsn, request_lsn))
1835 : }
1836 0 : }
1837 :
1838 : /// Handles the lsn lease request.
1839 : /// If a lease cannot be obtained, the client will receive NULL.
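    ///
    /// On success the client receives a single-row result with one text column,
    /// `valid_until`, holding the lease expiry as milliseconds since the UNIX
    /// epoch (e.g. `1735689600000`; the value here is illustrative only).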
1840 : #[instrument(skip_all, fields(shard_id, %lsn))]
1841 : async fn handle_make_lsn_lease<IO>(
1842 : &mut self,
1843 : pgb: &mut PostgresBackend<IO>,
1844 : tenant_shard_id: TenantShardId,
1845 : timeline_id: TimelineId,
1846 : lsn: Lsn,
1847 : ctx: &RequestContext,
1848 : ) -> Result<(), QueryError>
1849 : where
1850 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
1851 : {
1852 : let timeline = self
1853 : .timeline_handles
1854 : .as_mut()
1855 : .unwrap()
1856 : .get(
1857 : tenant_shard_id.tenant_id,
1858 : timeline_id,
1859 : ShardSelector::Known(tenant_shard_id.to_index()),
1860 : )
1861 : .await?;
1862 : set_tracing_field_shard_id(&timeline);
1863 :
1864 : let lease = timeline
1865 : .renew_lsn_lease(lsn, timeline.get_lsn_lease_length(), ctx)
1866 0 : .inspect_err(|e| {
1867 0 : warn!("{e}");
1868 0 : })
1869 : .ok();
1870 0 : let valid_until_str = lease.map(|l| {
1871 0 : l.valid_until
1872 0 : .duration_since(SystemTime::UNIX_EPOCH)
1873 0 : .expect("valid_until is earlier than UNIX_EPOCH")
1874 0 : .as_millis()
1875 0 : .to_string()
1876 0 : });
1877 :
1878 : info!(
1879 : "acquired lease for {} until {}",
1880 : lsn,
1881 : valid_until_str.as_deref().unwrap_or("<unknown>")
1882 : );
1883 :
1884 0 : let bytes = valid_until_str.as_ref().map(|x| x.as_bytes());
1885 :
1886 : pgb.write_message_noflush(&BeMessage::RowDescription(&[RowDescriptor::text_col(
1887 : b"valid_until",
1888 : )]))?
1889 : .write_message_noflush(&BeMessage::DataRow(&[bytes]))?;
1890 :
1891 : Ok(())
1892 : }
1893 :
1894 : #[instrument(skip_all, fields(shard_id))]
1895 : async fn handle_get_rel_exists_request(
1896 : &mut self,
1897 : timeline: &Timeline,
1898 : req: &PagestreamExistsRequest,
1899 : ctx: &RequestContext,
1900 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1901 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1902 : let lsn = Self::wait_or_get_last_lsn(
1903 : timeline,
1904 : req.hdr.request_lsn,
1905 : req.hdr.not_modified_since,
1906 : &latest_gc_cutoff_lsn,
1907 : ctx,
1908 : )
1909 : .await?;
1910 :
1911 : let exists = timeline
1912 : .get_rel_exists(req.rel, Version::Lsn(lsn), ctx)
1913 : .await?;
1914 :
1915 : Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
1916 : req: *req,
1917 : exists,
1918 : }))
1919 : }
1920 :
1921 : #[instrument(skip_all, fields(shard_id))]
1922 : async fn handle_get_nblocks_request(
1923 : &mut self,
1924 : timeline: &Timeline,
1925 : req: &PagestreamNblocksRequest,
1926 : ctx: &RequestContext,
1927 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1928 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1929 : let lsn = Self::wait_or_get_last_lsn(
1930 : timeline,
1931 : req.hdr.request_lsn,
1932 : req.hdr.not_modified_since,
1933 : &latest_gc_cutoff_lsn,
1934 : ctx,
1935 : )
1936 : .await?;
1937 :
1938 : let n_blocks = timeline
1939 : .get_rel_size(req.rel, Version::Lsn(lsn), ctx)
1940 : .await?;
1941 :
1942 : Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
1943 : req: *req,
1944 : n_blocks,
1945 : }))
1946 : }
1947 :
1948 : #[instrument(skip_all, fields(shard_id))]
1949 : async fn handle_db_size_request(
1950 : &mut self,
1951 : timeline: &Timeline,
1952 : req: &PagestreamDbSizeRequest,
1953 : ctx: &RequestContext,
1954 : ) -> Result<PagestreamBeMessage, PageStreamError> {
1955 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
1956 : let lsn = Self::wait_or_get_last_lsn(
1957 : timeline,
1958 : req.hdr.request_lsn,
1959 : req.hdr.not_modified_since,
1960 : &latest_gc_cutoff_lsn,
1961 : ctx,
1962 : )
1963 : .await?;
1964 :
1965 : let total_blocks = timeline
1966 : .get_db_size(DEFAULTTABLESPACE_OID, req.dbnode, Version::Lsn(lsn), ctx)
1967 : .await?;
1968 : let db_size = total_blocks as i64 * BLCKSZ as i64;
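        // For example (illustrative numbers): 1280 blocks * BLCKSZ (8192 bytes) = 10 MiB.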
1969 :
1970 : Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
1971 : req: *req,
1972 : db_size,
1973 : }))
1974 : }
1975 :
1976 : #[instrument(skip_all)]
1977 : async fn handle_get_page_at_lsn_request_batched(
1978 : &mut self,
1979 : timeline: &Timeline,
1980 : effective_lsn: Lsn,
1981 : requests: smallvec::SmallVec<[BatchedGetPageRequest; 1]>,
1982 : io_concurrency: IoConcurrency,
1983 : ctx: &RequestContext,
1984 : ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
1985 : debug_assert_current_span_has_tenant_and_timeline_id();
1986 :
1987 : timeline
1988 : .query_metrics
1989 : .observe_getpage_batch_start(requests.len());
1990 :
1991 : // If a page trace is running, submit an event for this request.
1992 : if let Some(page_trace) = timeline.page_trace.load().as_ref() {
1993 : let time = SystemTime::now();
1994 : for batch in &requests {
1995 : let key = rel_block_to_key(batch.req.rel, batch.req.blkno).to_compact();
1996 : // Ignore error (trace buffer may be full or tracer may have disconnected).
1997 : _ = page_trace.try_send(PageTraceEvent {
1998 : key,
1999 : effective_lsn,
2000 : time,
2001 : });
2002 : }
2003 : }
2004 :
2005 : let results = timeline
2006 : .get_rel_page_at_lsn_batched(
2007 0 : requests.iter().map(|p| (&p.req.rel, &p.req.blkno)),
2008 : effective_lsn,
2009 : io_concurrency,
2010 : ctx,
2011 : )
2012 : .await;
2013 : assert_eq!(results.len(), requests.len());
2014 :
2015 : // TODO: avoid creating the new Vec here
2016 : Vec::from_iter(
2017 : requests
2018 : .into_iter()
2019 : .zip(results.into_iter())
2020 0 : .map(|(req, res)| {
2021 0 : res.map(|page| {
2022 0 : (
2023 0 : PagestreamBeMessage::GetPage(models::PagestreamGetPageResponse {
2024 0 : req: req.req,
2025 0 : page,
2026 0 : }),
2027 0 : req.timer,
2028 0 : )
2029 0 : })
2030 0 : .map_err(|e| BatchedPageStreamError {
2031 0 : err: PageStreamError::from(e),
2032 0 : req: req.req.hdr,
2033 0 : })
2034 0 : }),
2035 : )
2036 : }
2037 :
2038 : #[instrument(skip_all, fields(shard_id))]
2039 : async fn handle_get_slru_segment_request(
2040 : &mut self,
2041 : timeline: &Timeline,
2042 : req: &PagestreamGetSlruSegmentRequest,
2043 : ctx: &RequestContext,
2044 : ) -> Result<PagestreamBeMessage, PageStreamError> {
2045 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
2046 : let lsn = Self::wait_or_get_last_lsn(
2047 : timeline,
2048 : req.hdr.request_lsn,
2049 : req.hdr.not_modified_since,
2050 : &latest_gc_cutoff_lsn,
2051 : ctx,
2052 : )
2053 : .await?;
2054 :
2055 : let kind = SlruKind::from_repr(req.kind)
2056 : .ok_or(PageStreamError::BadRequest("invalid SLRU kind".into()))?;
2057 : let segment = timeline.get_slru_segment(kind, req.segno, lsn, ctx).await?;
2058 :
2059 : Ok(PagestreamBeMessage::GetSlruSegment(
2060 : PagestreamGetSlruSegmentResponse { req: *req, segment },
2061 : ))
2062 : }
2063 :
2064 : // NB: this impl mimics what we do for batched getpage requests.
2065 : #[cfg(feature = "testing")]
2066 : #[instrument(skip_all, fields(shard_id))]
2067 : async fn handle_test_request_batch(
2068 : &mut self,
2069 : timeline: &Timeline,
2070 : requests: Vec<BatchedTestRequest>,
2071 : _ctx: &RequestContext,
2072 : ) -> Vec<Result<(PagestreamBeMessage, SmgrOpTimer), BatchedPageStreamError>> {
2073 : // real requests would do something with the timeline
2074 : let mut results = Vec::with_capacity(requests.len());
2075 : for _req in requests.iter() {
2076 : tokio::task::yield_now().await;
2077 :
2078 : results.push({
2079 : if timeline.cancel.is_cancelled() {
2080 : Err(PageReconstructError::Cancelled)
2081 : } else {
2082 : Ok(())
2083 : }
2084 : });
2085 : }
2086 :
2087 : // TODO: avoid creating the new Vec here
2088 : Vec::from_iter(
2089 : requests
2090 : .into_iter()
2091 : .zip(results.into_iter())
2092 0 : .map(|(req, res)| {
2093 0 : res.map(|()| {
2094 0 : (
2095 0 : PagestreamBeMessage::Test(models::PagestreamTestResponse {
2096 0 : req: req.req.clone(),
2097 0 : }),
2098 0 : req.timer,
2099 0 : )
2100 0 : })
2101 0 : .map_err(|e| BatchedPageStreamError {
2102 0 : err: PageStreamError::from(e),
2103 0 : req: req.req.hdr,
2104 0 : })
2105 0 : }),
2106 : )
2107 : }
2108 :
2109 : /// Note on "fullbackup":
2110 : /// Full basebackups should only be used for debugging purposes.
2111 :     /// Originally, this mode was introduced to enable breaking storage format changes,
2112 : /// but that is not applicable anymore.
2113 : ///
2114 : /// # Coding Discipline
2115 : ///
2116 : /// Coding discipline within this function: all interaction with the `pgb` connection
2117 : /// needs to be sensitive to connection shutdown, currently signalled via [`Self::cancel`].
2118 : /// This is so that we can shutdown page_service quickly.
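    ///
    /// Concretely, flushes go through the cancellation-aware helper used below,
    /// e.g. `self.flush_cancellable(pgb, &self.cancel).await?`, rather than
    /// awaiting the socket write directly.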
2119 : ///
2120 : /// TODO: wrap the pgb that we pass to the basebackup handler so that it's sensitive
2121 : /// to connection cancellation.
2122 : #[allow(clippy::too_many_arguments)]
2123 : #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))]
2124 : async fn handle_basebackup_request<IO>(
2125 : &mut self,
2126 : pgb: &mut PostgresBackend<IO>,
2127 : tenant_id: TenantId,
2128 : timeline_id: TimelineId,
2129 : lsn: Option<Lsn>,
2130 : prev_lsn: Option<Lsn>,
2131 : full_backup: bool,
2132 : gzip: bool,
2133 : replica: bool,
2134 : ctx: &RequestContext,
2135 : ) -> Result<(), QueryError>
2136 : where
2137 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
2138 : {
2139 0 : fn map_basebackup_error(err: BasebackupError) -> QueryError {
2140 0 : match err {
2141 : // TODO: passthrough the error site to the final error message?
2142 0 : BasebackupError::Client(e, _) => QueryError::Disconnected(ConnectionError::Io(e)),
2143 0 : BasebackupError::Server(e) => QueryError::Other(e),
2144 0 : BasebackupError::Shutdown => QueryError::Shutdown,
2145 : }
2146 0 : }
2147 :
2148 : let started = std::time::Instant::now();
2149 :
2150 : let timeline = self
2151 : .timeline_handles
2152 : .as_mut()
2153 : .unwrap()
2154 : .get(tenant_id, timeline_id, ShardSelector::Zero)
2155 : .await?;
2156 : set_tracing_field_shard_id(&timeline);
2157 : let ctx = ctx.with_scope_timeline(&timeline);
2158 :
2159 : if timeline.is_archived() == Some(true) {
2160 : tracing::info!(
2161 : "timeline {tenant_id}/{timeline_id} is archived, but got basebackup request for it."
2162 : );
2163 : return Err(QueryError::NotFound("timeline is archived".into()));
2164 : }
2165 :
2166 : let latest_gc_cutoff_lsn = timeline.get_applied_gc_cutoff_lsn();
2167 : if let Some(lsn) = lsn {
2168 : // Backup was requested at a particular LSN. Wait for it to arrive.
2169 : info!("waiting for {}", lsn);
2170 : timeline
2171 : .wait_lsn(
2172 : lsn,
2173 : crate::tenant::timeline::WaitLsnWaiter::PageService,
2174 : crate::tenant::timeline::WaitLsnTimeout::Default,
2175 : &ctx,
2176 : )
2177 : .await?;
2178 : timeline
2179 : .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)
2180 : .context("invalid basebackup lsn")?;
2181 : }
2182 :
2183 : let lsn_awaited_after = started.elapsed();
2184 :
2185 : // switch client to COPYOUT
2186 : pgb.write_message_noflush(&BeMessage::CopyOutResponse)
2187 : .map_err(QueryError::Disconnected)?;
2188 : self.flush_cancellable(pgb, &self.cancel).await?;
2189 :
2190 : // Send a tarball of the latest layer on the timeline. Compress if not
2191 : // fullbackup. TODO Compress in that case too (tests need to be updated)
2192 : if full_backup {
2193 : let mut writer = pgb.copyout_writer();
2194 : basebackup::send_basebackup_tarball(
2195 : &mut writer,
2196 : &timeline,
2197 : lsn,
2198 : prev_lsn,
2199 : full_backup,
2200 : replica,
2201 : &ctx,
2202 : )
2203 : .await
2204 : .map_err(map_basebackup_error)?;
2205 : } else {
2206 : let mut writer = BufWriter::new(pgb.copyout_writer());
2207 : if gzip {
2208 : let mut encoder = GzipEncoder::with_quality(
2209 : &mut writer,
2210 : // NOTE using fast compression because it's on the critical path
2211 : // for compute startup. For an empty database, we get
2212 : // <100KB with this method. The Level::Best compression method
2213 : // gives us <20KB, but maybe we should add basebackup caching
2214 : // on compute shutdown first.
2215 : async_compression::Level::Fastest,
2216 : );
2217 : basebackup::send_basebackup_tarball(
2218 : &mut encoder,
2219 : &timeline,
2220 : lsn,
2221 : prev_lsn,
2222 : full_backup,
2223 : replica,
2224 : &ctx,
2225 : )
2226 : .await
2227 : .map_err(map_basebackup_error)?;
2228 : // shutdown the encoder to ensure the gzip footer is written
2229 : encoder
2230 : .shutdown()
2231 : .await
2232 0 : .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?;
2233 : } else {
2234 : basebackup::send_basebackup_tarball(
2235 : &mut writer,
2236 : &timeline,
2237 : lsn,
2238 : prev_lsn,
2239 : full_backup,
2240 : replica,
2241 : &ctx,
2242 : )
2243 : .await
2244 : .map_err(map_basebackup_error)?;
2245 : }
2246 0 : writer.flush().await.map_err(|e| {
2247 0 : map_basebackup_error(BasebackupError::Client(
2248 0 : e,
2249 0 : "handle_basebackup_request,flush",
2250 0 : ))
2251 0 : })?;
2252 : }
2253 :
2254 : pgb.write_message_noflush(&BeMessage::CopyDone)
2255 : .map_err(QueryError::Disconnected)?;
2256 : self.flush_cancellable(pgb, &timeline.cancel).await?;
2257 :
2258 : let basebackup_after = started
2259 : .elapsed()
2260 : .checked_sub(lsn_awaited_after)
2261 : .unwrap_or(Duration::ZERO);
2262 :
2263 : info!(
2264 : lsn_await_millis = lsn_awaited_after.as_millis(),
2265 : basebackup_millis = basebackup_after.as_millis(),
2266 : "basebackup complete"
2267 : );
2268 :
2269 : Ok(())
2270 : }
2271 :
2272 :     // When accessing the management API, supply None as an argument.
2273 :     // When authorizing access to a tenant, pass the corresponding tenant id.
2274 0 : fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<(), QueryError> {
2275 0 : if self.auth.is_none() {
2276 : // auth is set to Trust, nothing to check so just return ok
2277 0 : return Ok(());
2278 0 : }
2279 0 :         // Auth is Some, as just checked above. When auth is Some, claims are
2280 0 :         // always present because of checks during connection init,
2281 0 :         // so this expect won't trigger.
2282 0 : let claims = self
2283 0 : .claims
2284 0 : .as_ref()
2285 0 : .expect("claims presence already checked");
2286 0 : check_permission(claims, tenant_id).map_err(|e| QueryError::Unauthorized(e.0))
2287 0 : }
2288 : }
2289 :
2290 : /// `basebackup tenant timeline [lsn] [--gzip] [--replica]`
2291 : #[derive(Debug, Clone, Eq, PartialEq)]
2292 : struct BaseBackupCmd {
2293 : tenant_id: TenantId,
2294 : timeline_id: TimelineId,
2295 : lsn: Option<Lsn>,
2296 : gzip: bool,
2297 : replica: bool,
2298 : }
2299 :
2300 : /// `fullbackup tenant timeline [lsn] [prev_lsn]`
2301 : #[derive(Debug, Clone, Eq, PartialEq)]
2302 : struct FullBackupCmd {
2303 : tenant_id: TenantId,
2304 : timeline_id: TimelineId,
2305 : lsn: Option<Lsn>,
2306 : prev_lsn: Option<Lsn>,
2307 : }
2308 :
2309 : /// `pagestream_v2 tenant timeline` or `pagestream_v3 tenant timeline`
2310 : #[derive(Debug, Clone, Eq, PartialEq)]
2311 : struct PageStreamCmd {
2312 : tenant_id: TenantId,
2313 : timeline_id: TimelineId,
2314 : protocol_version: PagestreamProtocolVersion,
2315 : }
2316 :
2317 : /// `lease lsn tenant timeline lsn`
2318 : #[derive(Debug, Clone, Eq, PartialEq)]
2319 : struct LeaseLsnCmd {
2320 : tenant_shard_id: TenantShardId,
2321 : timeline_id: TimelineId,
2322 : lsn: Lsn,
2323 : }
2324 :
2325 : #[derive(Debug, Clone, Eq, PartialEq)]
2326 : enum PageServiceCmd {
2327 : Set,
2328 : PageStream(PageStreamCmd),
2329 : BaseBackup(BaseBackupCmd),
2330 : FullBackup(FullBackupCmd),
2331 : LeaseLsn(LeaseLsnCmd),
2332 : }
2333 :
2334 : impl PageStreamCmd {
2335 12 : fn parse(query: &str, protocol_version: PagestreamProtocolVersion) -> anyhow::Result<Self> {
2336 12 : let parameters = query.split_whitespace().collect_vec();
2337 12 : if parameters.len() != 2 {
2338 4 : bail!(
2339 4 : "invalid number of parameters for pagestream command: {}",
2340 4 : query
2341 4 : );
2342 8 : }
2343 8 : let tenant_id = TenantId::from_str(parameters[0])
2344 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2345 4 : let timeline_id = TimelineId::from_str(parameters[1])
2346 4 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2347 4 : Ok(Self {
2348 4 : tenant_id,
2349 4 : timeline_id,
2350 4 : protocol_version,
2351 4 : })
2352 12 : }
2353 : }
2354 :
2355 : impl FullBackupCmd {
2356 8 : fn parse(query: &str) -> anyhow::Result<Self> {
2357 8 : let parameters = query.split_whitespace().collect_vec();
2358 8 : if parameters.len() < 2 || parameters.len() > 4 {
2359 0 : bail!(
2360 0 : "invalid number of parameters for basebackup command: {}",
2361 0 : query
2362 0 : );
2363 8 : }
2364 8 : let tenant_id = TenantId::from_str(parameters[0])
2365 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2366 8 : let timeline_id = TimelineId::from_str(parameters[1])
2367 8 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2368 : // The caller is responsible for providing correct lsn and prev_lsn.
2369 8 : let lsn = if let Some(lsn_str) = parameters.get(2) {
2370 : Some(
2371 4 : Lsn::from_str(lsn_str)
2372 4 : .with_context(|| format!("Failed to parse Lsn from {lsn_str}"))?,
2373 : )
2374 : } else {
2375 4 : None
2376 : };
2377 8 : let prev_lsn = if let Some(prev_lsn_str) = parameters.get(3) {
2378 : Some(
2379 4 : Lsn::from_str(prev_lsn_str)
2380 4 : .with_context(|| format!("Failed to parse Lsn from {prev_lsn_str}"))?,
2381 : )
2382 : } else {
2383 4 : None
2384 : };
2385 8 : Ok(Self {
2386 8 : tenant_id,
2387 8 : timeline_id,
2388 8 : lsn,
2389 8 : prev_lsn,
2390 8 : })
2391 8 : }
2392 : }
2393 :
2394 : impl BaseBackupCmd {
2395 36 : fn parse(query: &str) -> anyhow::Result<Self> {
2396 36 : let parameters = query.split_whitespace().collect_vec();
2397 36 : if parameters.len() < 2 {
2398 0 : bail!(
2399 0 : "invalid number of parameters for basebackup command: {}",
2400 0 : query
2401 0 : );
2402 36 : }
2403 36 : let tenant_id = TenantId::from_str(parameters[0])
2404 36 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2405 36 : let timeline_id = TimelineId::from_str(parameters[1])
2406 36 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2407 : let lsn;
2408 : let flags_parse_from;
2409 36 : if let Some(maybe_lsn) = parameters.get(2) {
2410 32 : if *maybe_lsn == "latest" {
2411 4 : lsn = None;
2412 4 : flags_parse_from = 3;
2413 28 : } else if maybe_lsn.starts_with("--") {
2414 20 : lsn = None;
2415 20 : flags_parse_from = 2;
2416 20 : } else {
2417 : lsn = Some(
2418 8 : Lsn::from_str(maybe_lsn)
2419 8 : .with_context(|| format!("Failed to parse lsn from {maybe_lsn}"))?,
2420 : );
2421 8 : flags_parse_from = 3;
2422 : }
2423 4 : } else {
2424 4 : lsn = None;
2425 4 : flags_parse_from = 2;
2426 4 : }
2427 :
2428 36 : let mut gzip = false;
2429 36 : let mut replica = false;
2430 :
2431 44 :         for &param in &parameters[flags_parse_from..] {
2432 44 : match param {
2433 44 : "--gzip" => {
2434 28 : if gzip {
2435 4 : bail!("duplicate parameter for basebackup command: {param}")
2436 24 : }
2437 24 : gzip = true
2438 : }
2439 16 : "--replica" => {
2440 8 : if replica {
2441 0 : bail!("duplicate parameter for basebackup command: {param}")
2442 8 : }
2443 8 : replica = true
2444 : }
2445 8 : _ => bail!("invalid parameter for basebackup command: {param}"),
2446 : }
2447 : }
2448 24 : Ok(Self {
2449 24 : tenant_id,
2450 24 : timeline_id,
2451 24 : lsn,
2452 24 : gzip,
2453 24 : replica,
2454 24 : })
2455 36 : }
2456 : }
2457 :
2458 : impl LeaseLsnCmd {
2459 8 : fn parse(query: &str) -> anyhow::Result<Self> {
2460 8 : let parameters = query.split_whitespace().collect_vec();
2461 8 : if parameters.len() != 3 {
2462 0 : bail!(
2463 0 : "invalid number of parameters for lease lsn command: {}",
2464 0 : query
2465 0 : );
2466 8 : }
2467 8 : let tenant_shard_id = TenantShardId::from_str(parameters[0])
2468 8 : .with_context(|| format!("Failed to parse tenant id from {}", parameters[0]))?;
2469 8 : let timeline_id = TimelineId::from_str(parameters[1])
2470 8 : .with_context(|| format!("Failed to parse timeline id from {}", parameters[1]))?;
2471 8 : let lsn = Lsn::from_str(parameters[2])
2472 8 : .with_context(|| format!("Failed to parse lsn from {}", parameters[2]))?;
2473 8 : Ok(Self {
2474 8 : tenant_shard_id,
2475 8 : timeline_id,
2476 8 : lsn,
2477 8 : })
2478 8 : }
2479 : }
2480 :
2481 : impl PageServiceCmd {
2482 84 : fn parse(query: &str) -> anyhow::Result<Self> {
2483 84 : let query = query.trim();
2484 84 : let Some((cmd, other)) = query.split_once(' ') else {
2485 8 : bail!("cannot parse query: {query}")
2486 : };
2487 76 : match cmd.to_ascii_lowercase().as_str() {
2488 76 : "pagestream_v2" => Ok(Self::PageStream(PageStreamCmd::parse(
2489 12 : other,
2490 12 : PagestreamProtocolVersion::V2,
2491 12 : )?)),
2492 64 : "pagestream_v3" => Ok(Self::PageStream(PageStreamCmd::parse(
2493 0 : other,
2494 0 : PagestreamProtocolVersion::V3,
2495 0 : )?)),
2496 64 : "basebackup" => Ok(Self::BaseBackup(BaseBackupCmd::parse(other)?)),
2497 28 : "fullbackup" => Ok(Self::FullBackup(FullBackupCmd::parse(other)?)),
2498 20 : "lease" => {
2499 12 : let Some((cmd2, other)) = other.split_once(' ') else {
2500 0 : bail!("invalid lease command: {cmd}");
2501 : };
2502 12 : let cmd2 = cmd2.to_ascii_lowercase();
2503 12 : if cmd2 == "lsn" {
2504 8 : Ok(Self::LeaseLsn(LeaseLsnCmd::parse(other)?))
2505 : } else {
2506 4 : bail!("invalid lease command: {cmd}");
2507 : }
2508 : }
2509 8 : "set" => Ok(Self::Set),
2510 0 : _ => Err(anyhow::anyhow!("unsupported command {cmd} in {query}")),
2511 : }
2512 84 : }
2513 : }
2514 :
2515 : impl<IO> postgres_backend::Handler<IO> for PageServerHandler
2516 : where
2517 : IO: AsyncRead + AsyncWrite + Send + Sync + Unpin + 'static,
2518 : {
2519 0 : fn check_auth_jwt(
2520 0 : &mut self,
2521 0 : _pgb: &mut PostgresBackend<IO>,
2522 0 : jwt_response: &[u8],
2523 0 : ) -> Result<(), QueryError> {
2524 :         // This unwrap is never triggered, because check_auth_jwt is only called when auth_type is NeonJWT,
2525 :         // which requires auth to be present.
2526 0 : let data = self
2527 0 : .auth
2528 0 : .as_ref()
2529 0 : .unwrap()
2530 0 : .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)
2531 0 : .map_err(|e| QueryError::Unauthorized(e.0))?;
2532 :
2533 0 : if matches!(data.claims.scope, Scope::Tenant) && data.claims.tenant_id.is_none() {
2534 0 : return Err(QueryError::Unauthorized(
2535 0 : "jwt token scope is Tenant, but tenant id is missing".into(),
2536 0 : ));
2537 0 : }
2538 0 :
2539 0 : debug!(
2540 0 : "jwt scope check succeeded for scope: {:#?} by tenant id: {:?}",
2541 : data.claims.scope, data.claims.tenant_id,
2542 : );
2543 :
2544 0 : self.claims = Some(data.claims);
2545 0 : Ok(())
2546 0 : }
2547 :
2548 0 : fn startup(
2549 0 : &mut self,
2550 0 : _pgb: &mut PostgresBackend<IO>,
2551 0 : sm: &FeStartupPacket,
2552 0 : ) -> Result<(), QueryError> {
2553 0 : fail::fail_point!("ps::connection-start::startup-packet");
2554 :
2555 0 : if let FeStartupPacket::StartupMessage { params, .. } = sm {
2556 0 : if let Some(app_name) = params.get("application_name") {
2557 0 : Span::current().record("application_name", field::display(app_name));
2558 0 : }
2559 0 : };
2560 :
2561 0 : Ok(())
2562 0 : }
2563 :
2564 : #[instrument(skip_all, fields(tenant_id, timeline_id))]
2565 : async fn process_query(
2566 : &mut self,
2567 : pgb: &mut PostgresBackend<IO>,
2568 : query_string: &str,
2569 : ) -> Result<(), QueryError> {
2570 0 : fail::fail_point!("simulated-bad-compute-connection", |_| {
2571 0 : info!("Hit failpoint for bad connection");
2572 0 : Err(QueryError::SimulatedConnectionError)
2573 0 : });
2574 :
2575 : fail::fail_point!("ps::connection-start::process-query");
2576 :
2577 : let ctx = self.connection_ctx.attached_child();
2578 : debug!("process query {query_string}");
2579 : let query = PageServiceCmd::parse(query_string)?;
2580 : match query {
2581 : PageServiceCmd::PageStream(PageStreamCmd {
2582 : tenant_id,
2583 : timeline_id,
2584 : protocol_version,
2585 : }) => {
2586 : tracing::Span::current()
2587 : .record("tenant_id", field::display(tenant_id))
2588 : .record("timeline_id", field::display(timeline_id));
2589 :
2590 : self.check_permission(Some(tenant_id))?;
2591 : let command_kind = match protocol_version {
2592 : PagestreamProtocolVersion::V2 => ComputeCommandKind::PageStreamV2,
2593 : PagestreamProtocolVersion::V3 => ComputeCommandKind::PageStreamV3,
2594 : };
2595 : COMPUTE_COMMANDS_COUNTERS.for_command(command_kind).inc();
2596 :
2597 : self.handle_pagerequests(pgb, tenant_id, timeline_id, protocol_version, ctx)
2598 : .await?;
2599 : }
2600 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2601 : tenant_id,
2602 : timeline_id,
2603 : lsn,
2604 : gzip,
2605 : replica,
2606 : }) => {
2607 : tracing::Span::current()
2608 : .record("tenant_id", field::display(tenant_id))
2609 : .record("timeline_id", field::display(timeline_id));
2610 :
2611 : self.check_permission(Some(tenant_id))?;
2612 :
2613 : COMPUTE_COMMANDS_COUNTERS
2614 : .for_command(ComputeCommandKind::Basebackup)
2615 : .inc();
2616 : let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording();
2617 0 : let res = async {
2618 0 : self.handle_basebackup_request(
2619 0 : pgb,
2620 0 : tenant_id,
2621 0 : timeline_id,
2622 0 : lsn,
2623 0 : None,
2624 0 : false,
2625 0 : gzip,
2626 0 : replica,
2627 0 : &ctx,
2628 0 : )
2629 0 : .await?;
2630 0 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2631 0 : Result::<(), QueryError>::Ok(())
2632 0 : }
2633 : .await;
2634 : metric_recording.observe(&res);
2635 : res?;
2636 : }
2637 : // same as basebackup, but result includes relational data as well
2638 : PageServiceCmd::FullBackup(FullBackupCmd {
2639 : tenant_id,
2640 : timeline_id,
2641 : lsn,
2642 : prev_lsn,
2643 : }) => {
2644 : tracing::Span::current()
2645 : .record("tenant_id", field::display(tenant_id))
2646 : .record("timeline_id", field::display(timeline_id));
2647 :
2648 : self.check_permission(Some(tenant_id))?;
2649 :
2650 : COMPUTE_COMMANDS_COUNTERS
2651 : .for_command(ComputeCommandKind::Fullbackup)
2652 : .inc();
2653 :
2654 : // Check that the timeline exists
2655 : self.handle_basebackup_request(
2656 : pgb,
2657 : tenant_id,
2658 : timeline_id,
2659 : lsn,
2660 : prev_lsn,
2661 : true,
2662 : false,
2663 : false,
2664 : &ctx,
2665 : )
2666 : .await?;
2667 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2668 : }
2669 : PageServiceCmd::Set => {
2670 : // important because psycopg2 executes "SET datestyle TO 'ISO'"
2671 : // on connect
2672 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
2673 : }
2674 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2675 : tenant_shard_id,
2676 : timeline_id,
2677 : lsn,
2678 : }) => {
2679 : tracing::Span::current()
2680 : .record("tenant_id", field::display(tenant_shard_id))
2681 : .record("timeline_id", field::display(timeline_id));
2682 :
2683 : self.check_permission(Some(tenant_shard_id.tenant_id))?;
2684 :
2685 : COMPUTE_COMMANDS_COUNTERS
2686 : .for_command(ComputeCommandKind::LeaseLsn)
2687 : .inc();
2688 :
2689 : match self
2690 : .handle_make_lsn_lease(pgb, tenant_shard_id, timeline_id, lsn, &ctx)
2691 : .await
2692 : {
2693 : Ok(()) => {
2694 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?
2695 : }
2696 : Err(e) => {
2697 : error!("error obtaining lsn lease for {lsn}: {e:?}");
2698 : pgb.write_message_noflush(&BeMessage::ErrorResponse(
2699 : &e.to_string(),
2700 : Some(e.pg_error_code()),
2701 : ))?
2702 : }
2703 : };
2704 : }
2705 : }
2706 :
2707 : Ok(())
2708 : }
2709 : }
2710 :
2711 : impl From<GetActiveTenantError> for QueryError {
2712 0 : fn from(e: GetActiveTenantError) -> Self {
2713 0 : match e {
2714 0 : GetActiveTenantError::WaitForActiveTimeout { .. } => QueryError::Disconnected(
2715 0 : ConnectionError::Io(io::Error::new(io::ErrorKind::TimedOut, e.to_string())),
2716 0 : ),
2717 : GetActiveTenantError::Cancelled
2718 : | GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
2719 0 : QueryError::Shutdown
2720 : }
2721 0 : e @ GetActiveTenantError::NotFound(_) => QueryError::NotFound(format!("{e}").into()),
2722 0 : e => QueryError::Other(anyhow::anyhow!(e)),
2723 : }
2724 0 : }
2725 : }
2726 :
2727 : #[derive(Debug, thiserror::Error)]
2728 : pub(crate) enum GetActiveTimelineError {
2729 : #[error(transparent)]
2730 : Tenant(GetActiveTenantError),
2731 : #[error(transparent)]
2732 : Timeline(#[from] GetTimelineError),
2733 : }
2734 :
2735 : impl From<GetActiveTimelineError> for QueryError {
2736 0 : fn from(e: GetActiveTimelineError) -> Self {
2737 0 : match e {
2738 0 : GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => QueryError::Shutdown,
2739 0 : GetActiveTimelineError::Tenant(e) => e.into(),
2740 0 : GetActiveTimelineError::Timeline(e) => QueryError::NotFound(format!("{e}").into()),
2741 : }
2742 0 : }
2743 : }
2744 :
2745 : impl From<crate::tenant::timeline::handle::HandleUpgradeError> for QueryError {
2746 0 : fn from(e: crate::tenant::timeline::handle::HandleUpgradeError) -> Self {
2747 0 : match e {
2748 0 : crate::tenant::timeline::handle::HandleUpgradeError::ShutDown => QueryError::Shutdown,
2749 0 : }
2750 0 : }
2751 : }
2752 :
2753 0 : fn set_tracing_field_shard_id(timeline: &Timeline) {
2754 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
2755 0 : tracing::Span::current().record(
2756 0 : "shard_id",
2757 0 : tracing::field::display(timeline.tenant_shard_id.shard_slug()),
2758 0 : );
2759 0 : debug_assert_current_span_has_tenant_and_timeline_id();
2760 0 : }
2761 :
2762 : struct WaitedForLsn(Lsn);
2763 : impl From<WaitedForLsn> for Lsn {
2764 0 : fn from(WaitedForLsn(lsn): WaitedForLsn) -> Self {
2765 0 : lsn
2766 0 : }
2767 : }
2768 :
2769 : #[cfg(test)]
2770 : mod tests {
2771 : use utils::shard::ShardCount;
2772 :
2773 : use super::*;
2774 :
2775 : #[test]
2776 4 : fn pageservice_cmd_parse() {
2777 4 : let tenant_id = TenantId::generate();
2778 4 : let timeline_id = TimelineId::generate();
2779 4 : let cmd =
2780 4 : PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id} {timeline_id}")).unwrap();
2781 4 : assert_eq!(
2782 4 : cmd,
2783 4 : PageServiceCmd::PageStream(PageStreamCmd {
2784 4 : tenant_id,
2785 4 : timeline_id,
2786 4 : protocol_version: PagestreamProtocolVersion::V2,
2787 4 : })
2788 4 : );
2789 4 : let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id}")).unwrap();
2790 4 : assert_eq!(
2791 4 : cmd,
2792 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2793 4 : tenant_id,
2794 4 : timeline_id,
2795 4 : lsn: None,
2796 4 : gzip: false,
2797 4 : replica: false
2798 4 : })
2799 4 : );
2800 4 : let cmd =
2801 4 : PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} --gzip")).unwrap();
2802 4 : assert_eq!(
2803 4 : cmd,
2804 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2805 4 : tenant_id,
2806 4 : timeline_id,
2807 4 : lsn: None,
2808 4 : gzip: true,
2809 4 : replica: false
2810 4 : })
2811 4 : );
2812 4 : let cmd =
2813 4 : PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} latest")).unwrap();
2814 4 : assert_eq!(
2815 4 : cmd,
2816 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2817 4 : tenant_id,
2818 4 : timeline_id,
2819 4 : lsn: None,
2820 4 : gzip: false,
2821 4 : replica: false
2822 4 : })
2823 4 : );
2824 4 : let cmd = PageServiceCmd::parse(&format!("basebackup {tenant_id} {timeline_id} 0/16ABCDE"))
2825 4 : .unwrap();
2826 4 : assert_eq!(
2827 4 : cmd,
2828 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2829 4 : tenant_id,
2830 4 : timeline_id,
2831 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2832 4 : gzip: false,
2833 4 : replica: false
2834 4 : })
2835 4 : );
2836 4 : let cmd = PageServiceCmd::parse(&format!(
2837 4 : "basebackup {tenant_id} {timeline_id} --replica --gzip"
2838 4 : ))
2839 4 : .unwrap();
2840 4 : assert_eq!(
2841 4 : cmd,
2842 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2843 4 : tenant_id,
2844 4 : timeline_id,
2845 4 : lsn: None,
2846 4 : gzip: true,
2847 4 : replica: true
2848 4 : })
2849 4 : );
2850 4 : let cmd = PageServiceCmd::parse(&format!(
2851 4 : "basebackup {tenant_id} {timeline_id} 0/16ABCDE --replica --gzip"
2852 4 : ))
2853 4 : .unwrap();
2854 4 : assert_eq!(
2855 4 : cmd,
2856 4 : PageServiceCmd::BaseBackup(BaseBackupCmd {
2857 4 : tenant_id,
2858 4 : timeline_id,
2859 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2860 4 : gzip: true,
2861 4 : replica: true
2862 4 : })
2863 4 : );
2864 4 : let cmd = PageServiceCmd::parse(&format!("fullbackup {tenant_id} {timeline_id}")).unwrap();
2865 4 : assert_eq!(
2866 4 : cmd,
2867 4 : PageServiceCmd::FullBackup(FullBackupCmd {
2868 4 : tenant_id,
2869 4 : timeline_id,
2870 4 : lsn: None,
2871 4 : prev_lsn: None
2872 4 : })
2873 4 : );
2874 4 : let cmd = PageServiceCmd::parse(&format!(
2875 4 : "fullbackup {tenant_id} {timeline_id} 0/16ABCDE 0/16ABCDF"
2876 4 : ))
2877 4 : .unwrap();
2878 4 : assert_eq!(
2879 4 : cmd,
2880 4 : PageServiceCmd::FullBackup(FullBackupCmd {
2881 4 : tenant_id,
2882 4 : timeline_id,
2883 4 : lsn: Some(Lsn::from_str("0/16ABCDE").unwrap()),
2884 4 : prev_lsn: Some(Lsn::from_str("0/16ABCDF").unwrap()),
2885 4 : })
2886 4 : );
2887 4 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2888 4 : let cmd = PageServiceCmd::parse(&format!(
2889 4 : "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
2890 4 : ))
2891 4 : .unwrap();
2892 4 : assert_eq!(
2893 4 : cmd,
2894 4 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2895 4 : tenant_shard_id,
2896 4 : timeline_id,
2897 4 : lsn: Lsn::from_str("0/16ABCDE").unwrap(),
2898 4 : })
2899 4 : );
2900 4 : let tenant_shard_id = TenantShardId::split(&tenant_shard_id, ShardCount(8))[1];
2901 4 : let cmd = PageServiceCmd::parse(&format!(
2902 4 : "lease lsn {tenant_shard_id} {timeline_id} 0/16ABCDE"
2903 4 : ))
2904 4 : .unwrap();
2905 4 : assert_eq!(
2906 4 : cmd,
2907 4 : PageServiceCmd::LeaseLsn(LeaseLsnCmd {
2908 4 : tenant_shard_id,
2909 4 : timeline_id,
2910 4 : lsn: Lsn::from_str("0/16ABCDE").unwrap(),
2911 4 : })
2912 4 : );
2913 4 : let cmd = PageServiceCmd::parse("set a = b").unwrap();
2914 4 : assert_eq!(cmd, PageServiceCmd::Set);
2915 4 : let cmd = PageServiceCmd::parse("SET foo").unwrap();
2916 4 : assert_eq!(cmd, PageServiceCmd::Set);
2917 4 : }
2918 :
2919 : #[test]
2920 4 : fn pageservice_cmd_err_handling() {
2921 4 : let tenant_id = TenantId::generate();
2922 4 : let timeline_id = TimelineId::generate();
2923 4 : let cmd = PageServiceCmd::parse("unknown_command");
2924 4 : assert!(cmd.is_err());
2925 4 : let cmd = PageServiceCmd::parse("pagestream_v2");
2926 4 : assert!(cmd.is_err());
2927 4 : let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx"));
2928 4 : assert!(cmd.is_err());
2929 4 : let cmd = PageServiceCmd::parse(&format!("pagestream_v2 {tenant_id}xxx {timeline_id}xxx"));
2930 4 : assert!(cmd.is_err());
2931 4 : let cmd = PageServiceCmd::parse(&format!(
2932 4 : "basebackup {tenant_id} {timeline_id} --gzip --gzip"
2933 4 : ));
2934 4 : assert!(cmd.is_err());
2935 4 : let cmd = PageServiceCmd::parse(&format!(
2936 4 : "basebackup {tenant_id} {timeline_id} --gzip --unknown"
2937 4 : ));
2938 4 : assert!(cmd.is_err());
2939 4 : let cmd = PageServiceCmd::parse(&format!(
2940 4 : "basebackup {tenant_id} {timeline_id} --gzip 0/16ABCDE"
2941 4 : ));
2942 4 : assert!(cmd.is_err());
2943 4 : let cmd = PageServiceCmd::parse(&format!("lease {tenant_id} {timeline_id} gzip 0/16ABCDE"));
2944 4 : assert!(cmd.is_err());
2945 4 : }
2946 : }
|