//! The Page Service listens for client connections and serves their GetPage@LSN
//! requests.

use anyhow::Context;
use async_compression::tokio::write::GzipEncoder;
use bytes::Buf;
use bytes::Bytes;
use futures::stream::FuturesUnordered;
use futures::Stream;
use futures::StreamExt;
use pageserver_api::key::Key;
use pageserver_api::models::TenantState;
use pageserver_api::models::{
    PagestreamBeMessage, PagestreamDbSizeRequest, PagestreamDbSizeResponse,
    PagestreamErrorResponse, PagestreamExistsRequest, PagestreamExistsResponse,
    PagestreamFeMessage, PagestreamGetPageRequest, PagestreamGetPageResponse,
    PagestreamGetSlruSegmentRequest, PagestreamGetSlruSegmentResponse, PagestreamNblocksRequest,
    PagestreamNblocksResponse, PagestreamProtocolVersion,
};
use pageserver_api::shard::ShardIndex;
use pageserver_api::shard::ShardNumber;
use postgres_backend::{is_expected_io_error, AuthType, PostgresBackend, QueryError};
use pq_proto::framed::ConnectionError;
use pq_proto::FeStartupPacket;
use pq_proto::{BeMessage, FeMessage, RowDescriptor};
use std::borrow::Cow;
use std::collections::HashMap;
use std::io;
use std::net::TcpListener;
use std::pin::pin;
use std::str;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio::io::{AsyncRead, AsyncWrite};
use tokio_util::io::StreamReader;
use tokio_util::sync::CancellationToken;
use tracing::*;
use utils::id::ConnectionId;
use utils::sync::gate::GateGuard;
use utils::{
    auth::{Claims, Scope, SwappableJwtAuth},
    id::{TenantId, TimelineId},
    lsn::Lsn,
    simple_rcu::RcuReadGuard,
};

use crate::auth::check_permission;
use crate::basebackup;
use crate::basebackup::BasebackupError;
use crate::config::PageServerConf;
use crate::context::{DownloadBehavior, RequestContext};
use crate::import_datadir::import_wal_from_tar;
use crate::metrics;
use crate::metrics::LIVE_CONNECTIONS_COUNT;
use crate::pgdatadir_mapping::Version;
use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
use crate::span::debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id;
use crate::task_mgr;
use crate::task_mgr::TaskKind;
use crate::tenant::mgr;
use crate::tenant::mgr::get_active_tenant_with_timeout;
use crate::tenant::mgr::GetActiveTenantError;
use crate::tenant::mgr::ShardSelector;
use crate::tenant::timeline::WaitLsnError;
use crate::tenant::GetTimelineError;
use crate::tenant::PageReconstructError;
use crate::tenant::Timeline;
use crate::trace::Tracer;
use pageserver_api::key::rel_block_to_key;
use pageserver_api::reltag::SlruKind;
use postgres_ffi::pg_constants::DEFAULTTABLESPACE_OID;
use postgres_ffi::BLCKSZ;

// How long we may wait for a [`TenantSlot::InProgress`] and/or a [`Tenant`] which
// is not yet in state [`TenantState::Active`].
const ACTIVE_TENANT_TIMEOUT: Duration = Duration::from_millis(30000);

/// Read the end of a tar archive.
///
/// A tar archive normally ends with two consecutive blocks of zeros, 512 bytes each.
/// `tokio_tar` already read the first such block. Read the second all-zeros block,
/// and check that there is no more data after the EOF marker.
///
/// The 'tar' command can also write extra blocks of zeros, up to a record
/// size, controlled by the --record-size argument. Ignore them too.
async fn read_tar_eof(mut reader: (impl AsyncRead + Unpin)) -> anyhow::Result<()> {
    use tokio::io::AsyncReadExt;
    let mut buf = [0u8; 512];

    // Read the all-zeros block, and verify it
    let mut total_bytes = 0;
    while total_bytes < 512 {
        let nbytes = reader.read(&mut buf[total_bytes..]).await?;
        total_bytes += nbytes;
        if nbytes == 0 {
            break;
        }
    }
    if total_bytes < 512 {
        anyhow::bail!("incomplete or invalid tar EOF marker");
    }
    if !buf.iter().all(|&x| x == 0) {
        anyhow::bail!("invalid tar EOF marker");
    }

    // Drain any extra zero-blocks after the EOF marker
    let mut trailing_bytes = 0;
    let mut seen_nonzero_bytes = false;
    loop {
        let nbytes = reader.read(&mut buf).await?;
        trailing_bytes += nbytes;
        if !buf.iter().all(|&x| x == 0) {
            seen_nonzero_bytes = true;
        }
        if nbytes == 0 {
            break;
        }
    }
    if seen_nonzero_bytes {
        anyhow::bail!("unexpected non-zero bytes after the tar archive");
    }
    if trailing_bytes % 512 != 0 {
        anyhow::bail!("unexpected number of zeros ({trailing_bytes}), not divisible by tar block size (512 bytes), after the tar archive");
    }
    Ok(())
}
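
// A minimal illustration of the tar EOF handling above. This test module is
// an editorial sketch, not part of the original file; it assumes a `tokio`
// dev-dependency with the `macros` and `rt` features enabled. A valid ending
// is one more all-zeros block (tokio_tar consumed the first), optionally
// followed by zero padding in whole 512-byte blocks.
#[cfg(test)]
mod read_tar_eof_tests {
    use super::read_tar_eof;

    #[tokio::test]
    async fn accepts_second_zero_block_and_padding() {
        // One 512-byte EOF block plus two 512-byte padding blocks, all zeros.
        let data = vec![0u8; 512 * 3];
        assert!(read_tar_eof(&data[..]).await.is_ok());
    }

    #[tokio::test]
    async fn rejects_trailing_garbage() {
        // A valid zero block followed by non-zero trailing bytes must fail.
        let mut data = vec![0u8; 512];
        data.extend_from_slice(b"garbage after the archive");
        assert!(read_tar_eof(&data[..]).await.is_err());
    }
}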

///////////////////////////////////////////////////////////////////////////////

///
/// Main loop of the page service.
///
/// Listens for connections, and launches a new handler task for each.
///
pub async fn libpq_listener_main(
    conf: &'static PageServerConf,
    broker_client: storage_broker::BrokerClientChannel,
    auth: Option<Arc<SwappableJwtAuth>>,
    listener: TcpListener,
    auth_type: AuthType,
    listener_ctx: RequestContext,
    cancel: CancellationToken,
) -> anyhow::Result<()> {
    listener.set_nonblocking(true)?;
    let tokio_listener = tokio::net::TcpListener::from_std(listener)?;

    // Wait for a new connection to arrive, or for server shutdown.
    while let Some(res) = tokio::select! {
        biased;

        _ = cancel.cancelled() => {
            // We were requested to shut down.
            None
        }

        res = tokio_listener.accept() => {
            Some(res)
        }
    } {
        match res {
            Ok((socket, peer_addr)) => {
                // Connection established. Spawn a new task to handle it.
                debug!("accepted connection from {}", peer_addr);
                let local_auth = auth.clone();

                let connection_ctx = listener_ctx
                    .detached_child(TaskKind::PageRequestHandler, DownloadBehavior::Download);

                // PageRequestHandler tasks are not associated with any particular
                // timeline in the task manager. In practice most connections will
                // only deal with a particular timeline, but we don't know which one
                // yet.
                task_mgr::spawn(
                    &tokio::runtime::Handle::current(),
                    TaskKind::PageRequestHandler,
                    None,
                    None,
                    "serving compute connection task",
                    false,
                    page_service_conn_main(
                        conf,
                        broker_client.clone(),
                        local_auth,
                        socket,
                        auth_type,
                        connection_ctx,
                    ),
                );
            }
            Err(err) => {
                // accept() failed. Log the error, and loop back to retry on the next connection.
                error!("accept() failed: {:?}", err);
            }
        }
    }

    debug!("page_service loop terminated");

    Ok(())
}

#[instrument(skip_all, fields(peer_addr))]
async fn page_service_conn_main(
    conf: &'static PageServerConf,
    broker_client: storage_broker::BrokerClientChannel,
    auth: Option<Arc<SwappableJwtAuth>>,
    socket: tokio::net::TcpStream,
    auth_type: AuthType,
    connection_ctx: RequestContext,
) -> anyhow::Result<()> {
    // Immediately increment the gauge, then create a job to decrement it on task exit.
    // One of the pros of `defer!` is that this will *most probably*
    // get called, even in the presence of panics.
    let gauge = LIVE_CONNECTIONS_COUNT.with_label_values(&["page_service"]);
    gauge.inc();
    scopeguard::defer! {
        gauge.dec();
    }

    socket
        .set_nodelay(true)
        .context("could not set TCP_NODELAY")?;

    let peer_addr = socket.peer_addr().context("get peer address")?;
    tracing::Span::current().record("peer_addr", field::display(peer_addr));

    // Set up a read timeout of 10 minutes. The value is somewhat arbitrary; it needs to be:
    // - long enough for most valid compute connections
    // - less than infinite, to stop us from "leaking" connections to long-gone computes
    //
    // No write timeout is used, because the kernel is assumed to error writes after some time.
    let mut socket = tokio_io_timeout::TimeoutReader::new(socket);

    let default_timeout_ms = 10 * 60 * 1000; // 10 minutes by default
    let socket_timeout_ms = (|| {
        fail::fail_point!("simulated-bad-compute-connection", |avg_timeout_ms| {
            // Sample the timeout from an exponential distribution to simulate
            // poor network conditions; in tests, avg_timeout_ms is expected to
            // be around 15.
            if let Some(avg_timeout_ms) = avg_timeout_ms {
                let avg = avg_timeout_ms.parse::<i64>().unwrap() as f32;
                let u = rand::random::<f32>();
                ((1.0 - u).ln() / (-avg)) as u64
            } else {
                default_timeout_ms
            }
        });
        default_timeout_ms
    })();
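
    // Editorial note on the failpoint above: it uses inverse-transform
    // sampling. For U ~ Uniform(0,1), -ln(1 - U) follows an Exp(1)
    // distribution, so scaling it by the rate parameter yields exponentially
    // distributed timeouts; short samples are common, which is exactly what a
    // flaky compute connection looks like.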

    // A timeout here does not mean the client died; it can happen if it's just idle for
    // a while: we will tear down this PageServerHandler and instantiate a new one if/when
    // they reconnect.
    socket.set_timeout(Some(std::time::Duration::from_millis(socket_timeout_ms)));
    let socket = std::pin::pin!(socket);

    // XXX: pgbackend.run() should take the connection_ctx,
    // and create a child per-query context when it invokes process_query.
    // But it's in a shared crate, so, we store connection_ctx inside PageServerHandler
    // and create the per-query context in process_query ourselves.
    let mut conn_handler = PageServerHandler::new(conf, broker_client, auth, connection_ctx);
    let pgbackend = PostgresBackend::new_from_io(socket, peer_addr, auth_type, None)?;

    match pgbackend
        .run(&mut conn_handler, task_mgr::shutdown_watcher)
        .await
    {
        Ok(()) => {
            // we've been requested to shut down
            Ok(())
        }
        Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
            if is_expected_io_error(&io_error) {
                info!("Postgres client disconnected ({io_error})");
                Ok(())
            } else {
                Err(io_error).context("Postgres connection error")
            }
        }
        other => other.context("Postgres query error"),
    }
}

/// While a handler holds a reference to a Timeline, it also holds the
/// timeline's Gate open.
struct HandlerTimeline {
    timeline: Arc<Timeline>,
    _guard: GateGuard,
}

struct PageServerHandler {
    _conf: &'static PageServerConf,
    broker_client: storage_broker::BrokerClientChannel,
    auth: Option<Arc<SwappableJwtAuth>>,
    claims: Option<Claims>,

    /// The context created for the lifetime of the connection
    /// serviced by this PageServerHandler.
    /// For each query received over the connection,
    /// `process_query` creates a child context from this one.
    connection_ctx: RequestContext,

    /// See [`Self::cache_timeline`] for usage.
    ///
    /// Note on size: the typical size of this map is 1. The largest size we expect
    /// to see is the number of shards divided by the number of pageservers (typically < 2),
    /// or the ratio used when splitting shards (i.e. how many children are created from
    /// one parent shard), where a "large" number might be ~8.
    shard_timelines: HashMap<ShardIndex, HandlerTimeline>,
}

#[derive(thiserror::Error, Debug)]
enum PageStreamError {
    /// We encountered an error that should prompt the client to reconnect:
    /// in practice this means we drop the connection without sending a response.
    #[error("Reconnect required: {0}")]
    Reconnect(Cow<'static, str>),

    /// We were instructed to shut down while processing the query
    #[error("Shutting down")]
    Shutdown,

    /// Something went wrong reading a page: this likely indicates a pageserver bug
    #[error("Read error")]
    Read(#[source] PageReconstructError),

    /// Ran out of time waiting for an LSN
    #[error("LSN timeout: {0}")]
    LsnTimeout(WaitLsnError),

    /// The entity required to serve the request (tenant or timeline) is not found,
    /// or is not found in a suitable state to serve a request.
    #[error("Not found: {0}")]
    NotFound(Cow<'static, str>),

    /// Request asked for something that doesn't make sense, like an invalid LSN
    #[error("Bad request: {0}")]
    BadRequest(Cow<'static, str>),
}

impl From<PageReconstructError> for PageStreamError {
    fn from(value: PageReconstructError) -> Self {
        match value {
            PageReconstructError::Cancelled => Self::Shutdown,
            e => Self::Read(e),
        }
    }
}

impl From<GetActiveTimelineError> for PageStreamError {
    fn from(value: GetActiveTimelineError) -> Self {
        match value {
            GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => Self::Shutdown,
            GetActiveTimelineError::Tenant(e) => Self::NotFound(format!("{e}").into()),
            GetActiveTimelineError::Timeline(e) => Self::NotFound(format!("{e}").into()),
        }
    }
}

impl From<WaitLsnError> for PageStreamError {
    fn from(value: WaitLsnError) -> Self {
        match value {
            e @ WaitLsnError::Timeout(_) => Self::LsnTimeout(e),
            WaitLsnError::Shutdown => Self::Shutdown,
            WaitLsnError::BadState => Self::Reconnect("Timeline is not active".into()),
        }
    }
}

impl From<WaitLsnError> for QueryError {
    fn from(value: WaitLsnError) -> Self {
        match value {
            e @ WaitLsnError::Timeout(_) => Self::Other(anyhow::Error::new(e)),
            WaitLsnError::Shutdown => Self::Shutdown,
            WaitLsnError::BadState => Self::Reconnect,
        }
    }
}

impl PageServerHandler {
    pub fn new(
        conf: &'static PageServerConf,
        broker_client: storage_broker::BrokerClientChannel,
        auth: Option<Arc<SwappableJwtAuth>>,
        connection_ctx: RequestContext,
    ) -> Self {
        PageServerHandler {
            _conf: conf,
            broker_client,
            auth,
            claims: None,
            connection_ctx,
            shard_timelines: HashMap::new(),
        }
    }

    /// Future that completes when we need to shut down the connection.
    ///
    /// We currently need to shut down when any of the following happens:
    /// 1. any of the timelines we hold GateGuards for in `shard_timelines` is cancelled
    /// 2. task_mgr requests shutdown of the connection
    ///
    /// NB on (1): the connection's lifecycle is not actually tied to any of the
    /// `shard_timelines`s' lifecycles. But it's _necessary_ in the current
    /// implementation to be responsive to timeline cancellation because
    /// the connection holds their `GateGuards` open (stored in `shard_timelines`).
    /// We currently do the easy thing and terminate the connection if any of the
    /// shard_timelines gets cancelled. But really, we could spend more effort
    /// and simply remove the cancelled timeline from the `shard_timelines`, thereby
    /// dropping the guard.
    ///
    /// NB: keep in sync with [`Self::is_connection_cancelled`]
    async fn await_connection_cancelled(&self) {
        // A short wait before we expend the cycles to walk our timeline map. This avoids incurring
        // that cost every time we check for cancellation.
        tokio::time::sleep(Duration::from_millis(10)).await;

        // This function is never called concurrently with code that adds timelines to shard_timelines,
        // which is enforced by the borrow checker (the future returned by this function carries the
        // immutable &self). So it's fine to evaluate shard_timelines after the sleep, we don't risk
        // missing any inserts to the map.

        let mut cancellation_sources = Vec::with_capacity(1 + self.shard_timelines.len());
        use futures::future::Either;
        cancellation_sources.push(Either::Left(task_mgr::shutdown_watcher()));
        cancellation_sources.extend(
            self.shard_timelines
                .values()
                .map(|ht| Either::Right(ht.timeline.cancel.cancelled())),
        );
        FuturesUnordered::from_iter(cancellation_sources)
            .next()
            .await;
    }

    /// Checking variant of [`Self::await_connection_cancelled`].
    fn is_connection_cancelled(&self) -> bool {
        task_mgr::is_shutdown_requested()
            || self
                .shard_timelines
                .values()
                .any(|ht| ht.timeline.cancel.is_cancelled() || ht.timeline.is_stopping())
    }

    /// This function always respects cancellation of any timeline in [`Self::shard_timelines`]. Pass in
    /// a cancellation token at the next scope up (such as a tenant cancellation token) to ensure we respect
    /// cancellation if there aren't any timelines in the cache.
    ///
    /// If calling from a function that doesn't use the [`Self::shard_timelines`] cache, then pass in the
    /// timeline cancellation token.
    async fn flush_cancellable<IO>(
        &self,
        pgb: &mut PostgresBackend<IO>,
        cancel: &CancellationToken,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        tokio::select!(
            flush_r = pgb.flush() => {
                Ok(flush_r?)
            },
            _ = self.await_connection_cancelled() => {
                Err(QueryError::Shutdown)
            }
            _ = cancel.cancelled() => {
                Err(QueryError::Shutdown)
            }
        )
    }

    fn copyin_stream<'a, IO>(
        &'a self,
        pgb: &'a mut PostgresBackend<IO>,
        cancel: &'a CancellationToken,
    ) -> impl Stream<Item = io::Result<Bytes>> + 'a
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        async_stream::try_stream! {
            loop {
                let msg = tokio::select! {
                    biased;

                    _ = cancel.cancelled() => {
                        // We were requested to shut down.
                        let msg = "pageserver is shutting down";
                        let _ = pgb.write_message_noflush(&BeMessage::ErrorResponse(msg, None));
                        Err(QueryError::Shutdown)
                    }

                    msg = pgb.read_message() => { msg.map_err(QueryError::from) }
                };

                match msg {
                    Ok(Some(message)) => {
                        let copy_data_bytes = match message {
                            FeMessage::CopyData(bytes) => bytes,
                            FeMessage::CopyDone => { break },
                            FeMessage::Sync => continue,
                            FeMessage::Terminate => {
                                let msg = "client terminated connection with Terminate message during COPY";
                                let query_error = QueryError::Disconnected(ConnectionError::Io(io::Error::new(io::ErrorKind::ConnectionReset, msg)));
                                // An error can't happen here: ErrorResponse serialization should always be ok
                                pgb.write_message_noflush(&BeMessage::ErrorResponse(msg, Some(query_error.pg_error_code()))).map_err(|e| e.into_io_error())?;
                                Err(io::Error::new(io::ErrorKind::ConnectionReset, msg))?;
                                break;
                            }
                            m => {
                                let msg = format!("unexpected message {m:?}");
                                // An error can't happen here: ErrorResponse serialization should always be ok
                                pgb.write_message_noflush(&BeMessage::ErrorResponse(&msg, None)).map_err(|e| e.into_io_error())?;
                                Err(io::Error::new(io::ErrorKind::Other, msg))?;
                                break;
                            }
                        };

                        yield copy_data_bytes;
                    }
                    Ok(None) => {
                        let msg = "client closed connection during COPY";
                        let query_error = QueryError::Disconnected(ConnectionError::Io(io::Error::new(io::ErrorKind::ConnectionReset, msg)));
                        // An error can't happen here: ErrorResponse serialization should always be ok
                        pgb.write_message_noflush(&BeMessage::ErrorResponse(msg, Some(query_error.pg_error_code()))).map_err(|e| e.into_io_error())?;
                        self.flush_cancellable(pgb, cancel).await.map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))?;
                        Err(io::Error::new(io::ErrorKind::ConnectionReset, msg))?;
                    }
                    Err(QueryError::Disconnected(ConnectionError::Io(io_error))) => {
                        Err(io_error)?;
                    }
                    Err(other) => {
                        Err(io::Error::new(io::ErrorKind::Other, other.to_string()))?;
                    }
                };
            }
        }
    }

    #[instrument(skip_all)]
    async fn handle_pagerequests<IO>(
        &mut self,
        pgb: &mut PostgresBackend<IO>,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        protocol_version: PagestreamProtocolVersion,
        ctx: RequestContext,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();

        let tenant = mgr::get_active_tenant_with_timeout(
            tenant_id,
            ShardSelector::First,
            ACTIVE_TENANT_TIMEOUT,
            &task_mgr::shutdown_token(),
        )
        .await?;

        // Create a request tracer if needed
        let mut tracer = if tenant.get_trace_read_requests() {
            let connection_id = ConnectionId::generate();
            let path =
                tenant
                    .conf
                    .trace_path(&tenant.tenant_shard_id(), &timeline_id, &connection_id);
            Some(Tracer::new(path))
        } else {
            None
        };

        // switch client to COPYBOTH
        pgb.write_message_noflush(&BeMessage::CopyBothResponse)?;
        self.flush_cancellable(pgb, &tenant.cancel).await?;

        loop {
            let msg = tokio::select! {
                biased;

                _ = self.await_connection_cancelled() => {
                    // We were requested to shut down.
                    info!("shutdown request received in page handler");
                    return Err(QueryError::Shutdown)
                }

                msg = pgb.read_message() => { msg }
            };

            let copy_data_bytes = match msg? {
                Some(FeMessage::CopyData(bytes)) => bytes,
                Some(FeMessage::Terminate) => break,
                Some(m) => {
                    return Err(QueryError::Other(anyhow::anyhow!(
                        "unexpected message: {m:?} during COPY"
                    )));
                }
                None => break, // client disconnected
            };

            trace!("query: {copy_data_bytes:?}");

            // Trace the request if needed
            if let Some(t) = tracer.as_mut() {
                t.trace(&copy_data_bytes)
            }

            let neon_fe_msg =
                PagestreamFeMessage::parse(&mut copy_data_bytes.reader(), protocol_version)?;

            // TODO: We could create a new per-request context here, with a unique ID.
            // Currently we use the same per-timeline context for all requests

            let (response, span) = match neon_fe_msg {
                PagestreamFeMessage::Exists(req) => {
                    let span = tracing::info_span!("handle_get_rel_exists_request", rel = %req.rel, req_lsn = %req.request_lsn);
                    (
                        self.handle_get_rel_exists_request(tenant_id, timeline_id, &req, &ctx)
                            .instrument(span.clone())
                            .await,
                        span,
                    )
                }
                PagestreamFeMessage::Nblocks(req) => {
                    let span = tracing::info_span!("handle_get_nblocks_request", rel = %req.rel, req_lsn = %req.request_lsn);
                    (
                        self.handle_get_nblocks_request(tenant_id, timeline_id, &req, &ctx)
                            .instrument(span.clone())
                            .await,
                        span,
                    )
                }
                PagestreamFeMessage::GetPage(req) => {
                    // shard_id is filled in by the handler
                    let span = tracing::info_span!("handle_get_page_at_lsn_request", rel = %req.rel, blkno = %req.blkno, req_lsn = %req.request_lsn);
                    (
                        self.handle_get_page_at_lsn_request(tenant_id, timeline_id, &req, &ctx)
                            .instrument(span.clone())
                            .await,
                        span,
                    )
                }
                PagestreamFeMessage::DbSize(req) => {
                    let span = tracing::info_span!("handle_db_size_request", dbnode = %req.dbnode, req_lsn = %req.request_lsn);
                    (
                        self.handle_db_size_request(tenant_id, timeline_id, &req, &ctx)
                            .instrument(span.clone())
                            .await,
                        span,
                    )
                }
                PagestreamFeMessage::GetSlruSegment(req) => {
                    let span = tracing::info_span!("handle_get_slru_segment_request", kind = %req.kind, segno = %req.segno, req_lsn = %req.request_lsn);
                    (
                        self.handle_get_slru_segment_request(tenant_id, timeline_id, &req, &ctx)
                            .instrument(span.clone())
                            .await,
                        span,
                    )
                }
            };

            match response {
                Err(PageStreamError::Shutdown) => {
                    // If we fail to fulfil a request during shutdown, which may be _because_ of
                    // shutdown, then do not send the error to the client. Instead just drop the
                    // connection.
                    span.in_scope(|| info!("dropping connection due to shutdown"));
                    return Err(QueryError::Shutdown);
                }
                Err(PageStreamError::Reconnect(reason)) => {
                    span.in_scope(|| info!("handler requested reconnect: {reason}"));
                    return Err(QueryError::Reconnect);
                }
                Err(e) if self.is_connection_cancelled() => {
                    // This branch accommodates code within request handlers that returns an anyhow::Error instead of a clean
                    // shutdown error; this may be buried inside a PageReconstructError::Other, for example.
                    //
                    // Requests may fail as soon as we are Stopping, even if the Timeline's cancellation token wasn't fired yet,
                    // because wait_lsn etc will drop out
                    // is_stopping(): [`Timeline::flush_and_shutdown`] has entered
                    // is_canceled(): [`Timeline::shutdown`] has entered
                    span.in_scope(|| info!("dropped error response during shutdown: {e:#}"));
                    return Err(QueryError::Shutdown);
                }
                r => {
                    let response_msg = r.unwrap_or_else(|e| {
                        // Log all the details with {:#}; for the client, the
                        // error message alone is enough. Do not log if shutting down, as the anyhow::Error
                        // here includes cancellation, which is not an error.
                        let full = utils::error::report_compact_sources(&e);
                        span.in_scope(|| {
                            error!("error reading relation or page version: {full:#}")
                        });
                        PagestreamBeMessage::Error(PagestreamErrorResponse {
                            message: e.to_string(),
                        })
                    });

                    pgb.write_message_noflush(&BeMessage::CopyData(&response_msg.serialize()))?;
                    self.flush_cancellable(pgb, &tenant.cancel).await?;
                }
            }
        }
        Ok(())
    }

    #[allow(clippy::too_many_arguments)]
    #[instrument(skip_all, fields(%base_lsn, end_lsn=%_end_lsn, %pg_version))]
    async fn handle_import_basebackup<IO>(
        &self,
        pgb: &mut PostgresBackend<IO>,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        base_lsn: Lsn,
        _end_lsn: Lsn,
        pg_version: u32,
        ctx: RequestContext,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();

        // Create empty timeline
        info!("creating new timeline");
        let tenant = get_active_tenant_with_timeout(
            tenant_id,
            ShardSelector::Zero,
            ACTIVE_TENANT_TIMEOUT,
            &task_mgr::shutdown_token(),
        )
        .await?;
        let timeline = tenant
            .create_empty_timeline(timeline_id, base_lsn, pg_version, &ctx)
            .await?;

        // TODO mark timeline as not ready until it reaches end_lsn.
        // We might have some wal to import as well, and we should prevent compute
        // from connecting before that and writing conflicting wal.
        //
        // This is not relevant for pageserver->pageserver migrations, since there's
        // no wal to import. But it should be fixed if we want to import from postgres.

        // TODO leave clean state on error. For now you can use detach to clean
        // up broken state from a failed import.

        // Import the basebackup provided via CopyData
        info!("importing basebackup");
        pgb.write_message_noflush(&BeMessage::CopyInResponse)?;
        self.flush_cancellable(pgb, &tenant.cancel).await?;

        let mut copyin_reader = pin!(StreamReader::new(self.copyin_stream(pgb, &tenant.cancel)));
        timeline
            .import_basebackup_from_tar(
                tenant.clone(),
                &mut copyin_reader,
                base_lsn,
                self.broker_client.clone(),
                &ctx,
            )
            .await?;

        // Read the end of the tar archive.
        read_tar_eof(copyin_reader).await?;

        // TODO check checksum
        // Meanwhile you can verify client-side by taking a fullbackup
        // and checking that it matches in size with what was imported.
        // It wouldn't work if the base came from vanilla postgres, though,
        // since we discard some log files.

        info!("done");
        Ok(())
    }

    #[instrument(skip_all, fields(shard_id, %start_lsn, %end_lsn))]
    async fn handle_import_wal<IO>(
        &self,
        pgb: &mut PostgresBackend<IO>,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        start_lsn: Lsn,
        end_lsn: Lsn,
        ctx: RequestContext,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        let timeline = self
            .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero)
            .await?;
        let last_record_lsn = timeline.get_last_record_lsn();
        if last_record_lsn != start_lsn {
            return Err(QueryError::Other(
                anyhow::anyhow!("Cannot import WAL from Lsn {start_lsn} because timeline does not start from the same lsn: {last_record_lsn}"))
            );
        }

        // TODO leave clean state on error. For now you can use detach to clean
        // up broken state from a failed import.

        // Import the wal provided via CopyData
        info!("importing wal");
        pgb.write_message_noflush(&BeMessage::CopyInResponse)?;
        self.flush_cancellable(pgb, &timeline.cancel).await?;
        let mut copyin_reader = pin!(StreamReader::new(self.copyin_stream(pgb, &timeline.cancel)));
        import_wal_from_tar(&timeline, &mut copyin_reader, start_lsn, end_lsn, &ctx).await?;
        info!("wal import complete");

        // Read the end of the tar archive.
        read_tar_eof(copyin_reader).await?;

        // TODO Does it make sense to overshoot?
        if timeline.get_last_record_lsn() < end_lsn {
            return Err(QueryError::Other(
                anyhow::anyhow!("WAL import did not reach the expected end LSN {end_lsn}: the timeline's last record LSN is {}", timeline.get_last_record_lsn()))
            );
        }

        // Flush data to disk, then upload to s3. No need for a forced checkpoint.
        // We only want to persist the data, and it doesn't matter if it's in the
        // shape of deltas or images.
        info!("flushing layers");
        timeline.freeze_and_flush().await?;

        info!("done");
        Ok(())
    }

    /// Helper function to handle the LSN from a client request.
    ///
    /// Each GetPage (and Exists and Nblocks) request includes information about
    /// which version of the page is being requested. The primary compute node
    /// will always request the latest page version, by setting 'request_lsn' to
    /// the last inserted or flushed WAL position, while a standby will request
    /// a version at the LSN that it's currently caught up to.
    ///
    /// In either case, if the page server hasn't received the WAL up to the
    /// requested LSN yet, we will wait for it to arrive. The return value is
    /// the LSN that should be used to look up the page versions.
    ///
    /// In addition to the request LSN, each request carries another LSN,
    /// 'not_modified_since', which is a hint to the pageserver that the client
    /// knows that the page has not been modified between 'not_modified_since'
    /// and the request LSN. This allows skipping the wait, as long as the WAL
    /// up to 'not_modified_since' has arrived. If the client doesn't have any
    /// information about when the page was modified, it will use
    /// not_modified_since == lsn. If the client lies and sends a
    /// 'not_modified_since' that is too low, such that there are in fact later
    /// page versions, the behavior is undefined: the pageserver may return any
    /// of the page versions or an error.
    async fn wait_or_get_last_lsn(
        timeline: &Timeline,
        request_lsn: Lsn,
        not_modified_since: Lsn,
        latest_gc_cutoff_lsn: &RcuReadGuard<Lsn>,
        ctx: &RequestContext,
    ) -> Result<Lsn, PageStreamError> {
        let last_record_lsn = timeline.get_last_record_lsn();

        // Sanity check the request
        if request_lsn < not_modified_since {
            return Err(PageStreamError::BadRequest(
                format!(
                    "invalid request with request LSN {} and not_modified_since {}",
                    request_lsn, not_modified_since,
                )
                .into(),
            ));
        }

        if request_lsn < **latest_gc_cutoff_lsn {
            // Check explicitly for INVALID just to get a less scary error message if the
            // request is obviously bogus
            return Err(if request_lsn == Lsn::INVALID {
                PageStreamError::BadRequest("invalid LSN(0) in request".into())
            } else {
                PageStreamError::BadRequest(format!(
                    "tried to request a page version that was garbage collected. requested at {} gc cutoff {}",
                    request_lsn, **latest_gc_cutoff_lsn
                ).into())
            });
        }

        // Wait for WAL up to 'not_modified_since' to arrive, if necessary
        if not_modified_since > last_record_lsn {
            timeline
                .wait_lsn(
                    not_modified_since,
                    crate::tenant::timeline::WaitLsnWaiter::PageService,
                    ctx,
                )
                .await?;
            // Since we waited for 'not_modified_since' to arrive, that is now the last
            // record LSN. (Or close enough for our purposes; the last-record LSN can
            // advance immediately after we return anyway)
            Ok(not_modified_since)
        } else {
            // It might be better to use max(not_modified_since, latest_gc_cutoff_lsn)
            // here instead. That would give the same result, since we know that there
            // haven't been any modifications since 'not_modified_since'. Using an older
            // LSN might be faster, because that could allow skipping recent layers when
            // finding the page. However, we have historically used 'last_record_lsn', so
            // stick to that for now.
            Ok(std::cmp::min(last_record_lsn, request_lsn))
        }
    }
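
    // Worked example for the LSN handling above (editorial; the numbers are
    // made up): suppose last_record_lsn = 0/2000 and the GC cutoff = 0/1000.
    //
    //   request_lsn = 0/3000, not_modified_since = 0/1500
    //     -> no wait (0/1500 <= 0/2000); serve at min(0/2000, 0/3000) = 0/2000.
    //   request_lsn = 0/3000, not_modified_since = 0/2500
    //     -> wait for WAL up to 0/2500 to arrive, then serve at 0/2500.
    //   request_lsn = 0/0500 (below the GC cutoff)
    //     -> BadRequest: that page version may already be garbage collected.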

    #[instrument(skip_all, fields(shard_id))]
    async fn handle_get_rel_exists_request(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        req: &PagestreamExistsRequest,
        ctx: &RequestContext,
    ) -> Result<PagestreamBeMessage, PageStreamError> {
        let timeline = self.get_timeline_shard_zero(tenant_id, timeline_id).await?;
        let _timer = timeline
            .query_metrics
            .start_timer(metrics::SmgrQueryType::GetRelExists, ctx);

        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        let lsn = Self::wait_or_get_last_lsn(
            timeline,
            req.request_lsn,
            req.not_modified_since,
            &latest_gc_cutoff_lsn,
            ctx,
        )
        .await?;

        let exists = timeline
            .get_rel_exists(req.rel, Version::Lsn(lsn), ctx)
            .await?;

        Ok(PagestreamBeMessage::Exists(PagestreamExistsResponse {
            exists,
        }))
    }

    #[instrument(skip_all, fields(shard_id))]
    async fn handle_get_nblocks_request(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        req: &PagestreamNblocksRequest,
        ctx: &RequestContext,
    ) -> Result<PagestreamBeMessage, PageStreamError> {
        let timeline = self.get_timeline_shard_zero(tenant_id, timeline_id).await?;

        let _timer = timeline
            .query_metrics
            .start_timer(metrics::SmgrQueryType::GetRelSize, ctx);

        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        let lsn = Self::wait_or_get_last_lsn(
            timeline,
            req.request_lsn,
            req.not_modified_since,
            &latest_gc_cutoff_lsn,
            ctx,
        )
        .await?;

        let n_blocks = timeline
            .get_rel_size(req.rel, Version::Lsn(lsn), ctx)
            .await?;

        Ok(PagestreamBeMessage::Nblocks(PagestreamNblocksResponse {
            n_blocks,
        }))
    }

    #[instrument(skip_all, fields(shard_id))]
    async fn handle_db_size_request(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        req: &PagestreamDbSizeRequest,
        ctx: &RequestContext,
    ) -> Result<PagestreamBeMessage, PageStreamError> {
        let timeline = self.get_timeline_shard_zero(tenant_id, timeline_id).await?;

        let _timer = timeline
            .query_metrics
            .start_timer(metrics::SmgrQueryType::GetDbSize, ctx);

        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        let lsn = Self::wait_or_get_last_lsn(
            timeline,
            req.request_lsn,
            req.not_modified_since,
            &latest_gc_cutoff_lsn,
            ctx,
        )
        .await?;

        let total_blocks = timeline
            .get_db_size(DEFAULTTABLESPACE_OID, req.dbnode, Version::Lsn(lsn), ctx)
            .await?;
        let db_size = total_blocks as i64 * BLCKSZ as i64;

        Ok(PagestreamBeMessage::DbSize(PagestreamDbSizeResponse {
            db_size,
        }))
    }

    /// For most getpage requests, we will already have a Timeline to serve the request: this function
    /// looks up such a Timeline synchronously and without touching any global state.
    fn get_cached_timeline_for_page(
        &mut self,
        req: &PagestreamGetPageRequest,
    ) -> Result<&Arc<Timeline>, Key> {
        let key = if let Some((first_idx, first_timeline)) = self.shard_timelines.iter().next() {
            // Fastest path: single-shard case
            if first_idx.shard_count.count() == 1 {
                return Ok(&first_timeline.timeline);
            }

            let key = rel_block_to_key(req.rel, req.blkno);
            let shard_num = first_timeline
                .timeline
                .get_shard_identity()
                .get_shard_number(&key);

            // Fast path: matched the first timeline in our local handler map. This case is common if
            // only one shard per tenant is attached to this pageserver.
            if first_timeline.timeline.get_shard_identity().number == shard_num {
                return Ok(&first_timeline.timeline);
            }

            let shard_index = ShardIndex {
                shard_number: shard_num,
                shard_count: first_timeline.timeline.get_shard_identity().count,
            };

            // Fast-ish path: the timeline is in the connection handler's local cache
            if let Some(found) = self.shard_timelines.get(&shard_index) {
                return Ok(&found.timeline);
            }

            key
        } else {
            rel_block_to_key(req.rel, req.blkno)
        };

        Err(key)
    }
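
    // Editorial sketch of the routing above, with hypothetical values: for a
    // tenant split into shard_count = 4, rel_block_to_key(rel, blkno) maps the
    // requested page to a Key, and get_shard_number(&key) picks one of shards
    // 0..=3. Only if that shard's Timeline is already present in
    // `shard_timelines` can the request be served without consulting the
    // TenantManager; otherwise the Key is returned so the caller can do the
    // slow-path lookup via `load_timeline_for_page`.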

    /// Having looked up the [`Timeline`] instance for a particular shard, cache it to enable
    /// use in future requests without having to traverse [`crate::tenant::mgr::TenantManager`]
    /// again.
    ///
    /// Note that all the Timelines in this cache are for the same timeline_id: they differ
    /// only in which shard they belong to. When we serve a getpage@lsn request, we choose a
    /// shard based on the key.
    ///
    /// The typical size of this cache is 1, as we generally create shards to distribute work
    /// across pageservers, so we don't tend to have multiple shards for the same tenant on the
    /// same pageserver.
    fn cache_timeline(
        &mut self,
        timeline: Arc<Timeline>,
    ) -> Result<&Arc<Timeline>, GetActiveTimelineError> {
        let gate_guard = timeline
            .gate
            .enter()
            .map_err(|_| GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled))?;

        let shard_index = timeline.tenant_shard_id.to_index();
        let entry = self
            .shard_timelines
            .entry(shard_index)
            .or_insert(HandlerTimeline {
                timeline,
                _guard: gate_guard,
            });

        Ok(&entry.timeline)
    }

    /// If [`Self::get_cached_timeline_for_page`] missed, then this function is used to populate the cache with
    /// a Timeline to serve requests for this key, if such a Timeline is present on this pageserver. If no such
    /// Timeline is found, then we will return an error (this indicates that the client is talking to the wrong node).
    async fn load_timeline_for_page(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        key: Key,
    ) -> anyhow::Result<&Arc<Timeline>, GetActiveTimelineError> {
        // Slow path: we must call out to the TenantManager to find the timeline for this Key
        let timeline = self
            .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Page(key))
            .await?;

        self.cache_timeline(timeline)
    }

    async fn get_timeline_shard_zero(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
    ) -> anyhow::Result<&Arc<Timeline>, GetActiveTimelineError> {
        // This is a borrow-checker workaround: we can't return from inside of the `if let Some` because
        // that would be an immutable-borrow-self return, whereas later in the function we will use a mutable
        // ref to self. So instead, we first build a bool, and then return while not borrowing self.
        let have_cached = if let Some((idx, _tl)) = self.shard_timelines.iter().next() {
            idx.shard_number == ShardNumber(0)
        } else {
            false
        };

        if have_cached {
            let entry = self.shard_timelines.iter().next().unwrap();
            Ok(&entry.1.timeline)
        } else {
            let timeline = self
                .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero)
                .await?;
            Ok(self.cache_timeline(timeline)?)
        }
    }

    #[instrument(skip_all, fields(shard_id))]
    async fn handle_get_page_at_lsn_request(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        req: &PagestreamGetPageRequest,
        ctx: &RequestContext,
    ) -> Result<PagestreamBeMessage, PageStreamError> {
        let timeline = match self.get_cached_timeline_for_page(req) {
            Ok(tl) => {
                set_tracing_field_shard_id(tl);
                tl
            }
            Err(key) => {
                match self
                    .load_timeline_for_page(tenant_id, timeline_id, key)
                    .await
                {
                    Ok(t) => t,
                    Err(GetActiveTimelineError::Tenant(GetActiveTenantError::NotFound(_))) => {
                        // We already know this tenant exists in general, because we resolved it at
                        // start of connection. Getting a NotFound here indicates that the shard containing
                        // the requested page is not present on this node: the client's knowledge of shard->pageserver
                        // mapping is out of date.
                        //
                        // Closing the connection by returning `PageStreamError::Reconnect` has the side effect of
                        // rate-limiting the above message, via the client's reconnect backoff, as well as hopefully
                        // prompting the client to load its updated configuration and talk to a different pageserver.
                        return Err(PageStreamError::Reconnect(
                            "getpage@lsn request routed to wrong shard".into(),
                        ));
                    }
                    Err(e) => return Err(e.into()),
                }
            }
        };

        let _timer = timeline
            .query_metrics
            .start_timer(metrics::SmgrQueryType::GetPageAtLsn, ctx);

        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        let lsn = Self::wait_or_get_last_lsn(
            timeline,
            req.request_lsn,
            req.not_modified_since,
            &latest_gc_cutoff_lsn,
            ctx,
        )
        .await?;

        let page = timeline
            .get_rel_page_at_lsn(req.rel, req.blkno, Version::Lsn(lsn), ctx)
            .await?;

        Ok(PagestreamBeMessage::GetPage(PagestreamGetPageResponse {
            page,
        }))
    }

    #[instrument(skip_all, fields(shard_id))]
    async fn handle_get_slru_segment_request(
        &mut self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        req: &PagestreamGetSlruSegmentRequest,
        ctx: &RequestContext,
    ) -> Result<PagestreamBeMessage, PageStreamError> {
        let timeline = self.get_timeline_shard_zero(tenant_id, timeline_id).await?;

        let _timer = timeline
            .query_metrics
            .start_timer(metrics::SmgrQueryType::GetSlruSegment, ctx);

        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        let lsn = Self::wait_or_get_last_lsn(
            timeline,
            req.request_lsn,
            req.not_modified_since,
            &latest_gc_cutoff_lsn,
            ctx,
        )
        .await?;

        let kind = SlruKind::from_repr(req.kind)
            .ok_or(PageStreamError::BadRequest("invalid SLRU kind".into()))?;
        let segment = timeline.get_slru_segment(kind, req.segno, lsn, ctx).await?;

        Ok(PagestreamBeMessage::GetSlruSegment(
            PagestreamGetSlruSegmentResponse { segment },
        ))
    }

    /// Note on "fullbackup":
    /// Full basebackups should only be used for debugging purposes.
    /// Originally, the feature was introduced to enable breaking storage format changes,
    /// but that is not applicable anymore.
    #[allow(clippy::too_many_arguments)]
    #[instrument(skip_all, fields(shard_id, ?lsn, ?prev_lsn, %full_backup))]
    async fn handle_basebackup_request<IO>(
        &mut self,
        pgb: &mut PostgresBackend<IO>,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        lsn: Option<Lsn>,
        prev_lsn: Option<Lsn>,
        full_backup: bool,
        gzip: bool,
        ctx: &RequestContext,
    ) -> Result<(), QueryError>
    where
        IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
    {
        fn map_basebackup_error(err: BasebackupError) -> QueryError {
            match err {
                BasebackupError::Client(e) => QueryError::Disconnected(ConnectionError::Io(e)),
                BasebackupError::Server(e) => QueryError::Other(e),
            }
        }

        let started = std::time::Instant::now();

        // check that the timeline exists
        let timeline = self
            .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero)
            .await?;
        let latest_gc_cutoff_lsn = timeline.get_latest_gc_cutoff_lsn();
        if let Some(lsn) = lsn {
            // Backup was requested at a particular LSN. Wait for it to arrive.
            info!("waiting for {}", lsn);
            timeline
                .wait_lsn(
                    lsn,
                    crate::tenant::timeline::WaitLsnWaiter::PageService,
                    ctx,
                )
                .await?;
            timeline
                .check_lsn_is_in_scope(lsn, &latest_gc_cutoff_lsn)
                .context("invalid basebackup lsn")?;
        }

        let lsn_awaited_after = started.elapsed();

        // switch client to COPYOUT
        pgb.write_message_noflush(&BeMessage::CopyOutResponse)
            .map_err(QueryError::Disconnected)?;
        self.flush_cancellable(pgb, &timeline.cancel).await?;

        // Send a tarball of the latest layer on the timeline. Compress if not
        // doing a fullbackup. TODO Compress in that case too (tests need to be updated)
        if full_backup {
            let mut writer = pgb.copyout_writer();
            basebackup::send_basebackup_tarball(
                &mut writer,
                &timeline,
                lsn,
                prev_lsn,
                full_backup,
                ctx,
            )
            .await
            .map_err(map_basebackup_error)?;
        } else {
            let mut writer = pgb.copyout_writer();
            if gzip {
                let mut encoder = GzipEncoder::with_quality(
                    writer,
                    // NOTE using fast compression because it's on the critical path
                    // for compute startup. For an empty database, we get
                    // <100KB with this method. The Level::Best compression method
                    // gives us <20KB, but maybe we should add basebackup caching
                    // on compute shutdown first.
                    async_compression::Level::Fastest,
                );
                basebackup::send_basebackup_tarball(
                    &mut encoder,
                    &timeline,
                    lsn,
                    prev_lsn,
                    full_backup,
                    ctx,
                )
                .await
                .map_err(map_basebackup_error)?;
                // shutdown the encoder to ensure the gzip footer is written
                encoder
                    .shutdown()
                    .await
                    .map_err(|e| QueryError::Disconnected(ConnectionError::Io(e)))?;
            } else {
                basebackup::send_basebackup_tarball(
                    &mut writer,
                    &timeline,
                    lsn,
                    prev_lsn,
                    full_backup,
                    ctx,
                )
                .await
                .map_err(map_basebackup_error)?;
            }
        }

        pgb.write_message_noflush(&BeMessage::CopyDone)
            .map_err(QueryError::Disconnected)?;
        self.flush_cancellable(pgb, &timeline.cancel).await?;

        let basebackup_after = started
            .elapsed()
            .checked_sub(lsn_awaited_after)
            .unwrap_or(Duration::ZERO);

        info!(
            lsn_await_millis = lsn_awaited_after.as_millis(),
            basebackup_millis = basebackup_after.as_millis(),
            "basebackup complete"
        );

        Ok(())
    }

    // When accessing the management API, supply None as an argument.
    // When authorizing a tenant request, pass the corresponding tenant id.
    fn check_permission(&self, tenant_id: Option<TenantId>) -> Result<(), QueryError> {
        if self.auth.is_none() {
            // auth is set to Trust, nothing to check so just return ok
            return Ok(());
        }
        // auth is some, as just checked above; when auth is some,
        // claims are always present because of checks during connection init,
        // so this expect won't trigger
        let claims = self
            .claims
            .as_ref()
            .expect("claims presence already checked");
        check_permission(claims, tenant_id).map_err(|e| QueryError::Unauthorized(e.0))
    }

    /// Shorthand for getting a reference to a Timeline of an Active tenant.
    async fn get_active_tenant_timeline(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        selector: ShardSelector,
    ) -> Result<Arc<Timeline>, GetActiveTimelineError> {
        let tenant = get_active_tenant_with_timeout(
            tenant_id,
            selector,
            ACTIVE_TENANT_TIMEOUT,
            &task_mgr::shutdown_token(),
        )
        .await
        .map_err(GetActiveTimelineError::Tenant)?;
        let timeline = tenant.get_timeline(timeline_id, true)?;
        set_tracing_field_shard_id(&timeline);
        Ok(timeline)
    }
}

#[async_trait::async_trait]
impl<IO> postgres_backend::Handler<IO> for PageServerHandler
where
    IO: AsyncRead + AsyncWrite + Send + Sync + Unpin,
{
    fn check_auth_jwt(
        &mut self,
        _pgb: &mut PostgresBackend<IO>,
        jwt_response: &[u8],
    ) -> Result<(), QueryError> {
        // this unwrap is never triggered, because check_auth_jwt is only called when auth_type
        // is NeonJWT, which requires auth to be present
        let data = self
            .auth
            .as_ref()
            .unwrap()
            .decode(str::from_utf8(jwt_response).context("jwt response is not UTF-8")?)
            .map_err(|e| QueryError::Unauthorized(e.0))?;

        if matches!(data.claims.scope, Scope::Tenant) && data.claims.tenant_id.is_none() {
            return Err(QueryError::Unauthorized(
                "jwt token scope is Tenant, but tenant id is missing".into(),
            ));
        }

        debug!(
            "jwt scope check succeeded for scope: {:#?} by tenant id: {:?}",
            data.claims.scope, data.claims.tenant_id,
        );

        self.claims = Some(data.claims);
        Ok(())
    }

    fn startup(
        &mut self,
        _pgb: &mut PostgresBackend<IO>,
        _sm: &FeStartupPacket,
    ) -> Result<(), QueryError> {
        Ok(())
    }

    #[instrument(skip_all, fields(tenant_id, timeline_id))]
    async fn process_query(
        &mut self,
        pgb: &mut PostgresBackend<IO>,
        query_string: &str,
    ) -> Result<(), QueryError> {
        fail::fail_point!("simulated-bad-compute-connection", |_| {
            info!("Hit failpoint for bad connection");
            Err(QueryError::SimulatedConnectionError)
        });

        let ctx = self.connection_ctx.attached_child();
        debug!("process query {query_string:?}");
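
        // The query strings handled below follow a simple space-separated form
        // (editorial summary of the parsing in this function; bracketed parts
        // are optional):
        //   pagestream_v2 <tenant_id> <timeline_id>
        //   pagestream <tenant_id> <timeline_id>
        //   basebackup <tenant_id> <timeline_id> [lsn] [--gzip]
        //   fullbackup <tenant_id> <timeline_id> [lsn [prev_lsn]]
        //   get_last_record_rlsn <tenant_id> <timeline_id>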
1442 0 : if query_string.starts_with("pagestream_v2 ") {
1443 0 : let (_, params_raw) = query_string.split_at("pagestream_v2 ".len());
1444 0 : let params = params_raw.split(' ').collect::<Vec<_>>();
1445 0 : if params.len() != 2 {
1446 0 : return Err(QueryError::Other(anyhow::anyhow!(
1447 0 : "invalid param number for pagestream command"
1448 0 : )));
1449 0 : }
1450 0 : let tenant_id = TenantId::from_str(params[0])
1451 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1452 0 : let timeline_id = TimelineId::from_str(params[1])
1453 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1454 :
1455 0 : tracing::Span::current()
1456 0 : .record("tenant_id", field::display(tenant_id))
1457 0 : .record("timeline_id", field::display(timeline_id));
1458 0 :
1459 0 : self.check_permission(Some(tenant_id))?;
1460 :
1461 0 : self.handle_pagerequests(
1462 0 : pgb,
1463 0 : tenant_id,
1464 0 : timeline_id,
1465 0 : PagestreamProtocolVersion::V2,
1466 0 : ctx,
1467 0 : )
1468 0 : .await?;
1469 0 : } else if query_string.starts_with("pagestream ") {
1470 0 : let (_, params_raw) = query_string.split_at("pagestream ".len());
1471 0 : let params = params_raw.split(' ').collect::<Vec<_>>();
1472 0 : if params.len() != 2 {
1473 0 : return Err(QueryError::Other(anyhow::anyhow!(
1474 0 : "invalid param number for pagestream command"
1475 0 : )));
1476 0 : }
1477 0 : let tenant_id = TenantId::from_str(params[0])
1478 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1479 0 : let timeline_id = TimelineId::from_str(params[1])
1480 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1481 :
1482 0 : tracing::Span::current()
1483 0 : .record("tenant_id", field::display(tenant_id))
1484 0 : .record("timeline_id", field::display(timeline_id));
1485 0 :
1486 0 : self.check_permission(Some(tenant_id))?;
1487 :
1488 0 : self.handle_pagerequests(
1489 0 : pgb,
1490 0 : tenant_id,
1491 0 : timeline_id,
1492 0 : PagestreamProtocolVersion::V1,
1493 0 : ctx,
1494 0 : )
1495 0 : .await?;
1496 0 : } else if query_string.starts_with("basebackup ") {
1497 0 : let (_, params_raw) = query_string.split_at("basebackup ".len());
1498 0 : let params = params_raw.split_whitespace().collect::<Vec<_>>();
1499 0 :
1500 0 : if params.len() < 2 {
1501 0 : return Err(QueryError::Other(anyhow::anyhow!(
1502 0 : "invalid param number for basebackup command"
1503 0 : )));
1504 0 : }
1505 :
1506 0 : let tenant_id = TenantId::from_str(params[0])
1507 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1508 0 : let timeline_id = TimelineId::from_str(params[1])
1509 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1510 :
1511 0 : tracing::Span::current()
1512 0 : .record("tenant_id", field::display(tenant_id))
1513 0 : .record("timeline_id", field::display(timeline_id));
1514 0 :
1515 0 : self.check_permission(Some(tenant_id))?;
1516 :
1517 0 : let lsn = if params.len() >= 3 {
1518 : Some(
1519 0 : Lsn::from_str(params[2])
1520 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?,
1521 : )
1522 : } else {
1523 0 : None
1524 : };
1525 :
1526 0 : let gzip = if params.len() >= 4 {
1527 0 : if params[3] == "--gzip" {
1528 0 : true
1529 : } else {
1530 0 : return Err(QueryError::Other(anyhow::anyhow!(
1531 0 : "Parameter in position 3 unknown {}",
1532 0 : params[3],
1533 0 : )));
1534 : }
1535 : } else {
1536 0 : false
1537 : };
1538 :
1539 0 : let metric_recording = metrics::BASEBACKUP_QUERY_TIME.start_recording(&ctx);
1540 0 : let res = async {
1541 0 : self.handle_basebackup_request(
1542 0 : pgb,
1543 0 : tenant_id,
1544 0 : timeline_id,
1545 0 : lsn,
1546 0 : None,
1547 0 : false,
1548 0 : gzip,
1549 0 : &ctx,
1550 0 : )
1551 0 : .await?;
1552 0 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
1553 0 : Result::<(), QueryError>::Ok(())
1554 0 : }
1555 0 : .await;
1556 0 : metric_recording.observe(&res);
1557 0 : res?;
1558 : }
1559 : // Return the pair of (prev_lsn, last_lsn) for the timeline.
1560 0 : else if query_string.starts_with("get_last_record_rlsn ") {
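 : // Expected form: "get_last_record_rlsn <tenant_id> <timeline_id>"; answers
 : // with a single text row (prev_lsn, last_lsn), read from shard zero.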
1561 0 : let (_, params_raw) = query_string.split_at("get_last_record_rlsn ".len());
1562 0 : let params = params_raw.split_whitespace().collect::<Vec<_>>();
1563 0 :
1564 0 : if params.len() != 2 {
1565 0 : return Err(QueryError::Other(anyhow::anyhow!(
1566 0 : "invalid param number for get_last_record_rlsn command"
1567 0 : )));
1568 0 : }
1569 :
1570 0 : let tenant_id = TenantId::from_str(params[0])
1571 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1572 0 : let timeline_id = TimelineId::from_str(params[1])
1573 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1574 :
1575 0 : tracing::Span::current()
1576 0 : .record("tenant_id", field::display(tenant_id))
1577 0 : .record("timeline_id", field::display(timeline_id));
1578 0 :
1579 0 : self.check_permission(Some(tenant_id))?;
1580 0 : async {
1581 0 : let timeline = self
1582 0 : .get_active_tenant_timeline(tenant_id, timeline_id, ShardSelector::Zero)
1583 0 : .await?;
1584 :
1585 0 : let end_of_timeline = timeline.get_last_record_rlsn();
1586 0 :
1587 0 : pgb.write_message_noflush(&BeMessage::RowDescription(&[
1588 0 : RowDescriptor::text_col(b"prev_lsn"),
1589 0 : RowDescriptor::text_col(b"last_lsn"),
1590 0 : ]))?
1591 0 : .write_message_noflush(&BeMessage::DataRow(&[
1592 0 : Some(end_of_timeline.prev.to_string().as_bytes()),
1593 0 : Some(end_of_timeline.last.to_string().as_bytes()),
1594 0 : ]))?
1595 0 : .write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
1596 0 : anyhow::Ok(())
1597 0 : }
1598 0 : .instrument(info_span!(
1599 0 : "handle_get_last_record_lsn",
1600 0 : shard_id = tracing::field::Empty
1601 0 : ))
1602 0 : .await?;
1603 : }
1604 : // Same as basebackup, but the result includes relational data as well.
1605 0 : else if query_string.starts_with("fullbackup ") {
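 : // Expected form: "fullbackup <tenant_id> <timeline_id> [<lsn> [<prev_lsn>]]".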
1606 0 : let (_, params_raw) = query_string.split_at("fullbackup ".len());
1607 0 : let params = params_raw.split_whitespace().collect::<Vec<_>>();
1608 0 :
1609 0 : if params.len() < 2 {
1610 0 : return Err(QueryError::Other(anyhow::anyhow!(
1611 0 : "invalid param number for fullbackup command"
1612 0 : )));
1613 0 : }
1614 :
1615 0 : let tenant_id = TenantId::from_str(params[0])
1616 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1617 0 : let timeline_id = TimelineId::from_str(params[1])
1618 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1619 :
1620 0 : tracing::Span::current()
1621 0 : .record("tenant_id", field::display(tenant_id))
1622 0 : .record("timeline_id", field::display(timeline_id));
1623 :
1624 : // The caller is responsible for providing correct lsn and prev_lsn.
1625 0 : let lsn = if params.len() > 2 {
1626 : Some(
1627 0 : Lsn::from_str(params[2])
1628 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?,
1629 : )
1630 : } else {
1631 0 : None
1632 : };
1633 0 : let prev_lsn = if params.len() > 3 {
1634 : Some(
1635 0 : Lsn::from_str(params[3])
1636 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[3]))?,
1637 : )
1638 : } else {
1639 0 : None
1640 : };
1641 :
1642 0 : self.check_permission(Some(tenant_id))?;
1643 :
1644 : // Stream the full backup; handle_basebackup_request checks that the timeline exists.
1645 0 : self.handle_basebackup_request(
1646 0 : pgb,
1647 0 : tenant_id,
1648 0 : timeline_id,
1649 0 : lsn,
1650 0 : prev_lsn,
1651 0 : true,
1652 0 : false,
1653 0 : &ctx,
1654 0 : )
1655 0 : .await?;
1656 0 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
1657 0 : } else if query_string.starts_with("import basebackup ") {
1658 : // Import the `base` section (everything but the wal) of a basebackup.
1659 : // Assumes the tenant already exists on this pageserver.
1660 : //
1661 : // Files are scheduled to be persisted to remote storage, and the
1662 : // caller should poll the http api to check when that is done.
1663 : //
1664 : // Example import command:
1665 : // 1. Get start/end LSN from backup_manifest file
1666 : // 2. Run:
1667 : // cat my_backup/base.tar | psql -h $PAGESERVER \
1668 : // -c "import basebackup $TENANT $TIMELINE $START_LSN $END_LSN $PG_VERSION"
1669 0 : let (_, params_raw) = query_string.split_at("import basebackup ".len());
1670 0 : let params = params_raw.split_whitespace().collect::<Vec<_>>();
1671 0 : if params.len() != 5 {
1672 0 : return Err(QueryError::Other(anyhow::anyhow!(
1673 0 : "invalid param number for import basebackup command"
1674 0 : )));
1675 0 : }
1676 0 : let tenant_id = TenantId::from_str(params[0])
1677 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1678 0 : let timeline_id = TimelineId::from_str(params[1])
1679 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1680 0 : let base_lsn = Lsn::from_str(params[2])
1681 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?;
1682 0 : let end_lsn = Lsn::from_str(params[3])
1683 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[3]))?;
1684 0 : let pg_version = u32::from_str(params[4])
1685 0 : .with_context(|| format!("Failed to parse pg_version from {}", params[4]))?;
1686 :
1687 0 : tracing::Span::current()
1688 0 : .record("tenant_id", field::display(tenant_id))
1689 0 : .record("timeline_id", field::display(timeline_id));
1690 0 :
1691 0 : self.check_permission(Some(tenant_id))?;
1692 :
1693 0 : match self
1694 0 : .handle_import_basebackup(
1695 0 : pgb,
1696 0 : tenant_id,
1697 0 : timeline_id,
1698 0 : base_lsn,
1699 0 : end_lsn,
1700 0 : pg_version,
1701 0 : ctx,
1702 0 : )
1703 0 : .await
1704 : {
1705 0 : Ok(()) => pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?,
1706 0 : Err(e) => {
1707 0 : error!("error importing base backup between {base_lsn} and {end_lsn}: {e:?}");
1708 0 : pgb.write_message_noflush(&BeMessage::ErrorResponse(
1709 0 : &e.to_string(),
1710 0 : Some(e.pg_error_code()),
1711 0 : ))?
1712 : }
1713 : };
1714 0 : } else if query_string.starts_with("import wal ") {
1715 : // Import the `pg_wal` section of a basebackup.
1716 : //
1717 : // Files are scheduled to be persisted to remote storage, and the
1718 : // caller should poll the http api to check when that is done.
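 : //
 : // Hypothetical example, by analogy with "import basebackup" above:
 : //   cat my_backup/pg_wal.tar | psql -h $PAGESERVER \
 : //       -c "import wal $TENANT $TIMELINE $START_LSN $END_LSN"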
1719 0 : let (_, params_raw) = query_string.split_at("import wal ".len());
1720 0 : let params = params_raw.split_whitespace().collect::<Vec<_>>();
1721 0 : if params.len() != 4 {
1722 0 : return Err(QueryError::Other(anyhow::anyhow!(
1723 0 : "invalid param number for import wal command"
1724 0 : )));
1725 0 : }
1726 0 : let tenant_id = TenantId::from_str(params[0])
1727 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1728 0 : let timeline_id = TimelineId::from_str(params[1])
1729 0 : .with_context(|| format!("Failed to parse timeline id from {}", params[1]))?;
1730 0 : let start_lsn = Lsn::from_str(params[2])
1731 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[2]))?;
1732 0 : let end_lsn = Lsn::from_str(params[3])
1733 0 : .with_context(|| format!("Failed to parse Lsn from {}", params[3]))?;
1734 :
1735 0 : tracing::Span::current()
1736 0 : .record("tenant_id", field::display(tenant_id))
1737 0 : .record("timeline_id", field::display(timeline_id));
1738 0 :
1739 0 : self.check_permission(Some(tenant_id))?;
1740 :
1741 0 : match self
1742 0 : .handle_import_wal(pgb, tenant_id, timeline_id, start_lsn, end_lsn, ctx)
1743 0 : .await
1744 : {
1745 0 : Ok(()) => pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?,
1746 0 : Err(e) => {
1747 0 : error!("error importing WAL between {start_lsn} and {end_lsn}: {e:?}");
1748 0 : pgb.write_message_noflush(&BeMessage::ErrorResponse(
1749 0 : &e.to_string(),
1750 0 : Some(e.pg_error_code()),
1751 0 : ))?
1752 : }
1753 : };
1754 0 : } else if query_string.to_ascii_lowercase().starts_with("set ") {
1755 : // Accept and ignore SET commands: this matters because psycopg2
1756 : // unconditionally executes "SET datestyle TO 'ISO'" on connect.
1757 0 : pgb.write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
1758 0 : } else if query_string.starts_with("show ") {
1759 : // show <tenant_id>
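 : // Answers with a single row of the tenant's effective configuration
 : // (checkpoint, compaction, gc and PITR settings), read from shard zero.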
1760 0 : let (_, params_raw) = query_string.split_at("show ".len());
1761 0 : let params = params_raw.split(' ').collect::<Vec<_>>();
1762 0 : if params.len() != 1 {
1763 0 : return Err(QueryError::Other(anyhow::anyhow!(
1764 0 : "invalid param number for config command"
1765 0 : )));
1766 0 : }
1767 0 : let tenant_id = TenantId::from_str(params[0])
1768 0 : .with_context(|| format!("Failed to parse tenant id from {}", params[0]))?;
1769 :
1770 0 : tracing::Span::current().record("tenant_id", field::display(tenant_id));
1771 0 :
1772 0 : self.check_permission(Some(tenant_id))?;
1773 :
1774 0 : let tenant = get_active_tenant_with_timeout(
1775 0 : tenant_id,
1776 0 : ShardSelector::Zero,
1777 0 : ACTIVE_TENANT_TIMEOUT,
1778 0 : &task_mgr::shutdown_token(),
1779 0 : )
1780 0 : .await?;
1781 0 : pgb.write_message_noflush(&BeMessage::RowDescription(&[
1782 0 : RowDescriptor::int8_col(b"checkpoint_distance"),
1783 0 : RowDescriptor::int8_col(b"checkpoint_timeout"),
1784 0 : RowDescriptor::int8_col(b"compaction_target_size"),
1785 0 : RowDescriptor::int8_col(b"compaction_period"),
1786 0 : RowDescriptor::int8_col(b"compaction_threshold"),
1787 0 : RowDescriptor::int8_col(b"gc_horizon"),
1788 0 : RowDescriptor::int8_col(b"gc_period"),
1789 0 : RowDescriptor::int8_col(b"image_creation_threshold"),
1790 0 : RowDescriptor::int8_col(b"pitr_interval"),
1791 0 : ]))?
1792 0 : .write_message_noflush(&BeMessage::DataRow(&[
1793 0 : Some(tenant.get_checkpoint_distance().to_string().as_bytes()),
1794 0 : Some(
1795 0 : tenant
1796 0 : .get_checkpoint_timeout()
1797 0 : .as_secs()
1798 0 : .to_string()
1799 0 : .as_bytes(),
1800 0 : ),
1801 0 : Some(tenant.get_compaction_target_size().to_string().as_bytes()),
1802 0 : Some(
1803 0 : tenant
1804 0 : .get_compaction_period()
1805 0 : .as_secs()
1806 0 : .to_string()
1807 0 : .as_bytes(),
1808 0 : ),
1809 0 : Some(tenant.get_compaction_threshold().to_string().as_bytes()),
1810 0 : Some(tenant.get_gc_horizon().to_string().as_bytes()),
1811 0 : Some(tenant.get_gc_period().as_secs().to_string().as_bytes()),
1812 0 : Some(tenant.get_image_creation_threshold().to_string().as_bytes()),
1813 0 : Some(tenant.get_pitr_interval().as_secs().to_string().as_bytes()),
1814 0 : ]))?
1815 0 : .write_message_noflush(&BeMessage::CommandComplete(b"SELECT 1"))?;
1816 : } else {
1817 0 : return Err(QueryError::Other(anyhow::anyhow!(
1818 0 : "unknown command {query_string}"
1819 0 : )));
1820 : }
1821 :
1822 0 : Ok(())
1823 0 : }
1824 : }
1825 :
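 : // Map tenant-activation failures to query-level errors: an activation
 : // timeout surfaces as a disconnect (TimedOut), cancellation or a stopping
 : // tenant as QueryError::Shutdown, and a missing tenant as QueryError::NotFound.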
1826 : impl From<GetActiveTenantError> for QueryError {
1827 0 : fn from(e: GetActiveTenantError) -> Self {
1828 0 : match e {
1829 0 : GetActiveTenantError::WaitForActiveTimeout { .. } => QueryError::Disconnected(
1830 0 : ConnectionError::Io(io::Error::new(io::ErrorKind::TimedOut, e.to_string())),
1831 0 : ),
1832 : GetActiveTenantError::Cancelled
1833 : | GetActiveTenantError::WillNotBecomeActive(TenantState::Stopping { .. }) => {
1834 0 : QueryError::Shutdown
1835 : }
1836 0 : e @ GetActiveTenantError::NotFound(_) => QueryError::NotFound(format!("{e}").into()),
1837 0 : e => QueryError::Other(anyhow::anyhow!(e)),
1838 : }
1839 0 : }
1840 : }
1841 :
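 : // Failure to resolve an *active* timeline: either the tenant did not
 : // become active, or the timeline was not found on the (active) tenant.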
1842 0 : #[derive(Debug, thiserror::Error)]
1843 : enum GetActiveTimelineError {
1844 : #[error(transparent)]
1845 : Tenant(GetActiveTenantError),
1846 : #[error(transparent)]
1847 : Timeline(#[from] GetTimelineError),
1848 : }
1849 :
1850 : impl From<GetActiveTimelineError> for QueryError {
1851 0 : fn from(e: GetActiveTimelineError) -> Self {
1852 0 : match e {
1853 0 : GetActiveTimelineError::Tenant(GetActiveTenantError::Cancelled) => QueryError::Shutdown,
1854 0 : GetActiveTimelineError::Tenant(e) => e.into(),
1855 0 : GetActiveTimelineError::Timeline(e) => QueryError::NotFound(format!("{e}").into()),
1856 : }
1857 0 : }
1858 : }
1859 :
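 : // Record the timeline's shard id on the current tracing span. The debug
 : // assertions verify that the tenant/timeline ids are set before the call
 : // and that the shard id is present afterwards.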
1860 0 : fn set_tracing_field_shard_id(timeline: &Timeline) {
1861 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
1862 0 : tracing::Span::current().record(
1863 0 : "shard_id",
1864 0 : tracing::field::display(timeline.tenant_shard_id.shard_slug()),
1865 0 : );
1866 0 : debug_assert_current_span_has_tenant_and_timeline_id();
1867 0 : }