// pageserver/src/tenant/timeline/walreceiver/walreceiver_connection.rs
//! Actual Postgres connection handler to stream WAL to the server.

use std::error::Error;
use std::pin::pin;
use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, SystemTime};

use anyhow::{Context, anyhow};
use bytes::BytesMut;
use chrono::{NaiveDateTime, Utc};
use fail::fail_point;
use futures::StreamExt;
use postgres_backend::is_expected_io_error;
use postgres_connection::PgConnectionConfig;
use postgres_ffi::WAL_SEGMENT_SIZE;
use postgres_ffi::v14::xlog_utils::normalize_lsn;
use postgres_ffi::waldecoder::{WalDecodeError, WalStreamDecoder};
use postgres_protocol::message::backend::ReplicationMessage;
use postgres_types::PgLsn;
use tokio::sync::watch;
use tokio::{select, time};
use tokio_postgres::error::SqlState;
use tokio_postgres::replication::ReplicationStream;
use tokio_postgres::{Client, SimpleQueryMessage, SimpleQueryRow};
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, debug, error, info, trace, warn};
use utils::critical;
use utils::id::NodeId;
use utils::lsn::Lsn;
use utils::pageserver_feedback::PageserverFeedback;
use utils::postgres_client::PostgresClientProtocol;
use utils::sync::gate::GateError;
use wal_decoder::models::{FlushUncommittedRecords, InterpretedWalRecord, InterpretedWalRecords};
use wal_decoder::wire_format::FromWireFormat;

use super::TaskStateUpdate;
use crate::context::RequestContext;
use crate::metrics::{LIVE_CONNECTIONS, WAL_INGEST, WALRECEIVER_STARTED_CONNECTIONS};
use crate::pgdatadir_mapping::DatadirModification;
use crate::task_mgr::{TaskKind, WALRECEIVER_RUNTIME};
use crate::tenant::{
    Timeline, WalReceiverInfo, debug_assert_current_span_has_tenant_and_timeline_id,
};
use crate::walingest::WalIngest;

/// Status of the connection.
#[derive(Debug, Clone, Copy)]
pub(super) struct WalConnectionStatus {
    /// If we were able to initiate a postgres connection, this means that the safekeeper process is at least running.
    pub is_connected: bool,
    /// Defines a healthy connection as one on which the pageserver received WAL from the safekeeper
    /// and was able to process it in walingest without errors.
    pub has_processed_wal: bool,
    /// Connection establishment time, or the timestamp of the latest connection message received.
    pub latest_connection_update: NaiveDateTime,
    /// Time of the latest WAL message received.
    pub latest_wal_update: NaiveDateTime,
    /// The latest WAL update contained WAL up to this LSN. The next WAL message will start from that LSN.
    pub streaming_lsn: Option<Lsn>,
    /// Latest commit_lsn received from the safekeeper. Can be zero if no message has been received yet.
    pub commit_lsn: Option<Lsn>,
    /// The node it is connected to
    pub node: NodeId,
}
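
// How the status is consumed: updates flow through a `tokio::sync::watch`
// channel, so a reader only ever sees the most recent snapshot. A hedged
// sketch of the receiving side (the `rx` receiver here is hypothetical,
// owned by the connection manager in the parent module):
//
//     while rx.changed().await.is_ok() {
//         if let TaskStateUpdate::Progress(status) = *rx.borrow() {
//             // e.g. judge connection health from the staleness of
//             // `status.latest_wal_update` and decide whether to switch
//             // to another safekeeper.
//         }
//     }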

pub(super) enum WalReceiverError {
    /// An error of a type that does not indicate an issue, e.g. a connection closing
    ExpectedSafekeeperError(tokio_postgres::Error),
    /// An "error" message that carries a SUCCESSFUL_COMPLETION status code.  Carries
    /// the message part of the original postgres error
    SuccessfulCompletion(String),
    /// Generic error
    Other(anyhow::Error),
    ClosedGate,
}

impl From<tokio_postgres::Error> for WalReceiverError {
    fn from(err: tokio_postgres::Error) -> Self {
        if let Some(dberror) = err.as_db_error().filter(|db_error| {
            db_error.code() == &SqlState::SUCCESSFUL_COMPLETION
                && db_error.message().contains("ending streaming")
        }) {
            // Strip the outer DbError, which carries a misleading "error" severity
            Self::SuccessfulCompletion(dberror.message().to_string())
        } else if err.is_closed()
            || err
                .source()
                .and_then(|source| source.downcast_ref::<std::io::Error>())
                .map(is_expected_io_error)
                .unwrap_or(false)
        {
            Self::ExpectedSafekeeperError(err)
        } else {
            Self::Other(anyhow::Error::new(err))
        }
    }
}
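
// To illustrate the classification above (informal examples, not exhaustive):
//
//   - a safekeeper ending the stream deliberately surfaces as a DbError with
//     SQLSTATE 00000 (SUCCESSFUL_COMPLETION) and a message containing
//     "ending streaming"                          -> SuccessfulCompletion
//   - a closed client, or an I/O error that `is_expected_io_error` recognizes
//     (e.g. a reset TCP connection)               -> ExpectedSafekeeperError
//   - anything else (protocol violations, unexpected SQL errors) -> Other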

impl From<anyhow::Error> for WalReceiverError {
    fn from(err: anyhow::Error) -> Self {
        Self::Other(err)
    }
}

impl From<WalDecodeError> for WalReceiverError {
    fn from(err: WalDecodeError) -> Self {
        Self::Other(anyhow::Error::new(err))
    }
}

/// Open a connection to the given safekeeper and receive WAL, sending back progress
/// messages as we go.
#[allow(clippy::too_many_arguments)]
pub(super) async fn handle_walreceiver_connection(
    timeline: Arc<Timeline>,
    protocol: PostgresClientProtocol,
    wal_source_connconf: PgConnectionConfig,
    events_sender: watch::Sender<TaskStateUpdate<WalConnectionStatus>>,
    cancellation: CancellationToken,
    connect_timeout: Duration,
    ctx: RequestContext,
    safekeeper_node: NodeId,
    ingest_batch_size: u64,
    validate_wal_contiguity: bool,
) -> Result<(), WalReceiverError> {
    debug_assert_current_span_has_tenant_and_timeline_id();

    // prevent timeline shutdown from finishing until we have exited
    let _guard = timeline.gate.enter().map_err(|e| match e {
        GateError::GateClosed => WalReceiverError::ClosedGate,
    })?;
    // This function spawns a side-car task (WalReceiverConnectionPoller).
    // Get its gate guard now as well.
    let poller_guard = timeline.gate.enter().map_err(|e| match e {
        GateError::GateClosed => WalReceiverError::ClosedGate,
    })?;
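
    // The gate idiom used above, sketched informally (assuming the semantics
    // of `utils::sync::gate`): `Gate::close()` waits until every outstanding
    // guard has been dropped, and `Gate::enter()` fails with `GateClosed` once
    // close has begun. Holding one guard per task therefore guarantees that
    // timeline shutdown cannot complete while either this function or the
    // side-car poller task is still running.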

    WALRECEIVER_STARTED_CONNECTIONS.inc();

    // Connect to the database in replication mode.
    info!("connecting to {wal_source_connconf:?}");

    let (replication_client, connection) = {
        let mut config = wal_source_connconf.to_tokio_postgres_config();
        config.application_name(format!("pageserver-{}", timeline.conf.id.0).as_str());
        config.replication_mode(tokio_postgres::config::ReplicationMode::Physical);
        match time::timeout(connect_timeout, config.connect(tokio_postgres::NoTls)).await {
            Ok(client_and_conn) => client_and_conn?,
            Err(_elapsed) => {
                // Timing out while connecting to a safekeeper node can happen for many
                // reasons that the pageserver cannot control. Do not produce an error,
                // but make the timeout visible by logging the event.
                info!(
                    "Timed out while waiting {connect_timeout:?} for walreceiver connection to open"
                );
                return Ok(());
            }
        }
    };

    debug!("connected!");
    let mut connection_status = WalConnectionStatus {
        is_connected: true,
        has_processed_wal: false,
        latest_connection_update: Utc::now().naive_utc(),
        latest_wal_update: Utc::now().naive_utc(),
        streaming_lsn: None,
        commit_lsn: None,
        node: safekeeper_node,
    };
    if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) {
        warn!(
            "Wal connection event listener dropped right after connection init, aborting the connection: {e}"
        );
        return Ok(());
    }

    // The connection object performs the actual communication with the database,
    // so spawn it off to run on its own. It shouldn't outlive this function, but,
    // due to lack of async drop, we can't enforce that. However, we ensure that
    // 1. it is sensitive to `cancellation` and
    // 2. holds the Timeline gate open so that after timeline shutdown,
    //    we know this task is gone.
    let _connection_ctx = ctx.detached_child(
        TaskKind::WalReceiverConnectionPoller,
        ctx.download_behavior(),
    );
    let connection_cancellation = cancellation.clone();
    WALRECEIVER_RUNTIME.spawn(
        async move {
            debug_assert_current_span_has_tenant_and_timeline_id();
            select! {
                connection_result = connection => match connection_result {
                    Ok(()) => debug!("Walreceiver db connection closed"),
                    Err(connection_error) => {
                        match WalReceiverError::from(connection_error) {
                            WalReceiverError::ExpectedSafekeeperError(_) => {
                                // silence, because most likely we've already exited the outer call
                                // with a similar error.
                            },
                            WalReceiverError::SuccessfulCompletion(_) => {}
                            WalReceiverError::ClosedGate => {
                                // doesn't happen at runtime
                            }
                            WalReceiverError::Other(err) => {
                                warn!("Connection aborted: {err:#}")
                            }
                        }
                    }
                },
                _ = connection_cancellation.cancelled() => debug!("Connection cancelled"),
            }
            drop(poller_guard);
        }
        // Enrich the log lines emitted by this closure with meaningful context.
        // TODO: technically, this task outlives the surrounding function, so, the
        // spans won't be properly nested.
        .instrument(tracing::info_span!("poller")),
    );

    let _guard = LIVE_CONNECTIONS
        .with_label_values(&["wal_receiver"])
        .guard();

    let identify = identify_system(&replication_client).await?;
    info!("{identify:?}");

    let end_of_wal = Lsn::from(u64::from(identify.xlogpos));
    let mut caught_up = false;

    connection_status.latest_connection_update = Utc::now().naive_utc();
    connection_status.latest_wal_update = Utc::now().naive_utc();
    connection_status.commit_lsn = Some(end_of_wal);
    if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) {
        warn!(
            "Wal connection event listener dropped after IDENTIFY_SYSTEM, aborting the connection: {e}"
        );
        return Ok(());
    }

    //
    // Start streaming the WAL, from where we left off previously.
    //
    // If we had previously received WAL up to some point in the middle of a WAL record, we
    // better start from the end of the last full WAL record, not in the middle of one.
    let mut last_rec_lsn = timeline.get_last_record_lsn();
    let mut startpoint = last_rec_lsn;

    if startpoint == Lsn(0) {
        return Err(WalReceiverError::Other(anyhow!("No previous WAL position")));
    }

    // There might be some padding after the last full record, skip it.
    startpoint += startpoint.calc_padding(8u32);

    // If the starting point is at a WAL page boundary, skip past the page header. We don't need the page headers
    // for anything, and in some corner cases, the compute node might have never generated the WAL for page headers.
    // That happens if you create a branch at a page boundary: the start point of the branch is at the page boundary,
    // but when the compute node first starts on the branch, we normalize the first REDO position to just after the page
    // header (see generate_pg_control()), so the WAL for the page header is never streamed from the compute node
    // to the safekeepers.
    startpoint = normalize_lsn(startpoint, WAL_SEGMENT_SIZE);

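    // A worked example of the padding step above (8-byte alignment; the
    // concrete LSN is illustrative): if the last record ends at 0/155D,
    // calc_padding(8) yields 3 and the start point becomes 0/1560, the next
    // 8-byte boundary. An already-aligned LSN gets zero padding. In general:
    //
    //     padding = (8 - (lsn % 8)) % 8
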
    info!(
        "last_record_lsn {last_rec_lsn} starting replication from {startpoint}, safekeeper is at {end_of_wal}..."
    );

    let query = format!("START_REPLICATION PHYSICAL {startpoint}");

    let copy_stream = replication_client.copy_both_simple(&query).await?;
    let mut physical_stream = pin!(ReplicationStream::new(copy_stream));

    let mut waldecoder = WalStreamDecoder::new(startpoint, timeline.pg_version);

    let mut walingest = WalIngest::new(timeline.as_ref(), startpoint, &ctx).await?;

    let shard = vec![*timeline.get_shard_identity()];

    let interpreted_proto_config = match protocol {
        PostgresClientProtocol::Vanilla => None,
        PostgresClientProtocol::Interpreted {
            format,
            compression,
        } => Some((format, compression)),
    };

    let mut expected_wal_start = startpoint;
    while let Some(replication_message) = {
        select! {
            _ = cancellation.cancelled() => {
                debug!("walreceiver interrupted");
                None
            }
            replication_message = physical_stream.next() => replication_message,
        }
    } {
        let replication_message = replication_message?;

        let now = Utc::now().naive_utc();
        let last_rec_lsn_before_msg = last_rec_lsn;

        // Update the connection status before processing the message. If the message processing
        // fails (e.g. in walingest), we still want to know the latest LSNs from the safekeeper.
        match &replication_message {
            ReplicationMessage::XLogData(xlog_data) => {
                connection_status.latest_connection_update = now;
                connection_status.commit_lsn = Some(Lsn::from(xlog_data.wal_end()));
                connection_status.streaming_lsn = Some(Lsn::from(
                    xlog_data.wal_start() + xlog_data.data().len() as u64,
                ));
                if !xlog_data.data().is_empty() {
                    connection_status.latest_wal_update = now;
                }
            }
            ReplicationMessage::PrimaryKeepAlive(keepalive) => {
                connection_status.latest_connection_update = now;
                connection_status.commit_lsn = Some(Lsn::from(keepalive.wal_end()));
            }
            ReplicationMessage::RawInterpretedWalRecords(raw) => {
                connection_status.latest_connection_update = now;
                if !raw.data().is_empty() {
                    connection_status.latest_wal_update = now;
                }

                connection_status.commit_lsn = Some(Lsn::from(raw.commit_lsn()));
                connection_status.streaming_lsn = Some(Lsn::from(raw.streaming_lsn()));
            }
            &_ => {}
        };
        if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) {
            warn!("Wal connection event listener dropped, aborting the connection: {e}");
            return Ok(());
        }

        let status_update = match replication_message {
            ReplicationMessage::RawInterpretedWalRecords(raw) => {
                WAL_INGEST.bytes_received.inc_by(raw.data().len() as u64);

                let mut uncommitted_records = 0;

                // This is the end LSN of the raw WAL from which the records
                // were interpreted.
                let streaming_lsn = Lsn::from(raw.streaming_lsn());

                let (format, compression) = interpreted_proto_config.unwrap();
                let batch = InterpretedWalRecords::from_wire(raw.data(), format, compression)
                    .await
                    .with_context(|| {
                        anyhow::anyhow!(
                            "Failed to deserialize interpreted records ending at LSN {streaming_lsn}"
                        )
                    })?;

                // Guard against WAL gaps. If the start LSN of the PG WAL section
                // from which the interpreted records were extracted doesn't match
                // the end of the previous batch (or the starting point for the first batch),
                // then kill this WAL receiver connection and start a new one.
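                //
                // For example (LSNs illustrative): if the previous batch ended
                // at 0/2000 (`expected_wal_start`) and this batch reports a
                // `raw_wal_start_lsn` of 0/3000, the WAL in [0/2000, 0/3000)
                // was never delivered, so we bail out. A `raw_wal_start_lsn`
                // below 0/2000 is tolerated (another shard is lagging behind
                // us), as long as no already-seen record, i.e. one whose
                // next_record_lsn is below our `last_rec_lsn`, shows up again.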
                if validate_wal_contiguity {
                    if let Some(raw_wal_start_lsn) = batch.raw_wal_start_lsn {
                        match raw_wal_start_lsn.cmp(&expected_wal_start) {
                            std::cmp::Ordering::Greater => {
                                let msg = format!(
                                    "Gap in streamed WAL: [{}, {})",
                                    expected_wal_start, raw_wal_start_lsn
                                );
                                critical!("{msg}");
                                return Err(WalReceiverError::Other(anyhow!(msg)));
                            }
                            std::cmp::Ordering::Less => {
                                // Other shards are reading WAL behind us.
                                // This is valid, but check that we received records
                                // that we haven't seen before.
                                if let Some(first_rec) = batch.records.first() {
                                    if first_rec.next_record_lsn < last_rec_lsn {
                                        let msg = format!(
                                            "Received record with next_record_lsn multiple times ({} < {})",
                                            first_rec.next_record_lsn, last_rec_lsn
                                        );
                                        critical!("{msg}");
                                        return Err(WalReceiverError::Other(anyhow!(msg)));
                                    }
                                }
                            }
                            std::cmp::Ordering::Equal => {}
                        }
                    }
                }

                let InterpretedWalRecords {
                    records,
                    next_record_lsn,
                    raw_wal_start_lsn: _,
                } = batch;

                tracing::debug!(
                    "Received WAL up to {} with next_record_lsn={}",
                    streaming_lsn,
                    next_record_lsn
                );

                // We start the modification at 0 because each interpreted record
                // advances it to its end LSN. 0 is just an initialization placeholder.
                let mut modification = timeline.begin_modification(Lsn(0));

                async fn commit(
                    modification: &mut DatadirModification<'_>,
                    ctx: &RequestContext,
                    uncommitted: &mut u64,
                ) -> anyhow::Result<()> {
                    let stats = modification.stats();
                    modification.commit(ctx).await?;
                    WAL_INGEST.records_committed.inc_by(*uncommitted);
                    WAL_INGEST.inc_values_committed(&stats);
                    *uncommitted = 0;
                    Ok(())
                }

                if !records.is_empty() {
                    timeline
                        .metrics
                        .wal_records_received
                        .inc_by(records.len() as u64);
                }

                for interpreted in records {
                    if matches!(interpreted.flush_uncommitted, FlushUncommittedRecords::Yes)
                        && uncommitted_records > 0
                    {
                        commit(&mut modification, &ctx, &mut uncommitted_records).await?;
                    }

                    let local_next_record_lsn = interpreted.next_record_lsn;

                    if interpreted.is_observed() {
                        WAL_INGEST.records_observed.inc();
                    }

                    walingest
                        .ingest_record(interpreted, &mut modification, &ctx)
                        .await
                        .with_context(|| {
                            format!("could not ingest record at {local_next_record_lsn}")
                        })
                        .inspect_err(|err| {
                            // TODO: we can't differentiate cancellation errors with
                            // anyhow::Error, so just ignore it if we're cancelled.
                            if !cancellation.is_cancelled() {
                                critical!("{err:?}")
                            }
                        })?;

                    uncommitted_records += 1;

                    // FIXME: this cannot be made pausable_failpoint without fixing the
                    // failpoint library; in tests, the added amount of debugging will cause us
                    // to timeout the tests.
                    fail_point!("walreceiver-after-ingest");

                    // Commit every ingest_batch_size records. Even if we filtered out
                    // all records, we still need to call commit to advance the LSN.
                    if uncommitted_records >= ingest_batch_size
                        || modification.approx_pending_bytes()
                            > DatadirModification::MAX_PENDING_BYTES
                    {
                        commit(&mut modification, &ctx, &mut uncommitted_records).await?;
                    }
                }

                // Records might have been filtered out on the safekeeper side, but we still
                // need to advance the last record LSN on all shards. If we've not ingested the latest
                // record, then set the LSN of the modification past it. This way all shards
                // advance their last record LSN at the same time.
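                // For instance, on a sharded tenant a batch sent to shard 1 may
                // contain zero records because every record in that WAL span
                // belonged to shard 0. The batch still carries `next_record_lsn`,
                // so shard 1 commits an (empty) modification at that LSN and its
                // last record LSN keeps pace with shard 0's.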
                let needs_last_record_lsn_advance = if next_record_lsn > modification.get_lsn() {
                    modification.set_lsn(next_record_lsn).unwrap();
                    true
                } else {
                    false
                };

                if uncommitted_records > 0 || needs_last_record_lsn_advance {
                    // Commit any uncommitted records
                    commit(&mut modification, &ctx, &mut uncommitted_records).await?;
                }

                if !caught_up && streaming_lsn >= end_of_wal {
                    info!("caught up at LSN {streaming_lsn}");
                    caught_up = true;
                }

                tracing::debug!(
                    "Ingested WAL up to {streaming_lsn}. Last record LSN is {}",
                    timeline.get_last_record_lsn()
                );

                last_rec_lsn = next_record_lsn;
                expected_wal_start = streaming_lsn;

                Some(streaming_lsn)
            }

            ReplicationMessage::XLogData(xlog_data) => {
                async fn commit(
                    modification: &mut DatadirModification<'_>,
                    uncommitted: &mut u64,
                    filtered: &mut u64,
                    ctx: &RequestContext,
                ) -> anyhow::Result<()> {
                    let stats = modification.stats();
                    modification.commit(ctx).await?;
                    WAL_INGEST
                        .records_committed
                        .inc_by(*uncommitted - *filtered);
                    WAL_INGEST.inc_values_committed(&stats);
                    *uncommitted = 0;
                    *filtered = 0;
                    Ok(())
                }

                // Pass the WAL data to the decoder, and see if we can decode
                // more records as a result.
                let data = xlog_data.data();
                let startlsn = Lsn::from(xlog_data.wal_start());
                let endlsn = startlsn + data.len() as u64;

                trace!("received XLogData between {startlsn} and {endlsn}");

                WAL_INGEST.bytes_received.inc_by(data.len() as u64);
                waldecoder.feed_bytes(data);

                {
                    let mut modification = timeline.begin_modification(startlsn);
                    let mut uncommitted_records = 0;
                    let mut filtered_records = 0;

                    while let Some((next_record_lsn, recdata)) = waldecoder.poll_decode()? {
                        // It is important to deal in aligned records, as the LSN in getPage@LSN is
                        // aligned and can be several bytes bigger. Without this alignment we are
                        // at risk of hitting a deadlock.
                        if !next_record_lsn.is_aligned() {
                            return Err(WalReceiverError::Other(anyhow!("LSN not aligned")));
                        }

                        // Deserialize and interpret WAL record
                        let interpreted = InterpretedWalRecord::from_bytes_filtered(
                            recdata,
                            &shard,
                            next_record_lsn,
                            modification.tline.pg_version,
                        )?
                        .remove(timeline.get_shard_identity())
                        .unwrap();

                        if matches!(interpreted.flush_uncommitted, FlushUncommittedRecords::Yes)
                            && uncommitted_records > 0
                        {
                            // Special case: legacy PG database creations operate by reading pages from a 'template' database:
                            // these are the only kinds of WAL record that require reading data blocks while ingesting.  Ensure
                            // all earlier writes of data blocks are visible by committing any modification in flight.
                            commit(
                                &mut modification,
                                &mut uncommitted_records,
                                &mut filtered_records,
                                &ctx,
                            )
                            .await?;
                        }

                        // Ingest the records without immediately committing them.
                        timeline.metrics.wal_records_received.inc();
                        let ingested = walingest
                            .ingest_record(interpreted, &mut modification, &ctx)
                            .await
                            .with_context(|| {
                                format!("could not ingest record at {next_record_lsn}")
                            })
                            .inspect_err(|err| {
                                // TODO: we can't differentiate cancellation errors with
                                // anyhow::Error, so just ignore it if we're cancelled.
                                if !cancellation.is_cancelled() {
                                    critical!("{err:?}")
                                }
                            })?;
                        if !ingested {
                            tracing::debug!("ingest: filtered out record @ LSN {next_record_lsn}");
                            WAL_INGEST.records_filtered.inc();
                            filtered_records += 1;
                        }

                        // FIXME: this cannot be made pausable_failpoint without fixing the
                        // failpoint library; in tests, the added amount of debugging will cause us
                        // to timeout the tests.
                        fail_point!("walreceiver-after-ingest");

                        last_rec_lsn = next_record_lsn;

                        // Commit every ingest_batch_size records. Even if we filtered out
                        // all records, we still need to call commit to advance the LSN.
                        uncommitted_records += 1;
                        if uncommitted_records >= ingest_batch_size
                            || modification.approx_pending_bytes()
                                > DatadirModification::MAX_PENDING_BYTES
                        {
                            commit(
                                &mut modification,
                                &mut uncommitted_records,
                                &mut filtered_records,
                                &ctx,
                            )
                            .await?;
                        }
                    }

                    // Commit the remaining records.
                    if uncommitted_records > 0 {
                        commit(
                            &mut modification,
                            &mut uncommitted_records,
                            &mut filtered_records,
                            &ctx,
                        )
                        .await?;
                    }
                }

                if !caught_up && endlsn >= end_of_wal {
                    info!("caught up at LSN {endlsn}");
                    caught_up = true;
                }

                Some(endlsn)
            }

            ReplicationMessage::PrimaryKeepAlive(keepalive) => {
                let wal_end = keepalive.wal_end();
                let timestamp = keepalive.timestamp();
                let reply_requested = keepalive.reply() != 0;

                trace!(
                    "received PrimaryKeepAlive(wal_end: {wal_end}, timestamp: {timestamp:?} reply: {reply_requested})"
                );

                if reply_requested {
                    Some(last_rec_lsn)
                } else {
                    None
                }
            }

            _ => None,
        };

        if !connection_status.has_processed_wal && last_rec_lsn > last_rec_lsn_before_msg {
            // We have successfully processed at least one WAL record.
            connection_status.has_processed_wal = true;
            if let Err(e) = events_sender.send(TaskStateUpdate::Progress(connection_status)) {
                warn!("Wal connection event listener dropped, aborting the connection: {e}");
                return Ok(());
            }
        }

        if let Some(last_lsn) = status_update {
            let timeline_remote_consistent_lsn = timeline
                .get_remote_consistent_lsn_visible()
                .unwrap_or(Lsn(0));

            // The last LSN we processed. It is not guaranteed to survive pageserver crash.
            let last_received_lsn = last_lsn;
            // `disk_consistent_lsn` is the LSN at which page server guarantees local persistence of all received data
            let disk_consistent_lsn = timeline.get_disk_consistent_lsn();
            // The last LSN that is synced to remote storage and is guaranteed to survive pageserver crash
            // Used by safekeepers to remove WAL preceding `remote_consistent_lsn`.
            let remote_consistent_lsn = timeline_remote_consistent_lsn;
            let ts = SystemTime::now();

            // Update the status about what we just received. This is shown in the mgmt API.
            let last_received_wal = WalReceiverInfo {
                wal_source_connconf: wal_source_connconf.clone(),
                last_received_msg_lsn: last_lsn,
                last_received_msg_ts: ts
                    .duration_since(SystemTime::UNIX_EPOCH)
                    .expect("Received message time should be after UNIX EPOCH!")
                    .as_micros(),
            };
            *timeline.last_received_wal.lock().unwrap() = Some(last_received_wal);

            // Send the replication feedback message.
            // Regular standby_status_update fields are put into this message.
            let current_timeline_size = if timeline.tenant_shard_id.is_shard_zero() {
                timeline
                    .get_current_logical_size(
                        crate::tenant::timeline::GetLogicalSizePriority::User,
                        &ctx,
                    )
                    // FIXME: https://github.com/neondatabase/neon/issues/5963
                    .size_dont_care_about_accuracy()
            } else {
                // Non-zero shards send zero for logical size.  The safekeeper will ignore
                // this number.  This is because in a sharded tenant, only shard zero maintains
                // accurate logical size.
                0
            };

            let status_update = PageserverFeedback {
                current_timeline_size,
                last_received_lsn,
                disk_consistent_lsn,
                remote_consistent_lsn,
                replytime: ts,
                shard_number: timeline.tenant_shard_id.shard_number.0 as u32,
            };

            debug!("neon_status_update {status_update:?}");

            let mut data = BytesMut::new();
            status_update.serialize(&mut data);
            physical_stream
                .as_mut()
                .zenith_status_update(data.len() as u64, &data)
                .await?;
        }
    }

    Ok(())
}

/// Data returned from the postgres `IDENTIFY_SYSTEM` command
///
/// See the [postgres docs] for more details.
///
/// [postgres docs]: https://www.postgresql.org/docs/current/protocol-replication.html
#[derive(Debug)]
// As of nightly 2021-09-11, fields that are only read by the type's `Debug` impl still count as
// unused. Relevant issue: https://github.com/rust-lang/rust/issues/88900
#[allow(dead_code)]
struct IdentifySystem {
    systemid: u64,
    timeline: u32,
    xlogpos: PgLsn,
    dbname: Option<String>,
}
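
// For reference, a successful IDENTIFY_SYSTEM response is a single row with
// four columns (the values below are illustrative):
//
//         systemid        | timeline |  xlogpos  | dbname
//   ---------------------+----------+-----------+--------
//    7123456789012345678 |        1 | 0/16B3748 |
//
// `dbname` is null for physical replication connections, which is why the
// corresponding field above is an Option.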

/// There was a problem parsing the response to
/// a postgres IDENTIFY_SYSTEM command.
#[derive(Debug, thiserror::Error)]
#[error("IDENTIFY_SYSTEM parse error")]
struct IdentifyError;

/// Run the postgres `IDENTIFY_SYSTEM` command
async fn identify_system(client: &Client) -> anyhow::Result<IdentifySystem> {
    let query_str = "IDENTIFY_SYSTEM";
    let response = client.simple_query(query_str).await?;

    // get(N) from row, then parse it as some destination type.
    fn get_parse<T>(row: &SimpleQueryRow, idx: usize) -> Result<T, IdentifyError>
    where
        T: FromStr,
    {
        let val = row.get(idx).ok_or(IdentifyError)?;
        val.parse::<T>().or(Err(IdentifyError))
    }

    // Extract the row contents into an IdentifySystem struct.
    if let Some(SimpleQueryMessage::Row(first_row)) = response.first() {
        Ok(IdentifySystem {
            systemid: get_parse(first_row, 0)?,
            timeline: get_parse(first_row, 1)?,
            xlogpos: get_parse(first_row, 2)?,
            dbname: get_parse(first_row, 3).ok(),
        })
    } else {
        Err(IdentifyError.into())
    }
}
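
#[cfg(test)]
mod tests {
    use utils::lsn::Lsn;

    // A minimal sketch of a test for the start-point alignment math used in
    // `handle_walreceiver_connection`, assuming `Lsn::calc_padding` returns
    // the number of bytes needed to reach the next boundary, as its use above
    // (`startpoint += startpoint.calc_padding(8u32)`) implies.
    #[test]
    fn start_point_is_rounded_up_to_an_8_byte_boundary() {
        // 0x155D is 3 bytes short of the next 8-byte boundary, 0x1560.
        assert_eq!(Lsn(0x155D).calc_padding(8u32), 3);
        // An already-aligned LSN needs no padding.
        assert_eq!(Lsn(0x1560).calc_padding(8u32), 0);
    }
}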