//! This module implements pulling WAL from peer safekeepers if compute can't
//! provide it, i.e. the safekeeper lags too much.

use std::time::SystemTime;
use std::{fmt, pin::pin};

use anyhow::{bail, Context};
use futures::StreamExt;
use postgres_protocol::message::backend::ReplicationMessage;
use tokio::sync::mpsc::{channel, Receiver, Sender};
use tokio::time::timeout;
use tokio::{
    select,
    time::sleep,
    time::{self, Duration},
};
use tokio_postgres::replication::ReplicationStream;
use tokio_postgres::types::PgLsn;
use tracing::*;
use utils::postgres_client::{ConnectionConfigArgs, PostgresClientProtocol};
use utils::{id::NodeId, lsn::Lsn, postgres_client::wal_stream_connection_config};

use crate::receive_wal::{WalAcceptor, REPLY_QUEUE_SIZE};
use crate::safekeeper::{AppendRequest, AppendRequestHeader};
use crate::timeline::WalResidentTimeline;
use crate::{
    http::routes::TimelineStatus,
    receive_wal::MSG_QUEUE_SIZE,
    safekeeper::{
        AcceptorProposerMessage, ProposerAcceptorMessage, ProposerElected, Term, TermHistory,
        TermLsn, VoteRequest,
    },
    timeline::PeerInfo,
    SafeKeeperConf,
};

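// Overview of the flow implemented below:
//   recovery_main_loop -> recovery_needed: poll peers and pick donor(s);
//   recover: learn the donor's term history over HTTP, vote locally, and
//       truncate local WAL at the last common point;
//   recovery_stream -> network_io / WalAcceptor / read_replies: pull WAL from
//       the donor, write it to disk, and watch for term changes.
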
/// Entrypoint for the per-timeline task which always runs, checking whether
/// recovery for this safekeeper is needed and starting it if so.
#[instrument(name = "recovery", skip_all, fields(ttid = %tli.ttid))]
pub async fn recovery_main(tli: WalResidentTimeline, conf: SafeKeeperConf) {
    info!("started");

    let cancel = tli.cancel.clone();
    select! {
        _ = recovery_main_loop(tli, conf) => { unreachable!() }
        _ = cancel.cancelled() => {
            info!("stopped");
        }
    }
}

/// Should we start fetching WAL from a peer safekeeper, and if yes, from
/// which one? The answer is yes, i.e. .donors is not empty, if 1) there is
/// something to fetch and we can do that without running elections; 2) there
/// is no actively streaming compute, as we don't want to compete with it.
///
/// If donor(s) are chosen, their term is guaranteed to be equal to their
/// last_log_term, so we are sure such a leader has ever been elected.
///
/// All possible donors are returned so that we can keep the connection to the
/// current one if it is good, even if it slightly lags behind.
///
/// Note that the term conditions above might not be met while safekeepers are
/// still not aligned on the last flush_lsn. Generally, in this case, until
/// elections are run it is not possible to say which safekeeper should
/// recover from which one -- the history which would be committed differs
/// depending on the assembled quorum (e.g. the classic figure 8 from the Raft
/// paper). Thus we don't try to predict it here.
async fn recovery_needed(
    tli: &WalResidentTimeline,
    heartbeat_timeout: Duration,
) -> RecoveryNeededInfo {
    let ss = tli.read_shared_state().await;
    let term = ss.sk.state().acceptor_state.term;
    let last_log_term = ss.sk.last_log_term();
    let flush_lsn = ss.sk.flush_lsn();
    // Note that peers contain myself, but that's ok -- we are interested only
    // in peers which are strictly ahead of us.
    let mut peers = ss.get_peers(heartbeat_timeout);
    // Sort by <last log term, lsn> pairs.
    peers.sort_by(|p1, p2| {
        let tl1 = TermLsn {
            term: p1.last_log_term,
            lsn: p1.flush_lsn,
        };
        let tl2 = TermLsn {
            term: p2.last_log_term,
            lsn: p2.flush_lsn,
        };
        tl2.cmp(&tl1) // desc
    });
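    // Illustrative example: if our (last_log_term, flush_lsn) is (3, 0/5000)
    // and two live peers report (term: 4, last_log_term: 4, flush_lsn: 0/9000)
    // and (term: 4, last_log_term: 4, flush_lsn: 0/8000), both are strictly
    // ahead of us and have term == last_log_term >= our term, so both end up
    // in `donors` below, with the most advanced peer first.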
    let num_streaming_computes = tli.get_walreceivers().get_num_streaming();
    let donors = if num_streaming_computes > 0 {
        vec![] // If there is a streaming compute, don't try to recover to not intervene.
    } else {
        peers
            .iter()
            .filter_map(|candidate| {
                // Are we interested in this candidate?
                let candidate_tl = TermLsn {
                    term: candidate.last_log_term,
                    lsn: candidate.flush_lsn,
                };
                let my_tl = TermLsn {
                    term: last_log_term,
                    lsn: flush_lsn,
                };
                if my_tl < candidate_tl {
                    // Yes, we are interested. Can we pull from it without
                    // (re)running elections? It is possible if 1) its term is
                    // equal to its last_log_term, so we can act on behalf of
                    // the leader of this term (we must be sure it has ever
                    // been elected), and 2) our term is not higher, or we
                    // would refuse the data.
                    if candidate.term == candidate.last_log_term && candidate.term >= term {
                        Some(Donor::from(candidate))
                    } else {
                        None
                    }
                } else {
                    None
                }
            })
            .collect()
    };
    RecoveryNeededInfo {
        term,
        last_log_term,
        flush_lsn,
        peers,
        num_streaming_computes,
        donors,
    }
}

/// Result of Timeline::recovery_needed, containing donor(s) if recovery is
/// needed and fields explaining the choice.
#[derive(Debug)]
pub struct RecoveryNeededInfo {
    /// my term
    pub term: Term,
    /// my last_log_term
    pub last_log_term: Term,
    /// my flush_lsn
    pub flush_lsn: Lsn,
    /// peers from which we can fetch WAL, for observability.
    pub peers: Vec<PeerInfo>,
    /// for observability
    pub num_streaming_computes: usize,
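    /// Safekeepers we can pull missing WAL from, best candidate first; empty
    /// if recovery is not needed or not possible.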
    pub donors: Vec<Donor>,
}

// Custom impl to omit unimportant PeerInfo fields.
impl fmt::Display for RecoveryNeededInfo {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{{")?;
        write!(
            f,
            "term: {}, last_log_term: {}, flush_lsn: {}, peers: {{",
            self.term, self.last_log_term, self.flush_lsn
        )?;
        for p in self.peers.iter() {
            write!(
                f,
                "PeerInfo {{ sk_id: {}, term: {}, last_log_term: {}, flush_lsn: {} }}, ",
                p.sk_id, p.term, p.last_log_term, p.flush_lsn
            )?;
        }
        write!(
            f,
            "}} num_streaming_computes: {}, donors: {:?}",
            self.num_streaming_computes, self.donors
        )
    }
}

#[derive(Clone, Debug)]
pub struct Donor {
    pub sk_id: NodeId,
    /// equals last_log_term
    pub term: Term,
    pub flush_lsn: Lsn,
    pub pg_connstr: String,
    pub http_connstr: String,
}

impl From<&PeerInfo> for Donor {
    fn from(p: &PeerInfo) -> Self {
        Donor {
            sk_id: p.sk_id,
            term: p.term,
            flush_lsn: p.flush_lsn,
            pg_connstr: p.pg_connstr.clone(),
            http_connstr: p.http_connstr.clone(),
        }
    }
}

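/// Interval between checks for whether recovery should be started, in milliseconds.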
const CHECK_INTERVAL_MS: u64 = 2000;

/// Check regularly whether we need to start recovery.
async fn recovery_main_loop(tli: WalResidentTimeline, conf: SafeKeeperConf) {
    let check_duration = Duration::from_millis(CHECK_INTERVAL_MS);
    loop {
        let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await;
        match recovery_needed_info.donors.first() {
            Some(donor) => {
                info!(
                    "starting recovery from donor {}: {}",
                    donor.sk_id, recovery_needed_info
                );
                let res = tli.wal_residence_guard().await;
                if let Err(e) = res {
                    warn!("failed to obtain guard: {}", e);
                    continue;
                }
                match recover(res.unwrap(), donor, &conf).await {
                    // Note: the 'write_wal rewrites WAL written before' error is
                    // expected here and might happen if compute and recovery
                    // concurrently write the same data. Eventually compute
                    // should win.
                    Err(e) => warn!("recovery failed: {:#}", e),
                    Ok(msg) => info!("recovery finished: {}", msg),
                }
            }
            None => {
                trace!(
                    "recovery not needed or not possible: {}",
                    recovery_needed_info
                );
            }
        }
        sleep(check_duration).await;
    }
}

/// Recover from the specified donor. Returns a message explaining the normal
/// finish reason, or an error.
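///
/// The handshake here mirrors what a walproposer of the donor's term would do:
/// learn the donor's term switch history over HTTP, send a VoteRequest to our
/// own state machine, find the highest common point of the two term histories,
/// and truncate local WAL to it via ProposerElected before streaming.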
async fn recover(
    tli: WalResidentTimeline,
    donor: &Donor,
    conf: &SafeKeeperConf,
) -> anyhow::Result<String> {
    // Learn the donor's term switch history to figure out the starting point.
    let client = reqwest::Client::new();
    let timeline_info: TimelineStatus = client
        .get(format!(
            "http://{}/v1/tenant/{}/timeline/{}",
            donor.http_connstr, tli.ttid.tenant_id, tli.ttid.timeline_id
        ))
        .send()
        .await?
        .json()
        .await?;
    if timeline_info.acceptor_state.term != donor.term {
        bail!(
            "donor term changed from {} to {}",
            donor.term,
            timeline_info.acceptor_state.term
        );
    }
    // Convert from API TermSwitchApiEntry into TermLsn.
    let donor_th = TermHistory(
        timeline_info
            .acceptor_state
            .term_history
            .iter()
            .map(|tl| Into::<TermLsn>::into(*tl))
            .collect(),
    );
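    // donor_th now holds the donor's term switch history: (term, start LSN)
    // pairs describing where each term begins on its WAL. Comparing it with
    // our own history below tells us where the two WALs diverge.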

    // Now understand our own term history.
    let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: donor.term });
    let vote_response = match tli
        .process_msg(&vote_request)
        .await
        .context("VoteRequest handling")?
    {
        Some(AcceptorProposerMessage::VoteResponse(vr)) => vr,
        _ => {
            bail!("unexpected VoteRequest response"); // unreachable
        }
    };
    if vote_response.term != donor.term {
        bail!(
            "our term changed from {} to {}",
            donor.term,
            vote_response.term
        );
    }

    let last_common_point = match TermHistory::find_highest_common_point(
        &donor_th,
        &vote_response.term_history,
        vote_response.flush_lsn,
    ) {
        None => bail!(
            "couldn't find common point in histories, donor {:?}, sk {:?}",
            donor_th,
            vote_response.term_history,
        ),
        Some(lcp) => lcp,
    };
    info!("found last common point at {:?}", last_common_point);

    // Truncate WAL locally.
    let pe = ProposerAcceptorMessage::Elected(ProposerElected {
        term: donor.term,
        start_streaming_at: last_common_point.lsn,
        term_history: donor_th,
        timeline_start_lsn: Lsn::INVALID,
    });
    // Successful ProposerElected handling always returns None. If the term
    // changed, we'll find that out during streaming. Note: it is expected to
    // get 'refusing to overwrite correct WAL' here if a walproposer
    // reconnected concurrently; a restart helps in that case.
    tli.process_msg(&pe)
        .await
        .context("ProposerElected handling")?;

    recovery_stream(tli, donor, last_common_point.lsn, conf).await
}

// Pull WAL from the donor, assuming the handshake is already done.
async fn recovery_stream(
    tli: WalResidentTimeline,
    donor: &Donor,
    start_streaming_at: Lsn,
    conf: &SafeKeeperConf,
) -> anyhow::Result<String> {
    // TODO: pass auth token
    let connection_conf_args = ConnectionConfigArgs {
        protocol: PostgresClientProtocol::Vanilla,
        ttid: tli.ttid,
        shard_number: None,
        shard_count: None,
        shard_stripe_size: None,
        listen_pg_addr_str: &donor.pg_connstr,
        auth_token: None,
        availability_zone: None,
    };
    let cfg = wal_stream_connection_config(connection_conf_args)?;
    let mut cfg = cfg.to_tokio_postgres_config();
    // This makes the safekeeper give out WAL that is not yet committed (up to flush_lsn).
    cfg.application_name(&format!("safekeeper_{}", conf.my_id));
    cfg.replication_mode(tokio_postgres::config::ReplicationMode::Physical);

    let connect_timeout = Duration::from_millis(10000);
    let (client, connection) = match time::timeout(connect_timeout, cfg.connect(postgres::NoTls))
        .await
    {
        Ok(client_and_conn) => client_and_conn?,
        Err(_elapsed) => {
            bail!("timed out while waiting {connect_timeout:?} for connection to peer safekeeper to open");
        }
    };
    trace!("connected to {:?}", donor);

    // The connection object performs the actual communication with the
    // server; spawn it off to run on its own.
    let ttid = tli.ttid;
    tokio::spawn(async move {
        if let Err(e) = connection
            .instrument(info_span!("recovery task connection poll", ttid = %ttid))
            .await
        {
            // This logging isn't very useful, as the error is forwarded to the client anyway.
            trace!(
                "tokio_postgres connection object finished with error: {}",
                e
            );
        }
    });

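    // Ask the donor to start physical replication at the last common point.
    // The term option carries the donor term the handshake above was based on;
    // recovery is only valid for that exact term (see also read_replies below,
    // which aborts if the term moves on).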
    let query = format!(
        "START_REPLICATION PHYSICAL {} (term='{}')",
        start_streaming_at, donor.term
    );

    let copy_stream = client.copy_both_simple(&query).await?;
    let physical_stream = ReplicationStream::new(copy_stream);

    // As in the normal walreceiver, do networking and writing to disk in parallel.
    let (msg_tx, msg_rx) = channel(MSG_QUEUE_SIZE);
    let (reply_tx, reply_rx) = channel(REPLY_QUEUE_SIZE);
    let wa = WalAcceptor::spawn(tli.wal_residence_guard().await?, msg_rx, reply_tx, None);

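    // Run both halves until one of them finishes: network_io feeds
    // AppendRequests into msg_tx (consumed by WalAcceptor, which writes WAL to
    // disk and replies via reply_tx), while read_replies drains those replies
    // and aborts on term change.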
    let res = tokio::select! {
        r = network_io(physical_stream, msg_tx, donor.clone(), tli, conf.clone()) => r,
        r = read_replies(reply_rx, donor.term) => r.map(|()| None),
    };

    // Join the spawned WalAcceptor. At this point the channels to/from it that
    // were passed to the network routines are dropped, so it will exit as soon
    // as it touches them.
    match wa.await {
        Ok(Ok(())) => {
            // WalAcceptor finished normally; the termination reason is in `res`.
            match res {
                Ok(Some(success_desc)) => Ok(success_desc),
                Ok(None) => bail!("unexpected recovery end without error/success"), // can't happen
                Err(e) => Err(e), // network error or term change
            }
        }
        Ok(Err(e)) => Err(e), // error while processing message
        Err(e) => bail!("WalAcceptor panicked: {}", e),
    }
}

// Perform the network part of streaming: read data and push it to msg_tx, and
// send keepalive replies so the sender hears from us. If nothing arrives for a
// while, check for termination.
// Returns
// - Ok(None) if the channel to WalAcceptor closed -- its task should return the error.
// - Ok(Some(String)) if recovery successfully completed.
// - Err if an error happened while reading from / writing to the socket.
async fn network_io(
    physical_stream: ReplicationStream,
    msg_tx: Sender<ProposerAcceptorMessage>,
    donor: Donor,
    tli: WalResidentTimeline,
    conf: SafeKeeperConf,
) -> anyhow::Result<Option<String>> {
    let mut physical_stream = pin!(physical_stream);
    let mut last_received_lsn = Lsn::INVALID;
    // Tear down the connection if no data arrives within this period.
    let no_data_timeout = Duration::from_millis(30000);

    loop {
        let msg = match timeout(no_data_timeout, physical_stream.next()).await {
            Ok(next) => match next {
                None => bail!("unexpected end of replication stream"),
                Some(msg) => msg.context("get replication message")?,
            },
            Err(_) => bail!("no message received within {:?}", no_data_timeout),
        };

        match msg {
            ReplicationMessage::XLogData(xlog_data) => {
                let ar_hdr = AppendRequestHeader {
                    term: donor.term,
                    term_start_lsn: Lsn::INVALID, // unused
                    begin_lsn: Lsn(xlog_data.wal_start()),
                    end_lsn: Lsn(xlog_data.wal_start()) + xlog_data.data().len() as u64,
                    commit_lsn: Lsn::INVALID, // do not attempt to advance; peer communication does it anyway
                    truncate_lsn: Lsn::INVALID, // do not attempt to advance
                    proposer_uuid: [0; 16],
                };
                let ar = AppendRequest {
                    h: ar_hdr,
                    wal_data: xlog_data.into_data(),
                };
                trace!(
                    "processing AppendRequest {}-{}, len {}",
                    ar.h.begin_lsn,
                    ar.h.end_lsn,
                    ar.wal_data.len()
                );
                last_received_lsn = ar.h.end_lsn;
                if msg_tx
                    .send(ProposerAcceptorMessage::AppendRequest(ar))
                    .await
                    .is_err()
                {
                    return Ok(None); // chan closed, WalAcceptor terminated
                }
            }
            ReplicationMessage::PrimaryKeepAlive(_) => {
                // A keepalive means nothing has been streamed for a while.
                // Check whether we need to stop.
                let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await;
                // Do the current donors still contain the one we are connected to?
                if !recovery_needed_info
                    .donors
                    .iter()
                    .any(|d| d.sk_id == donor.sk_id)
                {
                    // Most likely it means we have caught up.
                    // Note: just exiting makes tokio_postgres send CopyFail to the far end.
                    return Ok(Some(format!(
                        "terminating at {} as connected safekeeper {} with term {} is not a donor anymore: {}",
                        last_received_lsn, donor.sk_id, donor.term, recovery_needed_info
                    )));
                }
            }
            _ => {}
        }
        // Send a reply to each message to keep the connection alive. Ideally we
        // should do that only once in a while instead, but this again requires
        // a stream split or a similar workaround, and recovery is not that
        // performance critical anyway.
        //
        // We do not know the real write/flush LSNs here (we would need to take
        // the mutex again or check the replies, which are read in a different
        // future), but the sender doesn't care much about them either, so just
        // send the last received LSN.
        physical_stream
            .as_mut()
            .standby_status_update(
                PgLsn::from(last_received_lsn.0),
                PgLsn::from(last_received_lsn.0),
                PgLsn::from(last_received_lsn.0),
                SystemTime::now(),
                0,
            )
            .await?;
    }
}

// Read replies from WalAcceptor. We are not much interested in sending them to
// the donor safekeeper, so don't route them anywhere. However, we should check
// whether the term changes and exit if it does.
// Returns Ok(()) if the channel closed, Err in case of a term change.
async fn read_replies(
    mut reply_rx: Receiver<AcceptorProposerMessage>,
    donor_term: Term,
) -> anyhow::Result<()> {
    loop {
        match reply_rx.recv().await {
            Some(msg) => {
                if let AcceptorProposerMessage::AppendResponse(ar) = msg {
                    if ar.term != donor_term {
                        bail!("donor term changed from {} to {}", donor_term, ar.term);
                    }
                }
            }
            None => return Ok(()), // chan closed, WalAcceptor terminated
        }
    }
}