Line data Source code
1 : //! This module implements pulling WAL from peer safekeepers if the compute
2 : //! can't provide it, i.e. this safekeeper lags too far behind.
3 :
4 : use std::fmt;
5 : use std::pin::pin;
6 : use std::time::SystemTime;
7 :
8 : use anyhow::{Context, bail};
9 : use futures::StreamExt;
10 : use postgres_protocol::message::backend::ReplicationMessage;
11 : use safekeeper_api::Term;
12 : use safekeeper_api::membership::INVALID_GENERATION;
13 : use safekeeper_api::models::{PeerInfo, TimelineStatus};
14 : use tokio::select;
15 : use tokio::sync::mpsc::{Receiver, Sender, channel};
16 : use tokio::time::{self, Duration, sleep, timeout};
17 : use tokio_postgres::replication::ReplicationStream;
18 : use tokio_postgres::types::PgLsn;
19 : use tracing::*;
20 : use utils::id::NodeId;
21 : use utils::lsn::Lsn;
22 : use utils::postgres_client::{
23 : ConnectionConfigArgs, PostgresClientProtocol, wal_stream_connection_config,
24 : };
25 :
26 : use crate::SafeKeeperConf;
27 : use crate::receive_wal::{MSG_QUEUE_SIZE, REPLY_QUEUE_SIZE, WalAcceptor};
28 : use crate::safekeeper::{
29 : AcceptorProposerMessage, AppendRequest, AppendRequestHeader, ProposerAcceptorMessage,
30 : ProposerElected, TermHistory, TermLsn, VoteRequest,
31 : };
32 : use crate::timeline::WalResidentTimeline;
33 :
34 : /// Entrypoint for the per-timeline task which always runs, checking whether
35 : /// recovery for this safekeeper is needed and starting it if so.
36 : #[instrument(name = "recovery", skip_all, fields(ttid = %tli.ttid))]
37 : pub async fn recovery_main(tli: WalResidentTimeline, conf: SafeKeeperConf) {
38 : info!("started");
39 :
40 : let cancel = tli.cancel.clone();
41 : select! {
42 : _ = recovery_main_loop(tli, conf) => { unreachable!() }
43 : _ = cancel.cancelled() => {
44 : info!("stopped");
45 : }
46 : }
47 : }
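The entrypoint above is meant to be spawned as a long-lived per-timeline background task; a minimal sketch of how a caller might launch it (the surrounding task setup is assumed and not part of this file):

    // Sketch only: spawn recovery alongside other per-timeline background tasks.
    // `tli` is a WalResidentTimeline and `conf` a SafeKeeperConf owned by the caller.
    tokio::spawn(recovery_main(tli, conf.clone()));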
48 :
49 : /// Should we start fetching WAL from a peer safekeeper, and if yes, from
50 : /// which? The answer is yes, i.e. .donors is not empty, if 1) there is
51 : /// something to fetch and we can do that without running elections; 2) there
52 : /// is no actively streaming compute, as we don't want to compete with it.
53 : ///
54 : /// If donor(s) are chosen, their term is guaranteed to be equal to their
55 : /// last_log_term, so we are sure such a leader was actually elected.
56 : ///
57 : /// All possible donors are returned so that we can keep the connection to
58 : /// the current one if it is good, even if it slightly lags behind.
59 : ///
60 : /// Note that the term conditions above might not be met while safekeepers
61 : /// are still not aligned on the last flush_lsn. Generally in this case,
62 : /// until elections are run, it is not possible to say which safekeeper
63 : /// should recover from which one: the history which would be committed
64 : /// differs depending on the assembled quorum (e.g. the classic figure 8
65 : /// from the Raft paper). Thus we don't try to predict it here.
66 5 : async fn recovery_needed(
67 5 : tli: &WalResidentTimeline,
68 5 : heartbeat_timeout: Duration,
69 5 : ) -> RecoveryNeededInfo {
70 5 : let ss = tli.read_shared_state().await;
71 5 : let term = ss.sk.state().acceptor_state.term;
72 5 : let last_log_term = ss.sk.last_log_term();
73 5 : let flush_lsn = ss.sk.flush_lsn();
74 5 : // note that peers contain myself, but that's ok -- we are interested only in peers which are strictly ahead of us.
75 5 : let mut peers = ss.get_peers(heartbeat_timeout);
76 5 : // Sort by <last log term, lsn> pairs.
77 5 : peers.sort_by(|p1, p2| {
78 0 : let tl1 = TermLsn {
79 0 : term: p1.last_log_term,
80 0 : lsn: p1.flush_lsn,
81 0 : };
82 0 : let tl2 = TermLsn {
83 0 : term: p2.last_log_term,
84 0 : lsn: p2.flush_lsn,
85 0 : };
86 0 : tl2.cmp(&tl1) // desc
87 5 : });
88 5 : let num_streaming_computes = tli.get_walreceivers().get_num_streaming();
89 5 : let donors = if num_streaming_computes > 0 {
90 0 : vec![] // If there is a streaming compute, don't try to recover so as not to interfere with it.
91 : } else {
92 5 : peers
93 5 : .iter()
94 5 : .filter_map(|candidate| {
95 0 : // Are we interested in this candidate?
96 0 : let candidate_tl = TermLsn {
97 0 : term: candidate.last_log_term,
98 0 : lsn: candidate.flush_lsn,
99 0 : };
100 0 : let my_tl = TermLsn {
101 0 : term: last_log_term,
102 0 : lsn: flush_lsn,
103 0 : };
104 0 : if my_tl < candidate_tl {
105 : // Yes, we are interested. Can we pull from it without
106 : // (re)running elections? It is possible if 1) its term
107 : // is equal to its last_log_term, so we can act on behalf
108 : // of the leader of this term (we must be sure it was ever
109 : // elected), and 2) our term is not higher, or we'll refuse the data.
110 0 : if candidate.term == candidate.last_log_term && candidate.term >= term {
111 0 : Some(Donor::from(candidate))
112 : } else {
113 0 : None
114 : }
115 : } else {
116 0 : None
117 : }
118 5 : })
119 5 : .collect()
120 : };
121 5 : RecoveryNeededInfo {
122 5 : term,
123 5 : last_log_term,
124 5 : flush_lsn,
125 5 : peers,
126 5 : num_streaming_computes,
127 5 : donors,
128 5 : }
129 5 : }
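The ordering above assumes `TermLsn` compares lexicographically by (term, lsn), so a higher last_log_term always wins and flush_lsn only breaks ties; a minimal sketch of that assumption with made-up values:

    // Sketch only: relies on TermLsn's Ord matching the "<last log term, lsn>" sort above.
    let ahead = TermLsn { term: 3, lsn: Lsn(0x2000) };
    let behind = TermLsn { term: 2, lsn: Lsn(0x9000) };
    assert!(ahead > behind); // higher last_log_term wins despite the smaller lsn
    assert!(TermLsn { term: 3, lsn: Lsn(0x3000) } > ahead); // same term: larger lsn wins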
130 : /// Result of recovery_needed(), contains donor(s) if recovery is needed and
131 : /// fields explaining the choice.
132 : #[derive(Debug)]
133 : pub struct RecoveryNeededInfo {
134 : /// my term
135 : pub term: Term,
136 : /// my last_log_term
137 : pub last_log_term: Term,
138 : /// my flush_lsn
139 : pub flush_lsn: Lsn,
140 : /// peers from which we can fetch WAL, for observability.
141 : pub peers: Vec<PeerInfo>,
142 : /// for observability
143 : pub num_streaming_computes: usize,
144 : pub donors: Vec<Donor>,
145 : }
146 :
147 : // Custom impl to omit unimportant fields from PeerInfo.
148 : impl fmt::Display for RecoveryNeededInfo {
149 0 : fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
150 0 : write!(f, "{{")?;
151 0 : write!(
152 0 : f,
153 0 : "term: {}, last_log_term: {}, flush_lsn: {}, peers: {{",
154 0 : self.term, self.last_log_term, self.flush_lsn
155 0 : )?;
156 0 : for p in self.peers.iter() {
157 0 : write!(
158 0 : f,
159 0 : "PeerInfo {{ sk_id: {}, term: {}, last_log_term: {}, flush_lsn: {} }}, ",
160 0 : p.sk_id, p.term, p.last_log_term, p.flush_lsn
161 0 : )?;
162 : }
163 0 : write!(
164 0 : f,
165 0 : "}} num_streaming_computes: {}, donors: {:?}",
166 0 : self.num_streaming_computes, self.donors
167 0 : )
168 0 : }
169 : }
170 :
171 : #[derive(Clone, Debug)]
172 : pub struct Donor {
173 : pub sk_id: NodeId,
174 : /// equal to last_log_term
175 : pub term: Term,
176 : pub flush_lsn: Lsn,
177 : pub pg_connstr: String,
178 : pub http_connstr: String,
179 : }
180 :
181 : impl From<&PeerInfo> for Donor {
182 0 : fn from(p: &PeerInfo) -> Self {
183 0 : Donor {
184 0 : sk_id: p.sk_id,
185 0 : term: p.term,
186 0 : flush_lsn: p.flush_lsn,
187 0 : pg_connstr: p.pg_connstr.clone(),
188 0 : http_connstr: p.http_connstr.clone(),
189 0 : }
190 0 : }
191 : }
192 :
193 : const CHECK_INTERVAL_MS: u64 = 2000;
194 :
195 : /// Check regularly whether we need to start recovery.
196 5 : async fn recovery_main_loop(tli: WalResidentTimeline, conf: SafeKeeperConf) {
197 5 : let check_duration = Duration::from_millis(CHECK_INTERVAL_MS);
198 : loop {
199 5 : let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await;
200 5 : match recovery_needed_info.donors.first() {
201 0 : Some(donor) => {
202 0 : info!(
203 0 : "starting recovery from donor {}: {}",
204 : donor.sk_id, recovery_needed_info
205 : );
206 0 : let res = tli.wal_residence_guard().await;
207 0 : if let Err(e) = res {
208 0 : warn!("failed to obtain guard: {}", e);
209 0 : continue;
210 0 : }
211 0 : match recover(res.unwrap(), donor, &conf).await {
212 : // Note: 'write_wal rewrites WAL written before' error is
213 : // expected here and might happen if compute and recovery
214 : // concurrently write the same data. Eventually compute
215 : // should win.
216 0 : Err(e) => warn!("recovery failed: {:#}", e),
217 0 : Ok(msg) => info!("recovery finished: {}", msg),
218 : }
219 : }
220 : None => {
221 5 : trace!(
222 0 : "recovery not needed or not possible: {}",
223 : recovery_needed_info
224 : );
225 : }
226 : }
227 5 : sleep(check_duration).await;
228 : }
229 : }
230 :
231 : /// Recover from the specified donor. Returns a message explaining the normal
232 : /// finish reason, or an error.
233 0 : async fn recover(
234 0 : tli: WalResidentTimeline,
235 0 : donor: &Donor,
236 0 : conf: &SafeKeeperConf,
237 0 : ) -> anyhow::Result<String> {
238 0 : // Learn donor term switch history to figure out starting point.
239 0 : let client = reqwest::Client::new();
240 0 : let timeline_info: TimelineStatus = client
241 0 : .get(format!(
242 0 : "http://{}/v1/tenant/{}/timeline/{}",
243 0 : donor.http_connstr, tli.ttid.tenant_id, tli.ttid.timeline_id
244 0 : ))
245 0 : .send()
246 0 : .await?
247 0 : .json()
248 0 : .await?;
249 0 : if timeline_info.acceptor_state.term != donor.term {
250 0 : bail!(
251 0 : "donor term changed from {} to {}",
252 0 : donor.term,
253 0 : timeline_info.acceptor_state.term
254 0 : );
255 0 : }
256 0 : // convert from API TermSwitchApiEntry into TermLsn.
257 0 : let donor_th = TermHistory(
258 0 : timeline_info
259 0 : .acceptor_state
260 0 : .term_history
261 0 : .iter()
262 0 : .map(|tl| Into::<TermLsn>::into(*tl))
263 0 : .collect(),
264 0 : );
265 0 :
266 0 : // Now understand our term history.
267 0 : let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest {
268 0 : generation: INVALID_GENERATION,
269 0 : term: donor.term,
270 0 : });
271 0 : let vote_response = match tli
272 0 : .process_msg(&vote_request)
273 0 : .await
274 0 : .context("VoteRequest handling")?
275 : {
276 0 : Some(AcceptorProposerMessage::VoteResponse(vr)) => vr,
277 : _ => {
278 0 : bail!("unexpected VoteRequest response"); // unreachable
279 : }
280 : };
281 0 : if vote_response.term != donor.term {
282 0 : bail!(
283 0 : "our term changed from {} to {}",
284 0 : donor.term,
285 0 : vote_response.term
286 0 : );
287 0 : }
288 :
289 0 : let last_common_point = match TermHistory::find_highest_common_point(
290 0 : &donor_th,
291 0 : &vote_response.term_history,
292 0 : vote_response.flush_lsn,
293 0 : ) {
294 0 : None => bail!(
295 0 : "couldn't find common point in histories, donor {:?}, sk {:?}",
296 0 : donor_th,
297 0 : vote_response.term_history,
298 0 : ),
299 0 : Some(lcp) => lcp,
300 0 : };
301 0 : info!("found last common point at {:?}", last_common_point);
302 :
303 : // truncate WAL locally
304 0 : let pe = ProposerAcceptorMessage::Elected(ProposerElected {
305 0 : generation: INVALID_GENERATION,
306 0 : term: donor.term,
307 0 : start_streaming_at: last_common_point.lsn,
308 0 : term_history: donor_th,
309 0 : });
310 0 : // Successful ProposerElected handling always returns None. If term changed,
311 0 : // we'll find that out during streaming. Note: it is expected to get
312 0 : // 'refusing to overwrite correct WAL' here if walproposer reconnected
313 0 : // concurrently; a restart helps here.
314 0 : tli.process_msg(&pe)
315 0 : .await
316 0 : .context("ProposerElected handling")?;
317 :
318 0 : recovery_stream(tli, donor, last_common_point.lsn, conf).await
319 0 : }
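To make the handshake above more concrete, here is a hedged sketch of the term-history comparison; the terms and LSNs are invented, and the exact semantics live in `TermHistory::find_highest_common_point`:

    // Sketch only: the donor switched to term 3 at 0x3000 while we kept writing in
    // term 2 up to our flush_lsn 0x2900, so the histories agree only within term 1.
    let donor_th = TermHistory(vec![
        TermLsn { term: 1, lsn: Lsn(0x1000) },
        TermLsn { term: 3, lsn: Lsn(0x3000) },
    ]);
    let our_th = TermHistory(vec![
        TermLsn { term: 1, lsn: Lsn(0x1000) },
        TermLsn { term: 2, lsn: Lsn(0x2800) },
    ]);
    // The common point is expected to fall inside term 1, before our flush_lsn, and
    // recovery restarts streaming from there rather than from the flush_lsn itself.
    let _lcp = TermHistory::find_highest_common_point(&donor_th, &our_th, Lsn(0x2900));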
320 :
321 : // Pull WAL from donor, assuming handshake is already done.
322 0 : async fn recovery_stream(
323 0 : tli: WalResidentTimeline,
324 0 : donor: &Donor,
325 0 : start_streaming_at: Lsn,
326 0 : conf: &SafeKeeperConf,
327 0 : ) -> anyhow::Result<String> {
328 0 : // TODO: pass auth token
329 0 : let connection_conf_args = ConnectionConfigArgs {
330 0 : protocol: PostgresClientProtocol::Vanilla,
331 0 : ttid: tli.ttid,
332 0 : shard_number: None,
333 0 : shard_count: None,
334 0 : shard_stripe_size: None,
335 0 : listen_pg_addr_str: &donor.pg_connstr,
336 0 : auth_token: None,
337 0 : availability_zone: None,
338 0 : };
339 0 : let cfg = wal_stream_connection_config(connection_conf_args)?;
340 0 : let mut cfg = cfg.to_tokio_postgres_config();
341 0 : // It will make the safekeeper give out WAL that is not yet committed (up to flush_lsn).
342 0 : cfg.application_name(&format!("safekeeper_{}", conf.my_id));
343 0 : cfg.replication_mode(tokio_postgres::config::ReplicationMode::Physical);
344 0 :
345 0 : let connect_timeout = Duration::from_millis(10000);
346 0 : let (client, connection) = match time::timeout(
347 0 : connect_timeout,
348 0 : cfg.connect(tokio_postgres::NoTls),
349 0 : )
350 0 : .await
351 : {
352 0 : Ok(client_and_conn) => client_and_conn?,
353 0 : Err(_elapsed) => {
354 0 : bail!(
355 0 : "timed out while waiting {connect_timeout:?} for connection to peer safekeeper to open"
356 0 : );
357 : }
358 : };
359 0 : trace!("connected to {:?}", donor);
360 :
361 : // The connection object performs the actual communication with the
362 : // server; spawn it off to run on its own.
363 0 : let ttid = tli.ttid;
364 0 : tokio::spawn(async move {
365 0 : if let Err(e) = connection
366 0 : .instrument(info_span!("recovery task connection poll", ttid = %ttid))
367 0 : .await
368 : {
369 : // This logging isn't very useful as the error is forwarded to the client anyway.
370 0 : trace!(
371 0 : "tokio_postgres connection object finished with error: {}",
372 : e
373 : );
374 0 : }
375 0 : });
376 0 :
377 0 : let query = format!(
378 0 : "START_REPLICATION PHYSICAL {} (term='{}')",
379 0 : start_streaming_at, donor.term
380 0 : );
381 :
382 0 : let copy_stream = client.copy_both_simple(&query).await?;
383 0 : let physical_stream = ReplicationStream::new(copy_stream);
384 0 :
385 0 : // As in normal walreceiver, do networking and writing to disk in parallel.
386 0 : let (msg_tx, msg_rx) = channel(MSG_QUEUE_SIZE);
387 0 : let (reply_tx, reply_rx) = channel(REPLY_QUEUE_SIZE);
388 0 : let wa = WalAcceptor::spawn(tli.wal_residence_guard().await?, msg_rx, reply_tx, None);
389 :
390 0 : let res = tokio::select! {
391 0 : r = network_io(physical_stream, msg_tx, donor.clone(), tli, conf.clone()) => r,
392 0 : r = read_replies(reply_rx, donor.term) => r.map(|()| None),
393 : };
394 :
395 : // Join the spawned WalAcceptor. At this point the channels to/from it that were
396 : // passed to the network routines are dropped, so it will exit as soon as it touches them.
397 0 : match wa.await {
398 : Ok(Ok(())) => {
399 : // WalAcceptor finished normally; the termination reason is determined by res below.
400 0 : match res {
401 0 : Ok(Some(success_desc)) => Ok(success_desc),
402 0 : Ok(None) => bail!("unexpected recovery end without error/success"), // can't happen
403 0 : Err(e) => Err(e), // network error or term change
404 : }
405 : }
406 0 : Ok(Err(e)) => Err(e), // error while processing message
407 0 : Err(e) => bail!("WalAcceptor panicked: {}", e),
408 : }
409 0 : }
410 :
411 : // Perform the network part of streaming: read data and push it to msg_tx, and
412 : // send keepalives so the sender hears from us. If nothing arrives for a while,
413 : // check for termination.
414 : // Returns
415 : // - Ok(None) if the channel to WalAcceptor is closed -- its task should return the error.
416 : // - Ok(Some(String)) if recovery completed successfully.
417 : // - Err if an error happened while reading from or writing to the socket.
418 0 : async fn network_io(
419 0 : physical_stream: ReplicationStream,
420 0 : msg_tx: Sender<ProposerAcceptorMessage>,
421 0 : donor: Donor,
422 0 : tli: WalResidentTimeline,
423 0 : conf: SafeKeeperConf,
424 0 : ) -> anyhow::Result<Option<String>> {
425 0 : let mut physical_stream = pin!(physical_stream);
426 0 : let mut last_received_lsn = Lsn::INVALID;
427 0 : // tear down connection if no data arrives withing this period
428 0 : let no_data_timeout = Duration::from_millis(30000);
429 :
430 : loop {
431 0 : let msg = match timeout(no_data_timeout, physical_stream.next()).await {
432 0 : Ok(next) => match next {
433 0 : None => bail!("unexpected end of replication stream"),
434 0 : Some(msg) => msg.context("get replication message")?,
435 : },
436 0 : Err(_) => bail!("no message received within {:?}", no_data_timeout),
437 : };
438 :
439 0 : match msg {
440 0 : ReplicationMessage::XLogData(xlog_data) => {
441 0 : let ar_hdr = AppendRequestHeader {
442 0 : generation: INVALID_GENERATION,
443 0 : term: donor.term,
444 0 : begin_lsn: Lsn(xlog_data.wal_start()),
445 0 : end_lsn: Lsn(xlog_data.wal_start()) + xlog_data.data().len() as u64,
446 0 : commit_lsn: Lsn::INVALID, // do not attempt to advance, peer communication anyway does it
447 0 : truncate_lsn: Lsn::INVALID, // do not attempt to advance
448 0 : };
449 0 : let ar = AppendRequest {
450 0 : h: ar_hdr,
451 0 : wal_data: xlog_data.into_data(),
452 0 : };
453 0 : trace!(
454 0 : "processing AppendRequest {}-{}, len {}",
455 0 : ar.h.begin_lsn,
456 0 : ar.h.end_lsn,
457 0 : ar.wal_data.len()
458 : );
459 0 : last_received_lsn = ar.h.end_lsn;
460 0 : if msg_tx
461 0 : .send(ProposerAcceptorMessage::AppendRequest(ar))
462 0 : .await
463 0 : .is_err()
464 : {
465 0 : return Ok(None); // chan closed, WalAcceptor terminated
466 0 : }
467 : }
468 : ReplicationMessage::PrimaryKeepAlive(_) => {
469 : // keepalive means nothing is being streamed for a while. Check whether we need to stop.
470 0 : let recovery_needed_info = recovery_needed(&tli, conf.heartbeat_timeout).await;
471 : // do the current donors still contain the one we are currently connected to?
472 0 : if !recovery_needed_info
473 0 : .donors
474 0 : .iter()
475 0 : .any(|d| d.sk_id == donor.sk_id)
476 : {
477 : // Most likely it means we are caughtup.
478 : // note: just exiting makes tokio_postgres send CopyFail to the far end.
479 0 : return Ok(Some(format!(
480 0 : "terminating at {} as connected safekeeper {} with term {} is not a donor anymore: {}",
481 0 : last_received_lsn, donor.sk_id, donor.term, recovery_needed_info
482 0 : )));
483 0 : }
484 : }
485 0 : _ => {}
486 : }
487 : // Send a reply to each message to keep the connection alive. Ideally we
488 : // should do that only once in a while instead, but that again requires a
489 : // stream split or similar workaround, and recovery is not that
490 : // performance critical anyway.
491 : //
492 : // We do not know the real write/flush LSNs here (that would need taking the
493 : // mutex again or checking replies, which are read in a different future), but
494 : // the sender doesn't care much about them either, so just send the last received LSN.
495 0 : physical_stream
496 0 : .as_mut()
497 0 : .standby_status_update(
498 0 : PgLsn::from(last_received_lsn.0),
499 0 : PgLsn::from(last_received_lsn.0),
500 0 : PgLsn::from(last_received_lsn.0),
501 0 : SystemTime::now(),
502 0 : 0,
503 0 : )
504 0 : .await?;
505 : }
506 0 : }
507 :
508 : // Read replies from WalAcceptor. We are not much interested in sending them to
509 : // the donor safekeeper, so don't route them anywhere. However, we should check
510 : // whether the term changes and exit if it does.
511 : // Returns Ok(()) if the channel is closed, Err in case of a term change.
512 0 : async fn read_replies(
513 0 : mut reply_rx: Receiver<AcceptorProposerMessage>,
514 0 : donor_term: Term,
515 0 : ) -> anyhow::Result<()> {
516 : loop {
517 0 : match reply_rx.recv().await {
518 0 : Some(msg) => {
519 0 : if let AcceptorProposerMessage::AppendResponse(ar) = msg {
520 0 : if ar.term != donor_term {
521 0 : bail!("donor term changed from {} to {}", donor_term, ar.term);
522 0 : }
523 0 : }
524 : }
525 0 : None => return Ok(()), // chan closed, WalAcceptor terminated
526 : }
527 : }
528 0 : }
|