Line data Source code
1 : //! Acceptor part of proposer-acceptor consensus algorithm.
2 :
3 : use anyhow::{bail, Context, Result};
4 : use byteorder::{LittleEndian, ReadBytesExt};
5 : use bytes::{Buf, BufMut, Bytes, BytesMut};
6 :
7 : use postgres_ffi::{TimeLineID, MAX_SEND_SIZE};
8 : use serde::{Deserialize, Serialize};
9 : use std::cmp::max;
10 : use std::cmp::min;
11 : use std::fmt;
12 : use std::io::Read;
13 : use std::time::Duration;
14 : use storage_broker::proto::SafekeeperTimelineInfo;
15 :
16 : use tracing::*;
17 :
18 : use crate::control_file;
19 : use crate::send_wal::HotStandbyFeedback;
20 :
21 : use crate::state::TimelineState;
22 : use crate::wal_storage;
23 : use pq_proto::SystemId;
24 : use utils::pageserver_feedback::PageserverFeedback;
25 : use utils::{
26 : bin_ser::LeSer,
27 : id::{NodeId, TenantId, TimelineId},
28 : lsn::Lsn,
29 : };
30 :
31 : const SK_PROTOCOL_VERSION: u32 = 2;
32 : pub const UNKNOWN_SERVER_VERSION: u32 = 0;
33 :
34 : /// Consensus logical timestamp.
35 : pub type Term = u64;
36 : pub const INVALID_TERM: Term = 0;
37 :
38 8 : #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
39 : pub struct TermLsn {
40 : pub term: Term,
41 : pub lsn: Lsn,
42 : }
43 :
44 : // Creation from a tuple requires less typing (e.g. in unit tests).
45 : impl From<(Term, Lsn)> for TermLsn {
46 36 : fn from(pair: (Term, Lsn)) -> TermLsn {
47 36 : TermLsn {
48 36 : term: pair.0,
49 36 : lsn: pair.1,
50 36 : }
51 36 : }
52 : }
53 :
54 12 : #[derive(Clone, Serialize, Deserialize, PartialEq)]
55 : pub struct TermHistory(pub Vec<TermLsn>);
56 :
57 : impl TermHistory {
58 11404 : pub fn empty() -> TermHistory {
59 11404 : TermHistory(Vec::new())
60 11404 : }
61 :
62 : // Parse TermHistory as n_entries followed by TermLsn pairs
63 7192 : pub fn from_bytes(bytes: &mut Bytes) -> Result<TermHistory> {
64 7192 : if bytes.remaining() < 4 {
65 0 : bail!("TermHistory misses len");
66 7192 : }
67 7192 : let n_entries = bytes.get_u32_le();
68 7192 : let mut res = Vec::with_capacity(n_entries as usize);
69 7192 : for _ in 0..n_entries {
70 67807 : if bytes.remaining() < 16 {
71 0 : bail!("TermHistory is incomplete");
72 67807 : }
73 67807 : res.push(TermLsn {
74 67807 : term: bytes.get_u64_le(),
75 67807 : lsn: bytes.get_u64_le().into(),
76 67807 : })
77 : }
78 7192 : Ok(TermHistory(res))
79 7192 : }
80 :
81 : /// Return copy of self with switches happening strictly after up_to
82 : /// truncated.
83 32426 : pub fn up_to(&self, up_to: Lsn) -> TermHistory {
84 32426 : let mut res = Vec::with_capacity(self.0.len());
85 202085 : for e in &self.0 {
86 169700 : if e.lsn > up_to {
87 41 : break;
88 169659 : }
89 169659 : res.push(*e);
90 : }
91 32426 : TermHistory(res)
92 32426 : }
93 :
94 : /// Find point of divergence between leader (walproposer) term history and
95 : /// safekeeper. Arguments are not symmetric, as the proposer history ends at
96 : /// +infinity while the safekeeper's ends at flush_lsn.
97 : /// C version is at walproposer SendProposerElected.
98 8 : pub fn find_highest_common_point(
99 8 : prop_th: &TermHistory,
100 8 : sk_th: &TermHistory,
101 8 : sk_wal_end: Lsn,
102 8 : ) -> Option<TermLsn> {
103 8 : let (prop_th, sk_th) = (&prop_th.0, &sk_th.0); // avoid .0 below
104 :
105 8 : if let Some(sk_th_last) = sk_th.last() {
106 8 : assert!(
107 8 : sk_th_last.lsn <= sk_wal_end,
108 0 : "safekeeper term history end {:?} LSN is higher than WAL end {:?}",
109 : sk_th_last,
110 : sk_wal_end
111 : );
112 0 : }
113 :
114 : // find last common term, if any...
115 8 : let mut last_common_idx = None;
116 16 : for i in 0..min(sk_th.len(), prop_th.len()) {
117 16 : if prop_th[i].term != sk_th[i].term {
118 4 : break;
119 12 : }
120 12 : // If term is the same, LSN must be equal as well.
121 12 : assert!(
122 12 : prop_th[i].lsn == sk_th[i].lsn,
123 0 : "same term {} has different start LSNs: prop {}, sk {}",
124 0 : prop_th[i].term,
125 0 : prop_th[i].lsn,
126 0 : sk_th[i].lsn
127 : );
128 12 : last_common_idx = Some(i);
129 : }
130 8 : let last_common_idx = match last_common_idx {
131 2 : None => return None, // no common point
132 6 : Some(lci) => lci,
133 6 : };
134 6 : // Now find where the common term ends at both prop and sk and take the min.
135 6 : // The end of a (common) term is the start of the next one, except for the
136 6 : // last one: there it is flush_lsn for the safekeeper and +infinity for the
137 6 : // proposer, so we just take flush_lsn.
138 6 : if last_common_idx == prop_th.len() - 1 {
139 2 : Some(TermLsn {
140 2 : term: prop_th[last_common_idx].term,
141 2 : lsn: sk_wal_end,
142 2 : })
143 : } else {
144 4 : let prop_common_term_end = prop_th[last_common_idx + 1].lsn;
145 4 : let sk_common_term_end = if last_common_idx + 1 < sk_th.len() {
146 2 : sk_th[last_common_idx + 1].lsn
147 : } else {
148 2 : sk_wal_end
149 : };
150 4 : Some(TermLsn {
151 4 : term: prop_th[last_common_idx].term,
152 4 : lsn: min(prop_common_term_end, sk_common_term_end),
153 4 : })
154 : }
155 8 : }
156 : }
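    : // Worked example for find_highest_common_point, mirroring the unit tests at the
    : // bottom of this file: with prop_th = [(1, 10), (2, 20), (4, 40)],
    : // sk_th = [(1, 10), (2, 20), (3, 30)] and sk_wal_end = Lsn(40), the last common
    : // term is 2; the proposer ends it at Lsn(40) (start of term 4), the safekeeper at
    : // Lsn(30) (start of term 3), so the result is Some((term 2, Lsn(30))).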
157 :
158 : /// Display only latest entries for Debug.
159 : impl fmt::Debug for TermHistory {
160 400 : fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
161 400 : let n_printed = 20;
162 400 : write!(
163 400 : fmt,
164 400 : "{}{:?}",
165 400 : if self.0.len() > n_printed { "... " } else { "" },
166 400 : self.0
167 400 : .iter()
168 400 : .rev()
169 400 : .take(n_printed)
170 2216 : .map(|&e| (e.term, e.lsn)) // omit TermSwitchEntry
171 400 : .collect::<Vec<_>>()
172 400 : )
173 400 : }
174 : }
175 :
176 : /// Unique id of proposer. Not needed for correctness, used for monitoring.
177 : pub type PgUuid = [u8; 16];
178 :
179 : /// Persistent consensus state of the acceptor.
180 12 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
181 : pub struct AcceptorState {
182 : /// acceptor's last term it voted for (advanced in 1 phase)
183 : pub term: Term,
184 : /// History of term switches for safekeeper's WAL.
185 : /// Actually it often goes *beyond* WAL contents as we adopt term history
186 : /// from the proposer before recovery.
187 : pub term_history: TermHistory,
188 : }
189 :
190 : impl AcceptorState {
191 : /// acceptor's epoch is the term of the highest entry in the log
192 7198 : pub fn get_epoch(&self, flush_lsn: Lsn) -> Term {
193 7198 : let th = self.term_history.up_to(flush_lsn);
194 7198 : match th.0.last() {
195 6085 : Some(e) => e.term,
196 1113 : None => 0,
197 : }
198 7198 : }
199 : }
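    : // Example for get_epoch (hypothetical values): with term_history
    : // [(term 2, Lsn(10)), (term 3, Lsn(50))] and flush_lsn = Lsn(40), the epoch is 2:
    : // the switch to term 3 at Lsn(50) lies beyond the locally flushed WAL, so it does
    : // not count yet.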
200 :
201 : /// Information about Postgres. Safekeeper gets it once and then verifies
202 : /// all further connections from computes match.
203 8 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
204 : pub struct ServerInfo {
205 : /// Postgres server version
206 : pub pg_version: u32,
207 : pub system_id: SystemId,
208 : pub wal_seg_size: u32,
209 : }
210 :
211 4 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
212 : pub struct PersistedPeerInfo {
213 : /// LSN up to which safekeeper offloaded WAL to s3.
214 : pub backup_lsn: Lsn,
215 : /// Term of the last entry.
216 : pub term: Term,
217 : /// LSN of the last record.
218 : pub flush_lsn: Lsn,
219 : /// Up to which LSN safekeeper regards its WAL as committed.
220 : pub commit_lsn: Lsn,
221 : }
222 :
223 : impl PersistedPeerInfo {
224 0 : pub fn new() -> Self {
225 0 : Self {
226 0 : backup_lsn: Lsn::INVALID,
227 0 : term: INVALID_TERM,
228 0 : flush_lsn: Lsn(0),
229 0 : commit_lsn: Lsn(0),
230 0 : }
231 0 : }
232 : }
233 :
234 : // make clippy happy
235 : impl Default for PersistedPeerInfo {
236 0 : fn default() -> Self {
237 0 : Self::new()
238 0 : }
239 : }
240 :
241 : // protocol messages
242 :
243 : /// Initial Proposer -> Acceptor message
244 155897 : #[derive(Debug, Deserialize)]
245 : pub struct ProposerGreeting {
246 : /// proposer-acceptor protocol version
247 : pub protocol_version: u32,
248 : /// Postgres server version
249 : pub pg_version: u32,
250 : pub proposer_id: PgUuid,
251 : pub system_id: SystemId,
252 : pub timeline_id: TimelineId,
253 : pub tenant_id: TenantId,
254 : pub tli: TimeLineID,
255 : pub wal_seg_size: u32,
256 : }
257 :
258 : /// Acceptor -> Proposer initial response: the highest term known to me
259 : /// (acceptor voted for).
260 : #[derive(Debug, Serialize)]
261 : pub struct AcceptorGreeting {
262 : term: u64,
263 : node_id: NodeId,
264 : }
265 :
266 : /// Vote request sent from proposer to safekeepers
267 25224 : #[derive(Debug, Deserialize)]
268 : pub struct VoteRequest {
269 : pub term: Term,
270 : }
271 :
272 : /// Vote itself, sent from safekeeper to proposer
273 : #[derive(Debug, Serialize)]
274 : pub struct VoteResponse {
275 : pub term: Term, // safekeeper's current term; if it is higher than proposer's, the compute is out of date.
276 : vote_given: u64, // fixme u64 due to padding
277 : // Safekeeper flush_lsn (end of WAL) + history of term switches allow
278 : // proposer to choose the most advanced one.
279 : pub flush_lsn: Lsn,
280 : truncate_lsn: Lsn,
281 : pub term_history: TermHistory,
282 : timeline_start_lsn: Lsn,
283 : }
284 :
285 : /*
286 : * Proposer -> Acceptor message announcing proposer is elected and communicating
287 : * term history to it.
288 : */
289 : #[derive(Debug)]
290 : pub struct ProposerElected {
291 : pub term: Term,
292 : pub start_streaming_at: Lsn,
293 : pub term_history: TermHistory,
294 : pub timeline_start_lsn: Lsn,
295 : }
296 :
297 : /// Request with WAL message sent from proposer to safekeeper. Along the way it
298 : /// communicates commit_lsn.
299 : #[derive(Debug)]
300 : pub struct AppendRequest {
301 : pub h: AppendRequestHeader,
302 : pub wal_data: Bytes,
303 : }
304 23974 : #[derive(Debug, Clone, Deserialize)]
305 : pub struct AppendRequestHeader {
306 : // safekeeper's current term; if it is higher than proposer's, the compute is out of date.
307 : pub term: Term,
308 : // TODO: remove this field, it is unused -- LSN of term switch can be taken
309 : // from ProposerElected (as well as from term history).
310 : pub epoch_start_lsn: Lsn,
311 : /// start position of message in WAL
312 : pub begin_lsn: Lsn,
313 : /// end position of message in WAL
314 : pub end_lsn: Lsn,
315 : /// LSN committed by quorum of safekeepers
316 : pub commit_lsn: Lsn,
317 : /// minimal LSN which may be needed by proposer to perform recovery of some safekeeper
318 : pub truncate_lsn: Lsn,
319 : // only for logging/debugging
320 : pub proposer_uuid: PgUuid,
321 : }
322 :
323 : /// Report safekeeper state to proposer
324 : #[derive(Debug, Serialize, Clone)]
325 : pub struct AppendResponse {
326 : // Current term of the safekeeper; if it is higher than proposer's, the
327 : // compute is out of date.
328 : pub term: Term,
329 : // NOTE: this is the physical end of WAL on the safekeeper; currently it
330 : // doesn't make much sense without taking the epoch into account, as the
331 : // history may have diverged.
332 : pub flush_lsn: Lsn,
333 : // We report back our awareness about which WAL is committed, as this is
334 : // a criterion for walproposer --sync mode exit
335 : pub commit_lsn: Lsn,
336 : pub hs_feedback: HotStandbyFeedback,
337 : pub pageserver_feedback: Option<PageserverFeedback>,
338 : }
339 :
340 : impl AppendResponse {
341 0 : fn term_only(term: Term) -> AppendResponse {
342 0 : AppendResponse {
343 0 : term,
344 0 : flush_lsn: Lsn(0),
345 0 : commit_lsn: Lsn(0),
346 0 : hs_feedback: HotStandbyFeedback::empty(),
347 0 : pageserver_feedback: None,
348 0 : }
349 0 : }
350 : }
351 :
352 : /// Proposer -> Acceptor messages
353 : #[derive(Debug)]
354 : pub enum ProposerAcceptorMessage {
355 : Greeting(ProposerGreeting),
356 : VoteRequest(VoteRequest),
357 : Elected(ProposerElected),
358 : AppendRequest(AppendRequest),
359 : NoFlushAppendRequest(AppendRequest),
360 : FlushWAL,
361 : }
362 :
363 : impl ProposerAcceptorMessage {
364 : /// Parse proposer message.
365 212287 : pub fn parse(msg_bytes: Bytes) -> Result<ProposerAcceptorMessage> {
366 212287 : // xxx using Reader is inefficient but easy to work with bincode
367 212287 : let mut stream = msg_bytes.reader();
368 : // u64 is here to avoid padding; it will be removed once we stop packing C structs into the wire as is
369 212287 : let tag = stream.read_u64::<LittleEndian>()? as u8 as char;
370 212287 : match tag {
371 : 'g' => {
372 155897 : let msg = ProposerGreeting::des_from(&mut stream)?;
373 155897 : Ok(ProposerAcceptorMessage::Greeting(msg))
374 : }
375 : 'v' => {
376 25224 : let msg = VoteRequest::des_from(&mut stream)?;
377 25224 : Ok(ProposerAcceptorMessage::VoteRequest(msg))
378 : }
379 : 'e' => {
380 7192 : let mut msg_bytes = stream.into_inner();
381 7192 : if msg_bytes.remaining() < 16 {
382 0 : bail!("ProposerElected message is not complete");
383 7192 : }
384 7192 : let term = msg_bytes.get_u64_le();
385 7192 : let start_streaming_at = msg_bytes.get_u64_le().into();
386 7192 : let term_history = TermHistory::from_bytes(&mut msg_bytes)?;
387 7192 : if msg_bytes.remaining() < 8 {
388 0 : bail!("ProposerElected message is not complete");
389 7192 : }
390 7192 : let timeline_start_lsn = msg_bytes.get_u64_le().into();
391 7192 : let msg = ProposerElected {
392 7192 : term,
393 7192 : start_streaming_at,
394 7192 : timeline_start_lsn,
395 7192 : term_history,
396 7192 : };
397 7192 : Ok(ProposerAcceptorMessage::Elected(msg))
398 : }
399 : 'a' => {
400 : // read header followed by wal data
401 23974 : let hdr = AppendRequestHeader::des_from(&mut stream)?;
402 23974 : let rec_size = hdr
403 23974 : .end_lsn
404 23974 : .checked_sub(hdr.begin_lsn)
405 23974 : .context("begin_lsn > end_lsn in AppendRequest")?
406 : .0 as usize;
407 23974 : if rec_size > MAX_SEND_SIZE {
408 0 : bail!(
409 0 : "AppendRequest is longer than MAX_SEND_SIZE ({})",
410 0 : MAX_SEND_SIZE
411 0 : );
412 23974 : }
413 23974 :
414 23974 : let mut wal_data_vec: Vec<u8> = vec![0; rec_size];
415 23974 : stream.read_exact(&mut wal_data_vec)?;
416 23974 : let wal_data = Bytes::from(wal_data_vec);
417 23974 : let msg = AppendRequest { h: hdr, wal_data };
418 23974 :
419 23974 : Ok(ProposerAcceptorMessage::AppendRequest(msg))
420 : }
421 0 : _ => bail!("unknown proposer-acceptor message tag: {}", tag),
422 : }
423 212287 : }
424 : }
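    : // Wire sketch implied by parse() above (all integers are little-endian, the tag
    : // is padded to 8 bytes): a VoteRequest for term 42 arrives as
    : //   76 00 00 00 00 00 00 00   2a 00 00 00 00 00 00 00
    : // i.e. the 'v' tag followed by the fixed-size LeSer-encoded body.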
425 :
426 : /// Acceptor -> Proposer messages
427 : #[derive(Debug)]
428 : pub enum AcceptorProposerMessage {
429 : Greeting(AcceptorGreeting),
430 : VoteResponse(VoteResponse),
431 : AppendResponse(AppendResponse),
432 : }
433 :
434 : impl AcceptorProposerMessage {
435 : /// Serialize acceptor -> proposer message.
436 200927 : pub fn serialize(&self, buf: &mut BytesMut) -> Result<()> {
437 200927 : match self {
438 155897 : AcceptorProposerMessage::Greeting(msg) => {
439 155897 : buf.put_u64_le('g' as u64);
440 155897 : buf.put_u64_le(msg.term);
441 155897 : buf.put_u64_le(msg.node_id.0);
442 155897 : }
443 25224 : AcceptorProposerMessage::VoteResponse(msg) => {
444 25224 : buf.put_u64_le('v' as u64);
445 25224 : buf.put_u64_le(msg.term);
446 25224 : buf.put_u64_le(msg.vote_given);
447 25224 : buf.put_u64_le(msg.flush_lsn.into());
448 25224 : buf.put_u64_le(msg.truncate_lsn.into());
449 25224 : buf.put_u32_le(msg.term_history.0.len() as u32);
450 134911 : for e in &msg.term_history.0 {
451 109687 : buf.put_u64_le(e.term);
452 109687 : buf.put_u64_le(e.lsn.into());
453 109687 : }
454 25224 : buf.put_u64_le(msg.timeline_start_lsn.into());
455 : }
456 19806 : AcceptorProposerMessage::AppendResponse(msg) => {
457 19806 : buf.put_u64_le('a' as u64);
458 19806 : buf.put_u64_le(msg.term);
459 19806 : buf.put_u64_le(msg.flush_lsn.into());
460 19806 : buf.put_u64_le(msg.commit_lsn.into());
461 19806 : buf.put_i64_le(msg.hs_feedback.ts);
462 19806 : buf.put_u64_le(msg.hs_feedback.xmin);
463 19806 : buf.put_u64_le(msg.hs_feedback.catalog_xmin);
464 :
465 : // AsyncReadMessage in walproposer.c will not try to decode pageserver_feedback
466 : // if it is not present.
467 19806 : if let Some(ref msg) = msg.pageserver_feedback {
468 0 : msg.serialize(buf);
469 19806 : }
470 : }
471 : }
472 :
473 200927 : Ok(())
474 200927 : }
475 : }
476 :
477 : /// Safekeeper implements consensus to reliably persist WAL across nodes.
478 : /// It controls all WAL disk writes and updates of control file.
479 : ///
480 : /// Currently safekeeper processes:
481 : /// - messages from compute (proposers) and provides replies
482 : /// - messages from broker peers
483 : pub struct SafeKeeper<CTRL: control_file::Storage, WAL: wal_storage::Storage> {
484 : /// LSN from which the proposer this safekeeper is currently talking to
485 : /// appends WAL; determines the epoch switch point.
486 : pub epoch_start_lsn: Lsn,
487 :
488 : pub state: TimelineState<CTRL>, // persistent state storage
489 : pub wal_store: WAL,
490 :
491 : node_id: NodeId, // safekeeper's node id
492 : }
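    : // Typical driving loop, sketched from the API in this file (buffer names are
    : // hypothetical, error handling and I/O omitted):
    : //   let mut sk = SafeKeeper::new(ctrl_storage, wal_store, NodeId(0))?;
    : //   let msg = ProposerAcceptorMessage::parse(msg_bytes)?;
    : //   if let Some(reply) = sk.process_msg(&msg).await? {
    : //       reply.serialize(&mut reply_buf)?; // send reply_buf back to walproposer
    : //   }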
493 :
494 : impl<CTRL, WAL> SafeKeeper<CTRL, WAL>
495 : where
496 : CTRL: control_file::Storage,
497 : WAL: wal_storage::Storage,
498 : {
499 : /// Accepts a control file storage containing the safekeeper state.
500 : /// State must be initialized, i.e. contain filled `tenant_id`, `timeline_id`
501 : /// and `server` (`wal_seg_size` inside it) fields.
502 68990 : pub fn new(state: CTRL, wal_store: WAL, node_id: NodeId) -> Result<SafeKeeper<CTRL, WAL>> {
503 68990 : if state.tenant_id == TenantId::from([0u8; 16])
504 68990 : || state.timeline_id == TimelineId::from([0u8; 16])
505 : {
506 0 : bail!(
507 0 : "Calling SafeKeeper::new with empty tenant_id ({}) or timeline_id ({})",
508 0 : state.tenant_id,
509 0 : state.timeline_id
510 0 : );
511 68990 : }
512 68990 :
513 68990 : Ok(SafeKeeper {
514 68990 : epoch_start_lsn: Lsn(0),
515 68990 : state: TimelineState::new(state),
516 68990 : wal_store,
517 68990 : node_id,
518 68990 : })
519 68990 : }
520 :
521 : /// Get history of term switches for the available WAL
522 25228 : fn get_term_history(&self) -> TermHistory {
523 25228 : self.state
524 25228 : .acceptor_state
525 25228 : .term_history
526 25228 : .up_to(self.flush_lsn())
527 25228 : }
528 :
529 : /// Get current term.
530 0 : pub fn get_term(&self) -> Term {
531 0 : self.state.acceptor_state.term
532 0 : }
533 :
534 7198 : pub fn get_epoch(&self) -> Term {
535 7198 : self.state.acceptor_state.get_epoch(self.flush_lsn())
536 7198 : }
537 :
538 : /// wal_store wrapper that avoids violating commit_lsn <= flush_lsn when we don't have any WAL yet.
539 89963 : pub fn flush_lsn(&self) -> Lsn {
540 89963 : max(self.wal_store.flush_lsn(), self.state.timeline_start_lsn)
541 89963 : }
542 :
543 : /// Process message from proposer and possibly form reply. Concurrent
544 : /// callers must exclude each other.
545 232103 : pub async fn process_msg(
546 232103 : &mut self,
547 232103 : msg: &ProposerAcceptorMessage,
548 232103 : ) -> Result<Option<AcceptorProposerMessage>> {
549 232103 : match msg {
550 155897 : ProposerAcceptorMessage::Greeting(msg) => self.handle_greeting(msg).await,
551 25228 : ProposerAcceptorMessage::VoteRequest(msg) => self.handle_vote_request(msg).await,
552 7194 : ProposerAcceptorMessage::Elected(msg) => self.handle_elected(msg).await,
553 4 : ProposerAcceptorMessage::AppendRequest(msg) => {
554 4 : self.handle_append_request(msg, true).await
555 : }
556 23974 : ProposerAcceptorMessage::NoFlushAppendRequest(msg) => {
557 23974 : self.handle_append_request(msg, false).await
558 : }
559 19806 : ProposerAcceptorMessage::FlushWAL => self.handle_flush().await,
560 : }
561 232103 : }
562 :
563 : /// Handle initial message from proposer: check its sanity and send my
564 : /// current term.
565 155897 : async fn handle_greeting(
566 155897 : &mut self,
567 155897 : msg: &ProposerGreeting,
568 155897 : ) -> Result<Option<AcceptorProposerMessage>> {
569 155897 : // Check protocol compatibility
570 155897 : if msg.protocol_version != SK_PROTOCOL_VERSION {
571 0 : bail!(
572 0 : "incompatible protocol version {}, expected {}",
573 0 : msg.protocol_version,
574 0 : SK_PROTOCOL_VERSION
575 0 : );
576 155897 : }
577 155897 : /* Postgres major version mismatch is treated as fatal error
578 155897 : * because safekeepers parse WAL headers and the format
579 155897 : * may change between versions.
580 155897 : */
581 155897 : if msg.pg_version / 10000 != self.state.server.pg_version / 10000
582 0 : && self.state.server.pg_version != UNKNOWN_SERVER_VERSION
583 : {
584 0 : bail!(
585 0 : "incompatible server version {}, expected {}",
586 0 : msg.pg_version,
587 0 : self.state.server.pg_version
588 0 : );
589 155897 : }
590 155897 :
591 155897 : if msg.tenant_id != self.state.tenant_id {
592 0 : bail!(
593 0 : "invalid tenant ID, got {}, expected {}",
594 0 : msg.tenant_id,
595 0 : self.state.tenant_id
596 0 : );
597 155897 : }
598 155897 : if msg.timeline_id != self.state.timeline_id {
599 0 : bail!(
600 0 : "invalid timeline ID, got {}, expected {}",
601 0 : msg.timeline_id,
602 0 : self.state.timeline_id
603 0 : );
604 155897 : }
605 155897 : if self.state.server.wal_seg_size != msg.wal_seg_size {
606 0 : bail!(
607 0 : "invalid wal_seg_size, got {}, expected {}",
608 0 : msg.wal_seg_size,
609 0 : self.state.server.wal_seg_size
610 0 : );
611 155897 : }
612 155897 :
613 155897 : // system_id will be updated on mismatch
614 155897 : // sync-safekeepers doesn't know sysid and sends 0, ignore it
615 155897 : if self.state.server.system_id != msg.system_id && msg.system_id != 0 {
616 0 : if self.state.server.system_id != 0 {
617 0 : warn!(
618 0 : "unexpected system ID arrived, got {}, expected {}",
619 0 : msg.system_id, self.state.server.system_id
620 0 : );
621 0 : }
622 :
623 0 : let mut state = self.state.start_change();
624 0 : state.server.system_id = msg.system_id;
625 0 : if msg.pg_version != UNKNOWN_SERVER_VERSION {
626 0 : state.server.pg_version = msg.pg_version;
627 0 : }
628 0 : self.state.finish_change(&state).await?;
629 155897 : }
630 :
631 155897 : info!(
632 302 : "processed greeting from walproposer {}, sending term {:?}",
633 4832 : msg.proposer_id.map(|b| format!("{:X}", b)).join(""),
634 302 : self.state.acceptor_state.term
635 302 : );
636 155897 : Ok(Some(AcceptorProposerMessage::Greeting(AcceptorGreeting {
637 155897 : term: self.state.acceptor_state.term,
638 155897 : node_id: self.node_id,
639 155897 : })))
640 155897 : }
641 :
642 : /// Give vote for the given term, if we haven't done that previously.
643 25228 : async fn handle_vote_request(
644 25228 : &mut self,
645 25228 : msg: &VoteRequest,
646 25228 : ) -> Result<Option<AcceptorProposerMessage>> {
647 25228 : // Once voted, we won't accept data from older proposers; flush
648 25228 : // everything we've already received so that new proposer starts
649 25228 : // streaming at end of our WAL, without overlap. Currently we truncate
650 25228 : // WAL at streaming point, so this avoids truncating already committed
651 25228 : // WAL.
652 25228 : //
653 25228 : // TODO: it would be smoother to not truncate committed piece at
654 25228 : // handle_elected instead. Currently not a big deal, as proposer is the
655 25228 : // only source of WAL; with peer2peer recovery it would be more
656 25228 : // important.
657 25228 : self.wal_store.flush_wal().await?;
658 : // initialize with refusal
659 25228 : let mut resp = VoteResponse {
660 25228 : term: self.state.acceptor_state.term,
661 25228 : vote_given: false as u64,
662 25228 : flush_lsn: self.flush_lsn(),
663 25228 : truncate_lsn: self.state.inmem.peer_horizon_lsn,
664 25228 : term_history: self.get_term_history(),
665 25228 : timeline_start_lsn: self.state.timeline_start_lsn,
666 25228 : };
667 25228 : if self.state.acceptor_state.term < msg.term {
668 23948 : let mut state = self.state.start_change();
669 23948 : state.acceptor_state.term = msg.term;
670 23948 : // persist vote before sending it out
671 23948 : self.state.finish_change(&state).await?;
672 :
673 23948 : resp.term = self.state.acceptor_state.term;
674 23948 : resp.vote_given = true as u64;
675 1280 : }
676 25228 : info!("processed VoteRequest for term {}: {:?}", msg.term, &resp);
677 25228 : Ok(Some(AcceptorProposerMessage::VoteResponse(resp)))
678 25228 : }
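    : // Example of the voting rule (hypothetical terms): if the persisted term is 5 and
    : // a VoteRequest for term 7 arrives, the term is bumped to 7 and persisted before
    : // the positive response is sent; any later VoteRequest for term 7 or lower is
    : // answered with vote_given = 0, because the strict "term < msg.term" check fails.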
679 :
680 : /// Form AppendResponse from current state.
681 19810 : fn append_response(&self) -> AppendResponse {
682 19810 : let ar = AppendResponse {
683 19810 : term: self.state.acceptor_state.term,
684 19810 : flush_lsn: self.flush_lsn(),
685 19810 : commit_lsn: self.state.commit_lsn,
686 19810 : // will be filled by the upper code to avoid bothering safekeeper
687 19810 : hs_feedback: HotStandbyFeedback::empty(),
688 19810 : pageserver_feedback: None,
689 19810 : };
690 19810 : trace!("formed AppendResponse {:?}", ar);
691 19810 : ar
692 19810 : }
693 :
694 7194 : async fn handle_elected(
695 7194 : &mut self,
696 7194 : msg: &ProposerElected,
697 7194 : ) -> Result<Option<AcceptorProposerMessage>> {
698 7194 : info!("received ProposerElected {:?}", msg);
699 7194 : if self.state.acceptor_state.term < msg.term {
700 2 : let mut state = self.state.start_change();
701 2 : state.acceptor_state.term = msg.term;
702 2 : self.state.finish_change(&state).await?;
703 7192 : }
704 :
705 : // If our term is higher, ignore the message (next feedback will inform the compute)
706 7194 : if self.state.acceptor_state.term > msg.term {
707 0 : return Ok(None);
708 7194 : }
709 7194 :
710 7194 : // This might happen in a rare race when another (old) connection from
711 7194 : // the same walproposer writes + flushes WAL after this connection
712 7194 : // already sent flush_lsn in VoteResponse. It is generally safe to
713 7194 : // proceed, but to prevent commit_lsn surprisingly going down we should
714 7194 : // either refuse the session (simpler) or skip the part we already have
715 7194 : // from the stream (can be implemented).
716 7194 : if msg.term == self.get_epoch() && self.flush_lsn() > msg.start_streaming_at {
717 0 : bail!("refusing ProposerElected which is going to overwrite correct WAL: term={}, flush_lsn={}, start_streaming_at={}; restarting the handshake should help",
718 0 : msg.term, self.flush_lsn(), msg.start_streaming_at)
719 7194 : }
720 7194 : // Otherwise we must never attempt to truncate committed data.
721 7194 : assert!(
722 7194 : msg.start_streaming_at >= self.state.inmem.commit_lsn,
723 0 : "attempt to truncate committed data: start_streaming_at={}, commit_lsn={}",
724 : msg.start_streaming_at,
725 : self.state.inmem.commit_lsn
726 : );
727 :
728 : // TODO: cross check divergence point, check if msg.start_streaming_at corresponds to
729 : // intersection of our history and history from msg
730 :
731 : // truncate wal, update the LSNs
732 7194 : self.wal_store.truncate_wal(msg.start_streaming_at).await?;
733 :
734 : // and now adopt term history from proposer
735 : {
736 7194 : let mut state = self.state.start_change();
737 7194 :
738 7194 : // Here we learn initial LSN for the first time, set fields
739 7194 : // interested in that.
740 7194 :
741 7194 : if state.timeline_start_lsn == Lsn(0) {
742 : // Remember point where WAL begins globally.
743 1111 : state.timeline_start_lsn = msg.timeline_start_lsn;
744 1111 : info!(
745 12 : "setting timeline_start_lsn to {:?}",
746 12 : state.timeline_start_lsn
747 12 : );
748 6083 : }
749 7194 : if state.peer_horizon_lsn == Lsn(0) {
750 1111 : // Update peer_horizon_lsn as soon as we know where timeline starts.
751 1111 : // It means that peer_horizon_lsn cannot be zero after we know timeline_start_lsn.
752 1111 : state.peer_horizon_lsn = msg.timeline_start_lsn;
753 6083 : }
754 7194 : if state.local_start_lsn == Lsn(0) {
755 1111 : state.local_start_lsn = msg.start_streaming_at;
756 1111 : info!("setting local_start_lsn to {:?}", state.local_start_lsn);
757 6083 : }
758 : // Initializing commit_lsn before acking first flushed record is
759 : // important to let find_end_of_wal skip the hole in the beginning
760 : // of the first segment.
761 : //
762 : // NB: on new clusters, this happens at the same time as
763 : // timeline_start_lsn initialization; it is kept outside that block to
764 : // support upgrades of existing timelines.
765 7194 : state.commit_lsn = max(state.commit_lsn, state.timeline_start_lsn);
766 7194 :
767 7194 : // Initializing backup_lsn is useful to avoid making backup think it should upload 0 segment.
768 7194 : state.backup_lsn = max(state.backup_lsn, state.timeline_start_lsn);
769 7194 :
770 7194 : state.acceptor_state.term_history = msg.term_history.clone();
771 7194 : self.state.finish_change(&state).await?;
772 : }
773 :
774 7194 : info!("start receiving WAL since {:?}", msg.start_streaming_at);
775 :
776 : // Cache the LSN where the term starts so we can fsync the control file with
777 : // commit_lsn as soon as we reach it -- sync-safekeepers finishes when the
778 : // persisted commit_lsn on a majority of safekeepers aligns.
779 7194 : self.epoch_start_lsn = match msg.term_history.0.last() {
780 0 : None => bail!("proposer elected with empty term history"),
781 7194 : Some(term_lsn_start) => term_lsn_start.lsn,
782 7194 : };
783 7194 :
784 7194 : Ok(None)
785 7194 : }
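    : // Worked example (hypothetical values): a proposer elected in term 3 sends
    : // term_history [(1, Lsn(10)), (2, Lsn(20)), (3, Lsn(35))] and
    : // start_streaming_at = Lsn(30). The safekeeper truncates its WAL back to Lsn(30),
    : // adopts the proposer's term history, and caches epoch_start_lsn = Lsn(35)
    : // (start of the last term) so the control file can be fsynced as soon as
    : // commit_lsn reaches it.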
786 :
787 : /// Advance commit_lsn taking into account what we have locally.
788 : ///
789 : /// Note: it is assumed that the 'WAL we have is from the right term' check
790 : /// has already been done by the caller.
791 12028 : async fn update_commit_lsn(&mut self, mut candidate: Lsn) -> Result<()> {
792 12028 : // Both peers and walproposer communicate this value, we might already
793 12028 : // have a fresher (higher) version.
794 12028 : candidate = max(candidate, self.state.inmem.commit_lsn);
795 12028 : let commit_lsn = min(candidate, self.flush_lsn());
796 12028 : assert!(
797 12028 : commit_lsn >= self.state.inmem.commit_lsn,
798 0 : "commit_lsn monotonicity violated: old={} new={}",
799 : self.state.inmem.commit_lsn,
800 : commit_lsn
801 : );
802 :
803 12028 : self.state.inmem.commit_lsn = commit_lsn;
804 12028 :
805 12028 : // If new commit_lsn reached epoch switch, force sync of control
806 12028 : // file: walproposer in sync mode is very interested when this
807 12028 : // happens. Note: this is for sync-safekeepers mode only, as
808 12028 : // otherwise commit_lsn might jump over epoch_start_lsn.
809 12028 : if commit_lsn >= self.epoch_start_lsn && self.state.commit_lsn < self.epoch_start_lsn {
810 904 : self.state.flush().await?;
811 11124 : }
812 :
813 12028 : Ok(())
814 12028 : }
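    : // Example (hypothetical values): candidate = Lsn(200), inmem.commit_lsn = Lsn(150),
    : // flush_lsn() = Lsn(180)  =>  new commit_lsn = min(max(200, 150), 180) = Lsn(180),
    : // i.e. the advertised commit point is clamped to the WAL actually present locally.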
815 :
816 : /// Persist control file if there is something to save and enough time
817 : /// has passed since the last save.
818 0 : pub async fn maybe_persist_inmem_control_file(&mut self) -> Result<()> {
819 0 : const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300);
820 0 : if self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL {
821 0 : return Ok(());
822 0 : }
823 0 : let need_persist = self.state.inmem.commit_lsn > self.state.commit_lsn
824 0 : || self.state.inmem.backup_lsn > self.state.backup_lsn
825 0 : || self.state.inmem.peer_horizon_lsn > self.state.peer_horizon_lsn
826 0 : || self.state.inmem.remote_consistent_lsn > self.state.remote_consistent_lsn;
827 0 : if need_persist {
828 0 : self.state.flush().await?;
829 0 : trace!("saved control file: {CF_SAVE_INTERVAL:?} passed");
830 0 : }
831 0 : Ok(())
832 0 : }
833 :
834 : /// Handle request to append WAL.
835 : #[allow(clippy::comparison_chain)]
836 23978 : async fn handle_append_request(
837 23978 : &mut self,
838 23978 : msg: &AppendRequest,
839 23978 : require_flush: bool,
840 23978 : ) -> Result<Option<AcceptorProposerMessage>> {
841 23978 : if self.state.acceptor_state.term < msg.h.term {
842 0 : bail!("got AppendRequest before ProposerElected");
843 23978 : }
844 23978 :
845 23978 : // If our term is higher, immediately refuse the message.
846 23978 : if self.state.acceptor_state.term > msg.h.term {
847 0 : let resp = AppendResponse::term_only(self.state.acceptor_state.term);
848 0 : return Ok(Some(AcceptorProposerMessage::AppendResponse(resp)));
849 23978 : }
850 23978 :
851 23978 : // Now we know that we are in the same term as the proposer,
852 23978 : // processing the message.
853 23978 :
854 23978 : self.state.inmem.proposer_uuid = msg.h.proposer_uuid;
855 23978 :
856 23978 : // do the job
857 23978 : if !msg.wal_data.is_empty() {
858 4388 : self.wal_store
859 4388 : .write_wal(msg.h.begin_lsn, &msg.wal_data)
860 0 : .await?;
861 19590 : }
862 :
863 : // flush wal to the disk, if required
864 23978 : if require_flush {
865 4 : self.wal_store.flush_wal().await?;
866 23974 : }
867 :
868 : // Update commit_lsn.
869 23978 : if msg.h.commit_lsn != Lsn(0) {
870 12028 : self.update_commit_lsn(msg.h.commit_lsn).await?;
871 11950 : }
872 : // The value calculated by walproposer can always lag:
873 : // - safekeepers can forget the inmem value and send the proposer a lower
874 : //   persisted one on restart;
875 : // - if we made safekeepers always send the persistent value,
876 : //   any compute restart would pull it down.
877 : // Thus, take the max before adopting it.
878 23978 : self.state.inmem.peer_horizon_lsn =
879 23978 : max(self.state.inmem.peer_horizon_lsn, msg.h.truncate_lsn);
880 23978 :
881 23978 : // Update truncate and commit LSN in control file.
882 23978 : // To avoid negative impact on performance of extra fsync, do it only
883 23978 : // when commit_lsn delta exceeds WAL segment size.
884 23978 : if self.state.commit_lsn + (self.state.server.wal_seg_size as u64)
885 23978 : < self.state.inmem.commit_lsn
886 : {
887 0 : self.state.flush().await?;
888 23978 : }
889 :
890 23978 : trace!(
891 0 : "processed AppendRequest of len {}, end_lsn={:?}, commit_lsn={:?}, truncate_lsn={:?}, flushed={:?}",
892 0 : msg.wal_data.len(),
893 0 : msg.h.end_lsn,
894 0 : msg.h.commit_lsn,
895 0 : msg.h.truncate_lsn,
896 0 : require_flush,
897 0 : );
898 :
899 : // If flush_lsn hasn't been updated, AppendResponse is not very useful.
900 23978 : if !require_flush {
901 23974 : return Ok(None);
902 4 : }
903 4 :
904 4 : let resp = self.append_response();
905 4 : Ok(Some(AcceptorProposerMessage::AppendResponse(resp)))
906 23978 : }
907 :
908 : /// Flush WAL to disk. Return AppendResponse with latest LSNs.
909 19806 : async fn handle_flush(&mut self) -> Result<Option<AcceptorProposerMessage>> {
910 19806 : self.wal_store.flush_wal().await?;
911 19806 : Ok(Some(AcceptorProposerMessage::AppendResponse(
912 19806 : self.append_response(),
913 19806 : )))
914 19806 : }
915 :
916 : /// Update timeline state with peer safekeeper data.
917 0 : pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
918 0 : let mut sync_control_file = false;
919 0 :
920 0 : if (Lsn(sk_info.commit_lsn) != Lsn::INVALID) && (sk_info.last_log_term != INVALID_TERM) {
921 : // Note: the check is too restrictive, generally we can update local
922 : // commit_lsn if our history matches (is part of) history of advanced
923 : // commit_lsn provider.
924 0 : if sk_info.last_log_term == self.get_epoch() {
925 0 : self.update_commit_lsn(Lsn(sk_info.commit_lsn)).await?;
926 0 : }
927 0 : }
928 :
929 0 : self.state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), self.state.inmem.backup_lsn);
930 0 : sync_control_file |= self.state.backup_lsn + (self.state.server.wal_seg_size as u64)
931 0 : < self.state.inmem.backup_lsn;
932 0 :
933 0 : self.state.inmem.remote_consistent_lsn = max(
934 0 : Lsn(sk_info.remote_consistent_lsn),
935 0 : self.state.inmem.remote_consistent_lsn,
936 0 : );
937 0 : sync_control_file |= self.state.remote_consistent_lsn
938 0 : + (self.state.server.wal_seg_size as u64)
939 0 : < self.state.inmem.remote_consistent_lsn;
940 0 :
941 0 : self.state.inmem.peer_horizon_lsn = max(
942 0 : Lsn(sk_info.peer_horizon_lsn),
943 0 : self.state.inmem.peer_horizon_lsn,
944 0 : );
945 0 : sync_control_file |= self.state.peer_horizon_lsn + (self.state.server.wal_seg_size as u64)
946 0 : < self.state.inmem.peer_horizon_lsn;
947 0 :
948 0 : if sync_control_file {
949 0 : self.state.flush().await?;
950 0 : }
951 0 : Ok(())
952 0 : }
953 : }
954 :
955 : #[cfg(test)]
956 : mod tests {
957 : use futures::future::BoxFuture;
958 : use postgres_ffi::{XLogSegNo, WAL_SEGMENT_SIZE};
959 :
960 : use super::*;
961 : use crate::{
962 : state::{PersistedPeers, TimelinePersistentState},
963 : wal_storage::Storage,
964 : };
965 : use std::{ops::Deref, str::FromStr, time::Instant};
966 :
967 : // fake storage for tests
968 : struct InMemoryState {
969 : persisted_state: TimelinePersistentState,
970 : }
971 :
972 : #[async_trait::async_trait]
973 : impl control_file::Storage for InMemoryState {
974 6 : async fn persist(&mut self, s: &TimelinePersistentState) -> Result<()> {
975 6 : self.persisted_state = s.clone();
976 6 : Ok(())
977 12 : }
978 :
979 0 : fn last_persist_at(&self) -> Instant {
980 0 : Instant::now()
981 0 : }
982 : }
983 :
984 : impl Deref for InMemoryState {
985 : type Target = TimelinePersistentState;
986 :
987 120 : fn deref(&self) -> &Self::Target {
988 120 : &self.persisted_state
989 120 : }
990 : }
991 :
992 4 : fn test_sk_state() -> TimelinePersistentState {
993 4 : let mut state = TimelinePersistentState::empty();
994 4 : state.server.wal_seg_size = WAL_SEGMENT_SIZE as u32;
995 4 : state.tenant_id = TenantId::from([1u8; 16]);
996 4 : state.timeline_id = TimelineId::from([1u8; 16]);
997 4 : state
998 4 : }
999 :
1000 : struct DummyWalStore {
1001 : lsn: Lsn,
1002 : }
1003 :
1004 : #[async_trait::async_trait]
1005 : impl wal_storage::Storage for DummyWalStore {
1006 18 : fn flush_lsn(&self) -> Lsn {
1007 18 : self.lsn
1008 18 : }
1009 :
1010 4 : async fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
1011 4 : self.lsn = startpos + buf.len() as u64;
1012 4 : Ok(())
1013 8 : }
1014 :
1015 4 : async fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
1016 4 : self.lsn = end_pos;
1017 4 : Ok(())
1018 8 : }
1019 :
1020 8 : async fn flush_wal(&mut self) -> Result<()> {
1021 8 : Ok(())
1022 16 : }
1023 :
1024 0 : fn remove_up_to(&self, _segno_up_to: XLogSegNo) -> BoxFuture<'static, anyhow::Result<()>> {
1025 0 : Box::pin(async { Ok(()) })
1026 0 : }
1027 :
1028 0 : fn get_metrics(&self) -> crate::metrics::WalStorageMetrics {
1029 0 : crate::metrics::WalStorageMetrics::default()
1030 0 : }
1031 : }
1032 :
1033 : #[tokio::test]
1034 2 : async fn test_voting() {
1035 2 : let storage = InMemoryState {
1036 2 : persisted_state: test_sk_state(),
1037 2 : };
1038 2 : let wal_store = DummyWalStore { lsn: Lsn(0) };
1039 2 : let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();
1040 2 :
1041 2 : // check voting for 1 is ok
1042 2 : let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 });
1043 2 : let mut vote_resp = sk.process_msg(&vote_request).await;
1044 2 : match vote_resp.unwrap() {
1045 2 : Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given != 0),
1046 2 : r => panic!("unexpected response: {:?}", r),
1047 2 : }
1048 2 :
1049 2 : // reboot...
1050 2 : let state = sk.state.deref().clone();
1051 2 : let storage = InMemoryState {
1052 2 : persisted_state: state,
1053 2 : };
1054 2 :
1055 2 : sk = SafeKeeper::new(storage, sk.wal_store, NodeId(0)).unwrap();
1056 2 :
1057 2 : // and ensure voting second time for 1 is not ok
1058 2 : vote_resp = sk.process_msg(&vote_request).await;
1059 2 : match vote_resp.unwrap() {
1060 2 : Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given == 0),
1061 2 : r => panic!("unexpected response: {:?}", r),
1062 2 : }
1063 2 : }
1064 :
1065 : #[tokio::test]
1066 2 : async fn test_epoch_switch() {
1067 2 : let storage = InMemoryState {
1068 2 : persisted_state: test_sk_state(),
1069 2 : };
1070 2 : let wal_store = DummyWalStore { lsn: Lsn(0) };
1071 2 :
1072 2 : let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();
1073 2 :
1074 2 : let mut ar_hdr = AppendRequestHeader {
1075 2 : term: 1,
1076 2 : epoch_start_lsn: Lsn(3),
1077 2 : begin_lsn: Lsn(1),
1078 2 : end_lsn: Lsn(2),
1079 2 : commit_lsn: Lsn(0),
1080 2 : truncate_lsn: Lsn(0),
1081 2 : proposer_uuid: [0; 16],
1082 2 : };
1083 2 : let mut append_request = AppendRequest {
1084 2 : h: ar_hdr.clone(),
1085 2 : wal_data: Bytes::from_static(b"b"),
1086 2 : };
1087 2 :
1088 2 : let pem = ProposerElected {
1089 2 : term: 1,
1090 2 : start_streaming_at: Lsn(1),
1091 2 : term_history: TermHistory(vec![TermLsn {
1092 2 : term: 1,
1093 2 : lsn: Lsn(3),
1094 2 : }]),
1095 2 : timeline_start_lsn: Lsn(0),
1096 2 : };
1097 2 : sk.process_msg(&ProposerAcceptorMessage::Elected(pem))
1098 2 : .await
1099 2 : .unwrap();
1100 2 :
1101 2 : // check that AppendRequest before epochStartLsn doesn't switch epoch
1102 2 : let resp = sk
1103 2 : .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request))
1104 2 : .await;
1105 2 : assert!(resp.is_ok());
1106 2 : assert_eq!(sk.get_epoch(), 0);
1107 2 :
1108 2 : // but record at epochStartLsn does the switch
1109 2 : ar_hdr.begin_lsn = Lsn(2);
1110 2 : ar_hdr.end_lsn = Lsn(3);
1111 2 : append_request = AppendRequest {
1112 2 : h: ar_hdr,
1113 2 : wal_data: Bytes::from_static(b"b"),
1114 2 : };
1115 2 : let resp = sk
1116 2 : .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request))
1117 2 : .await;
1118 2 : assert!(resp.is_ok());
1119 2 : sk.wal_store.truncate_wal(Lsn(3)).await.unwrap(); // imitate the complete record at 3 %)
1120 2 : assert_eq!(sk.get_epoch(), 1);
1121 2 : }
1122 :
1123 : #[test]
1124 2 : fn test_find_highest_common_point_none() {
1125 2 : let prop_th = TermHistory(vec![(0, Lsn(1)).into()]);
1126 2 : let sk_th = TermHistory(vec![(1, Lsn(1)).into(), (2, Lsn(2)).into()]);
1127 2 : assert_eq!(
1128 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(3),),
1129 2 : None
1130 2 : );
1131 2 : }
1132 :
1133 : #[test]
1134 2 : fn test_find_highest_common_point_middle() {
1135 2 : let prop_th = TermHistory(vec![
1136 2 : (1, Lsn(10)).into(),
1137 2 : (2, Lsn(20)).into(),
1138 2 : (4, Lsn(40)).into(),
1139 2 : ]);
1140 2 : let sk_th = TermHistory(vec![
1141 2 : (1, Lsn(10)).into(),
1142 2 : (2, Lsn(20)).into(),
1143 2 : (3, Lsn(30)).into(), // sk ends last common term 2 at 30
1144 2 : ]);
1145 2 : assert_eq!(
1146 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(40),),
1147 2 : Some(TermLsn {
1148 2 : term: 2,
1149 2 : lsn: Lsn(30),
1150 2 : })
1151 2 : );
1152 2 : }
1153 :
1154 : #[test]
1155 2 : fn test_find_highest_common_point_sk_end() {
1156 2 : let prop_th = TermHistory(vec![
1157 2 : (1, Lsn(10)).into(),
1158 2 : (2, Lsn(20)).into(), // last common term 2, sk will end it at 32 sk_end_lsn
1159 2 : (4, Lsn(40)).into(),
1160 2 : ]);
1161 2 : let sk_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1162 2 : assert_eq!(
1163 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(32),),
1164 2 : Some(TermLsn {
1165 2 : term: 2,
1166 2 : lsn: Lsn(32),
1167 2 : })
1168 2 : );
1169 2 : }
1170 :
1171 : #[test]
1172 2 : fn test_find_highest_common_point_walprop() {
1173 2 : let prop_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1174 2 : let sk_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1175 2 : assert_eq!(
1176 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(32),),
1177 2 : Some(TermLsn {
1178 2 : term: 2,
1179 2 : lsn: Lsn(32),
1180 2 : })
1181 2 : );
1182 2 : }
1183 :
1184 : #[test]
1185 2 : fn test_sk_state_bincode_serde_roundtrip() {
1186 2 : use utils::Hex;
1187 2 : let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
1188 2 : let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
1189 2 : let state = TimelinePersistentState {
1190 2 : tenant_id,
1191 2 : timeline_id,
1192 2 : acceptor_state: AcceptorState {
1193 2 : term: 42,
1194 2 : term_history: TermHistory(vec![TermLsn {
1195 2 : lsn: Lsn(0x1),
1196 2 : term: 41,
1197 2 : }]),
1198 2 : },
1199 2 : server: ServerInfo {
1200 2 : pg_version: 14,
1201 2 : system_id: 0x1234567887654321,
1202 2 : wal_seg_size: 0x12345678,
1203 2 : },
1204 2 : proposer_uuid: {
1205 2 : let mut arr = timeline_id.as_arr();
1206 2 : arr.reverse();
1207 2 : arr
1208 2 : },
1209 2 : timeline_start_lsn: Lsn(0x12345600),
1210 2 : local_start_lsn: Lsn(0x12),
1211 2 : commit_lsn: Lsn(1234567800),
1212 2 : backup_lsn: Lsn(1234567300),
1213 2 : peer_horizon_lsn: Lsn(9999999),
1214 2 : remote_consistent_lsn: Lsn(1234560000),
1215 2 : peers: PersistedPeers(vec![(
1216 2 : NodeId(1),
1217 2 : PersistedPeerInfo {
1218 2 : backup_lsn: Lsn(1234567000),
1219 2 : term: 42,
1220 2 : flush_lsn: Lsn(1234567800 - 8),
1221 2 : commit_lsn: Lsn(1234567600),
1222 2 : },
1223 2 : )]),
1224 2 : partial_backup: crate::wal_backup_partial::State::default(),
1225 2 : };
1226 2 :
1227 2 : let ser = state.ser().unwrap();
1228 2 :
1229 2 : #[rustfmt::skip]
1230 2 : let expected = [
1231 2 : // tenant_id as length prefixed hex
1232 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1233 2 : 0x63, 0x66, 0x30, 0x34, 0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36,
1234 2 : // timeline_id as length prefixed hex
1235 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1236 2 : 0x31, 0x31, 0x32, 0x64, 0x65, 0x64, 0x36, 0x36, 0x34, 0x32, 0x32, 0x61, 0x61, 0x35, 0x65, 0x39, 0x35, 0x33, 0x65, 0x35, 0x34, 0x34, 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34,
1237 2 : // term
1238 2 : 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1239 2 : // length prefix
1240 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1241 2 : // unsure why this order is swapped
1242 2 : 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1243 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1244 2 : // pg_version
1245 2 : 0x0e, 0x00, 0x00, 0x00,
1246 2 : // systemid
1247 2 : 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
1248 2 : // wal_seg_size
1249 2 : 0x78, 0x56, 0x34, 0x12,
1250 2 : // pguuid as length prefixed hex
1251 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1252 2 : 0x63, 0x34, 0x37, 0x61, 0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39, 0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31,
1253 2 :
1254 2 : // timeline_start_lsn
1255 2 : 0x00, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
1256 2 : 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1257 2 : 0x78, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1258 2 : 0x84, 0x00, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1259 2 : 0x7f, 0x96, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00,
1260 2 : 0x00, 0xe4, 0x95, 0x49, 0x00, 0x00, 0x00, 0x00,
1261 2 : // length prefix for persistentpeers
1262 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1263 2 : // nodeid
1264 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1265 2 : // backuplsn
1266 2 : 0x58, 0xff, 0x95, 0x49, 0x00, 0x00, 0x00, 0x00,
1267 2 : 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1268 2 : 0x70, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1269 2 : 0xb0, 0x01, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1270 2 : // partial_backup
1271 2 : 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1272 2 : ];
1273 2 :
1274 2 : assert_eq!(Hex(&ser), Hex(&expected));
1275 :
1276 2 : let deser = TimelinePersistentState::des(&ser).unwrap();
1277 2 :
1278 2 : assert_eq!(deser, state);
1279 2 : }
1280 : }
|