Line data Source code
1 : //! Acceptor part of proposer-acceptor consensus algorithm.
2 :
3 : use anyhow::{bail, Context, Result};
4 : use byteorder::{LittleEndian, ReadBytesExt};
5 : use bytes::{Buf, BufMut, Bytes, BytesMut};
6 :
7 : use postgres_ffi::{TimeLineID, MAX_SEND_SIZE};
8 : use serde::{Deserialize, Serialize};
9 : use std::cmp::max;
10 : use std::cmp::min;
11 : use std::fmt;
12 : use std::io::Read;
13 : use std::time::Duration;
14 : use storage_broker::proto::SafekeeperTimelineInfo;
15 :
16 : use tracing::*;
17 :
18 : use crate::control_file;
19 : use crate::send_wal::HotStandbyFeedback;
20 :
21 : use crate::state::TimelineState;
22 : use crate::wal_storage;
23 : use pq_proto::SystemId;
24 : use utils::pageserver_feedback::PageserverFeedback;
25 : use utils::{
26 : bin_ser::LeSer,
27 : id::{NodeId, TenantId, TimelineId},
28 : lsn::Lsn,
29 : };
30 :
31 : const SK_PROTOCOL_VERSION: u32 = 2;
32 : pub const UNKNOWN_SERVER_VERSION: u32 = 0;
33 :
34 : /// Consensus logical timestamp.
35 : pub type Term = u64;
36 : pub const INVALID_TERM: Term = 0;
37 :
38 16 : #[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
39 : pub struct TermLsn {
40 : pub term: Term,
41 : pub lsn: Lsn,
42 : }
43 :
44 : // Creation from tuple provides less typing (e.g. for unit tests).
45 : impl From<(Term, Lsn)> for TermLsn {
46 36 : fn from(pair: (Term, Lsn)) -> TermLsn {
47 36 : TermLsn {
48 36 : term: pair.0,
49 36 : lsn: pair.1,
50 36 : }
51 36 : }
52 : }
53 :
54 217330 : #[derive(Clone, Serialize, Deserialize, PartialEq)]
55 : pub struct TermHistory(pub Vec<TermLsn>);
56 :
57 : impl TermHistory {
58 11455 : pub fn empty() -> TermHistory {
59 11455 : TermHistory(Vec::new())
60 11455 : }
61 :
62 : // Parse TermHistory as n_entries followed by TermLsn pairs
63 6419 : pub fn from_bytes(bytes: &mut Bytes) -> Result<TermHistory> {
64 6419 : if bytes.remaining() < 4 {
65 0 : bail!("TermHistory misses len");
66 6419 : }
67 6419 : let n_entries = bytes.get_u32_le();
68 6419 : let mut res = Vec::with_capacity(n_entries as usize);
69 6419 : for _ in 0..n_entries {
70 58888 : if bytes.remaining() < 16 {
71 0 : bail!("TermHistory is incomplete");
72 58888 : }
73 58888 : res.push(TermLsn {
74 58888 : term: bytes.get_u64_le(),
75 58888 : lsn: bytes.get_u64_le().into(),
76 58888 : })
77 : }
78 6419 : Ok(TermHistory(res))
79 6419 : }
80 :
81 : /// Return a copy of self with the term switches happening strictly after
82 : /// `up_to` truncated.
83 28551 : pub fn up_to(&self, up_to: Lsn) -> TermHistory {
84 28551 : let mut res = Vec::with_capacity(self.0.len());
85 173970 : for e in &self.0 {
86 145451 : if e.lsn > up_to {
87 32 : break;
88 145419 : }
89 145419 : res.push(*e);
90 : }
91 28551 : TermHistory(res)
92 28551 : }
93 :
94 : /// Find the point of divergence between the leader's (walproposer's) term
95 : /// history and the safekeeper's. The arguments are not symmetric, as the
96 : /// proposer's history ends at +infinity while the safekeeper's ends at flush_lsn.
97 : /// The C version lives in walproposer's SendProposerElected.
98 8 : pub fn find_highest_common_point(
99 8 : prop_th: &TermHistory,
100 8 : sk_th: &TermHistory,
101 8 : sk_wal_end: Lsn,
102 8 : ) -> Option<TermLsn> {
103 8 : let (prop_th, sk_th) = (&prop_th.0, &sk_th.0); // avoid .0 below
104 :
105 8 : if let Some(sk_th_last) = sk_th.last() {
106 8 : assert!(
107 8 : sk_th_last.lsn <= sk_wal_end,
108 0 : "safekeeper term history end {:?} LSN is higher than WAL end {:?}",
109 : sk_th_last,
110 : sk_wal_end
111 : );
112 0 : }
113 :
114 : // find last common term, if any...
115 8 : let mut last_common_idx = None;
116 16 : for i in 0..min(sk_th.len(), prop_th.len()) {
117 16 : if prop_th[i].term != sk_th[i].term {
118 4 : break;
119 12 : }
120 12 : // If term is the same, LSN must be equal as well.
121 12 : assert!(
122 12 : prop_th[i].lsn == sk_th[i].lsn,
123 0 : "same term {} has different start LSNs: prop {}, sk {}",
124 0 : prop_th[i].term,
125 0 : prop_th[i].lsn,
126 0 : sk_th[i].lsn
127 : );
128 12 : last_common_idx = Some(i);
129 : }
130 8 : let last_common_idx = match last_common_idx {
131 2 : None => return None, // no common point
132 6 : Some(lci) => lci,
133 6 : };
134 6 : // Now find where it ends at both prop and sk and take min. End of
135 6 : // (common) term is the start of the next except it is the last one;
136 6 : // there it is flush_lsn in case of safekeeper or, in case of proposer
137 6 : // +infinity, so we just take flush_lsn then.
138 6 : if last_common_idx == prop_th.len() - 1 {
139 2 : Some(TermLsn {
140 2 : term: prop_th[last_common_idx].term,
141 2 : lsn: sk_wal_end,
142 2 : })
143 : } else {
144 4 : let prop_common_term_end = prop_th[last_common_idx + 1].lsn;
145 4 : let sk_common_term_end = if last_common_idx + 1 < sk_th.len() {
146 2 : sk_th[last_common_idx + 1].lsn
147 : } else {
148 2 : sk_wal_end
149 : };
150 4 : Some(TermLsn {
151 4 : term: prop_th[last_common_idx].term,
152 4 : lsn: min(prop_common_term_end, sk_common_term_end),
153 4 : })
154 : }
155 8 : }
156 : }
157 :
158 : /// Display only latest entries for Debug.
159 : impl fmt::Debug for TermHistory {
160 400 : fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
161 400 : let n_printed = 20;
162 400 : write!(
163 400 : fmt,
164 400 : "{}{:?}",
165 400 : if self.0.len() > n_printed { "... " } else { "" },
166 400 : self.0
167 400 : .iter()
168 400 : .rev()
169 400 : .take(n_printed)
170 2216 : .map(|&e| (e.term, e.lsn)) // omit TermSwitchEntry
171 400 : .collect::<Vec<_>>()
172 400 : )
173 400 : }
174 : }
175 :
176 : /// Unique id of proposer. Not needed for correctness, used for monitoring.
177 : pub type PgUuid = [u8; 16];
178 :
179 : /// Persistent consensus state of the acceptor.
180 210909 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
181 : pub struct AcceptorState {
182 : /// acceptor's last term it voted for (advanced in 1 phase)
183 : pub term: Term,
184 : /// History of term switches for safekeeper's WAL.
185 : /// Actually it often goes *beyond* WAL contents as we adopt term history
186 : /// from the proposer before recovery.
187 : pub term_history: TermHistory,
188 : }
189 :
190 : impl AcceptorState {
191 : /// acceptor's epoch is the term of the highest entry in the log
192 6425 : pub fn get_epoch(&self, flush_lsn: Lsn) -> Term {
193 6425 : let th = self.term_history.up_to(flush_lsn);
194 6425 : match th.0.last() {
195 5439 : Some(e) => e.term,
196 986 : None => 0,
197 : }
198 6425 : }
199 : }
200 :
201 : /// Information about Postgres. The safekeeper gets it once and then verifies
202 : /// that all further connections from computes match it.
203 210909 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
204 : pub struct ServerInfo {
205 : /// Postgres server version
206 : pub pg_version: u32,
207 : pub system_id: SystemId,
208 : pub wal_seg_size: u32,
209 : }
210 :
211 8 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
212 : pub struct PersistedPeerInfo {
213 : /// LSN up to which safekeeper offloaded WAL to s3.
214 : pub backup_lsn: Lsn,
215 : /// Term of the last entry.
216 : pub term: Term,
217 : /// LSN of the last record.
218 : pub flush_lsn: Lsn,
219 : /// Up to which LSN safekeeper regards its WAL as committed.
220 : pub commit_lsn: Lsn,
221 : }
222 :
223 : impl PersistedPeerInfo {
224 0 : pub fn new() -> Self {
225 0 : Self {
226 0 : backup_lsn: Lsn::INVALID,
227 0 : term: INVALID_TERM,
228 0 : flush_lsn: Lsn(0),
229 0 : commit_lsn: Lsn(0),
230 0 : }
231 0 : }
232 : }
233 :
234 : // make clippy happy
235 : impl Default for PersistedPeerInfo {
236 0 : fn default() -> Self {
237 0 : Self::new()
238 0 : }
239 : }
240 :
241 : // protocol messages
242 :
243 : /// Initial Proposer -> Acceptor message
244 153540 : #[derive(Debug, Deserialize)]
245 : pub struct ProposerGreeting {
246 : /// proposer-acceptor protocol version
247 : pub protocol_version: u32,
248 : /// Postgres server version
249 : pub pg_version: u32,
250 : pub proposer_id: PgUuid,
251 : pub system_id: SystemId,
252 : pub timeline_id: TimelineId,
253 : pub tenant_id: TenantId,
254 : pub tli: TimeLineID,
255 : pub wal_seg_size: u32,
256 : }
257 :
258 : /// Acceptor -> Proposer initial response: the highest term known to me
259 : /// (acceptor voted for).
260 0 : #[derive(Debug, Serialize)]
261 : pub struct AcceptorGreeting {
262 : term: u64,
263 : node_id: NodeId,
264 : }
265 :
266 : /// Vote request sent from proposer to safekeepers
267 22122 : #[derive(Debug, Deserialize)]
268 : pub struct VoteRequest {
269 : pub term: Term,
270 : }
271 :
272 : /// Vote itself, sent from safekeeper to proposer
273 236 : #[derive(Debug, Serialize)]
274 : pub struct VoteResponse {
275 : pub term: Term, // safekeeper's current term; if it is higher than proposer's, the compute is out of date.
276 : vote_given: u64, // fixme u64 due to padding
277 : // Safekeeper flush_lsn (end of WAL) + history of term switches allow
278 : // proposer to choose the most advanced one.
279 : pub flush_lsn: Lsn,
280 : truncate_lsn: Lsn,
281 : pub term_history: TermHistory,
282 : timeline_start_lsn: Lsn,
283 : }
284 :
285 : /*
286 : * Proposer -> Acceptor message announcing proposer is elected and communicating
287 : * term history to it.
288 : */
289 164 : #[derive(Debug)]
290 : pub struct ProposerElected {
291 : pub term: Term,
292 : pub start_streaming_at: Lsn,
293 : pub term_history: TermHistory,
294 : pub timeline_start_lsn: Lsn,
295 : }
296 :
297 : /// Request with WAL message sent from proposer to safekeeper. Along the way it
298 : /// communicates commit_lsn.
299 272 : #[derive(Debug)]
300 : pub struct AppendRequest {
301 : pub h: AppendRequestHeader,
302 : pub wal_data: Bytes,
303 : }
304 21452 : #[derive(Debug, Clone, Deserialize)]
305 : pub struct AppendRequestHeader {
306 : // safekeeper's current term; if it is higher than proposer's, the compute is out of date.
307 : pub term: Term,
308 : // TODO: remove this field, it is unused -- the LSN of the term switch can be taken
309 : // from ProposerElected (as well as from term history).
310 : pub epoch_start_lsn: Lsn,
311 : /// start position of message in WAL
312 : pub begin_lsn: Lsn,
313 : /// end position of message in WAL
314 : pub end_lsn: Lsn,
315 : /// LSN committed by quorum of safekeepers
316 : pub commit_lsn: Lsn,
317 : /// minimal LSN which may be needed by proposer to perform recovery of some safekeeper
318 : pub truncate_lsn: Lsn,
319 : // only for logging/debugging
320 : pub proposer_uuid: PgUuid,
321 : }
322 :
323 : /// Report safekeeper state to proposer
324 0 : #[derive(Debug, Serialize)]
325 : pub struct AppendResponse {
326 : // Current term of the safekeeper; if it is higher than proposer's, the
327 : // compute is out of date.
328 : pub term: Term,
329 : // NOTE: this is the physical end of WAL on the safekeeper; currently it doesn't
330 : // make much sense without taking the epoch into account, as histories can
331 : // have diverged.
332 : pub flush_lsn: Lsn,
333 : // We report back our awareness about which WAL is committed, as this is
334 : // a criterion for walproposer --sync mode exit
335 : pub commit_lsn: Lsn,
336 : pub hs_feedback: HotStandbyFeedback,
337 : pub pageserver_feedback: PageserverFeedback,
338 : }
339 :
340 : impl AppendResponse {
341 1 : fn term_only(term: Term) -> AppendResponse {
342 1 : AppendResponse {
343 1 : term,
344 1 : flush_lsn: Lsn(0),
345 1 : commit_lsn: Lsn(0),
346 1 : hs_feedback: HotStandbyFeedback::empty(),
347 1 : pageserver_feedback: PageserverFeedback::empty(),
348 1 : }
349 1 : }
350 : }
351 :
352 : /// Proposer -> Acceptor messages
353 892 : #[derive(Debug)]
354 : pub enum ProposerAcceptorMessage {
355 : Greeting(ProposerGreeting),
356 : VoteRequest(VoteRequest),
357 : Elected(ProposerElected),
358 : AppendRequest(AppendRequest),
359 : NoFlushAppendRequest(AppendRequest),
360 : FlushWAL,
361 : }
362 :
363 : impl ProposerAcceptorMessage {
364 : /// Parse proposer message.
365 203533 : pub fn parse(msg_bytes: Bytes) -> Result<ProposerAcceptorMessage> {
366 203533 : // xxx using Reader is inefficient but easy to work with bincode
367 203533 : let mut stream = msg_bytes.reader();
368 : // u64 is here to avoid padding; it will be removed once we stop packing C structs into the wire as is
369 203533 : let tag = stream.read_u64::<LittleEndian>()? as u8 as char;
370 203533 : match tag {
371 : 'g' => {
372 153540 : let msg = ProposerGreeting::des_from(&mut stream)?;
373 153540 : Ok(ProposerAcceptorMessage::Greeting(msg))
374 : }
375 : 'v' => {
376 22122 : let msg = VoteRequest::des_from(&mut stream)?;
377 22122 : Ok(ProposerAcceptorMessage::VoteRequest(msg))
378 : }
379 : 'e' => {
380 6419 : let mut msg_bytes = stream.into_inner();
381 6419 : if msg_bytes.remaining() < 16 {
382 0 : bail!("ProposerElected message is not complete");
383 6419 : }
384 6419 : let term = msg_bytes.get_u64_le();
385 6419 : let start_streaming_at = msg_bytes.get_u64_le().into();
386 6419 : let term_history = TermHistory::from_bytes(&mut msg_bytes)?;
387 6419 : if msg_bytes.remaining() < 8 {
388 0 : bail!("ProposerElected message is not complete");
389 6419 : }
390 6419 : let timeline_start_lsn = msg_bytes.get_u64_le().into();
391 6419 : let msg = ProposerElected {
392 6419 : term,
393 6419 : start_streaming_at,
394 6419 : timeline_start_lsn,
395 6419 : term_history,
396 6419 : };
397 6419 : Ok(ProposerAcceptorMessage::Elected(msg))
398 : }
399 : 'a' => {
400 : // read header followed by wal data
401 21452 : let hdr = AppendRequestHeader::des_from(&mut stream)?;
402 21452 : let rec_size = hdr
403 21452 : .end_lsn
404 21452 : .checked_sub(hdr.begin_lsn)
405 21452 : .context("begin_lsn > end_lsn in AppendRequest")?
406 : .0 as usize;
407 21452 : if rec_size > MAX_SEND_SIZE {
408 0 : bail!(
409 0 : "AppendRequest is longer than MAX_SEND_SIZE ({})",
410 0 : MAX_SEND_SIZE
411 0 : );
412 21452 : }
413 21452 :
414 21452 : let mut wal_data_vec: Vec<u8> = vec![0; rec_size];
415 21452 : stream.read_exact(&mut wal_data_vec)?;
416 21452 : let wal_data = Bytes::from(wal_data_vec);
417 21452 : let msg = AppendRequest { h: hdr, wal_data };
418 21452 :
419 21452 : Ok(ProposerAcceptorMessage::AppendRequest(msg))
420 : }
421 0 : _ => bail!("unknown proposer-acceptor message tag: {}", tag),
422 : }
423 203533 : }
424 : }
425 :
426 : /// Acceptor -> Proposer messages
427 0 : #[derive(Debug)]
428 : pub enum AcceptorProposerMessage {
429 : Greeting(AcceptorGreeting),
430 : VoteResponse(VoteResponse),
431 : AppendResponse(AppendResponse),
432 : }
433 :
434 : impl AcceptorProposerMessage {
435 : /// Serialize acceptor -> proposer message.
436 193490 : pub fn serialize(&self, buf: &mut BytesMut) -> Result<()> {
437 193490 : match self {
438 153540 : AcceptorProposerMessage::Greeting(msg) => {
439 153540 : buf.put_u64_le('g' as u64);
440 153540 : buf.put_u64_le(msg.term);
441 153540 : buf.put_u64_le(msg.node_id.0);
442 153540 : }
443 22122 : AcceptorProposerMessage::VoteResponse(msg) => {
444 22122 : buf.put_u64_le('v' as u64);
445 22122 : buf.put_u64_le(msg.term);
446 22122 : buf.put_u64_le(msg.vote_given);
447 22122 : buf.put_u64_le(msg.flush_lsn.into());
448 22122 : buf.put_u64_le(msg.truncate_lsn.into());
449 22122 : buf.put_u32_le(msg.term_history.0.len() as u32);
450 115715 : for e in &msg.term_history.0 {
451 93593 : buf.put_u64_le(e.term);
452 93593 : buf.put_u64_le(e.lsn.into());
453 93593 : }
454 22122 : buf.put_u64_le(msg.timeline_start_lsn.into());
455 : }
456 17828 : AcceptorProposerMessage::AppendResponse(msg) => {
457 17828 : buf.put_u64_le('a' as u64);
458 17828 : buf.put_u64_le(msg.term);
459 17828 : buf.put_u64_le(msg.flush_lsn.into());
460 17828 : buf.put_u64_le(msg.commit_lsn.into());
461 17828 : buf.put_i64_le(msg.hs_feedback.ts);
462 17828 : buf.put_u64_le(msg.hs_feedback.xmin);
463 17828 : buf.put_u64_le(msg.hs_feedback.catalog_xmin);
464 17828 :
465 17828 : msg.pageserver_feedback.serialize(buf);
466 17828 : }
467 : }
468 :
469 193490 : Ok(())
470 193490 : }
471 : }
472 :
473 : /// Safekeeper implements consensus to reliably persist WAL across nodes.
474 : /// It controls all WAL disk writes and updates of control file.
475 : ///
476 : /// Currently safekeeper processes:
477 : /// - messages from compute (proposers) and provides replies
478 : /// - messages from broker peers
479 : pub struct SafeKeeper<CTRL: control_file::Storage, WAL: wal_storage::Storage> {
480 : /// LSN since which the proposer this safekeeper is currently talking to
481 : /// appends WAL; it determines the epoch switch point.
482 : pub epoch_start_lsn: Lsn,
483 :
484 : pub state: TimelineState<CTRL>, // persistent state storage
485 : pub wal_store: WAL,
486 :
487 : node_id: NodeId, // safekeeper's node id
488 : }
489 :
490 : impl<CTRL, WAL> SafeKeeper<CTRL, WAL>
491 : where
492 : CTRL: control_file::Storage,
493 : WAL: wal_storage::Storage,
494 : {
495 : /// Accepts a control file storage containing the safekeeper state.
496 : /// State must be initialized, i.e. contain filled `tenant_id`, `timeline_id`
497 : /// and `server` (`wal_seg_size` inside it) fields.
498 68765 : pub fn new(state: CTRL, wal_store: WAL, node_id: NodeId) -> Result<SafeKeeper<CTRL, WAL>> {
499 68765 : if state.tenant_id == TenantId::from([0u8; 16])
500 68765 : || state.timeline_id == TimelineId::from([0u8; 16])
501 : {
502 0 : bail!(
503 0 : "Calling SafeKeeper::new with empty tenant_id ({}) or timeline_id ({})",
504 0 : state.tenant_id,
505 0 : state.timeline_id
506 0 : );
507 68765 : }
508 68765 :
509 68765 : Ok(SafeKeeper {
510 68765 : epoch_start_lsn: Lsn(0),
511 68765 : state: TimelineState::new(state),
512 68765 : wal_store,
513 68765 : node_id,
514 68765 : })
515 68765 : }
516 :
517 : /// Get history of term switches for the available WAL
518 22126 : fn get_term_history(&self) -> TermHistory {
519 22126 : self.state
520 22126 : .acceptor_state
521 22126 : .term_history
522 22126 : .up_to(self.flush_lsn())
523 22126 : }
524 :
525 : /// Get current term.
526 0 : pub fn get_term(&self) -> Term {
527 0 : self.state.acceptor_state.term
528 0 : }
529 :
530 6425 : pub fn get_epoch(&self) -> Term {
531 6425 : self.state.acceptor_state.get_epoch(self.flush_lsn())
532 6425 : }
533 :
534 : /// wal_store wrapper avoiding commit_lsn <= flush_lsn violation when we don't have WAL yet.
535 79671 : pub fn flush_lsn(&self) -> Lsn {
536 79671 : max(self.wal_store.flush_lsn(), self.state.timeline_start_lsn)
537 79671 : }
538 :
539 : /// Process message from proposer and possibly form reply. Concurrent
540 : /// callers must exclude each other.
541 221370 : pub async fn process_msg(
542 221370 : &mut self,
543 221370 : msg: &ProposerAcceptorMessage,
544 221370 : ) -> Result<Option<AcceptorProposerMessage>> {
545 221370 : match msg {
546 153540 : ProposerAcceptorMessage::Greeting(msg) => self.handle_greeting(msg).await,
547 22126 : ProposerAcceptorMessage::VoteRequest(msg) => self.handle_vote_request(msg).await,
548 6421 : ProposerAcceptorMessage::Elected(msg) => self.handle_elected(msg).await,
549 4 : ProposerAcceptorMessage::AppendRequest(msg) => {
550 4 : self.handle_append_request(msg, true).await
551 : }
552 21452 : ProposerAcceptorMessage::NoFlushAppendRequest(msg) => {
553 21452 : self.handle_append_request(msg, false).await
554 : }
555 17827 : ProposerAcceptorMessage::FlushWAL => self.handle_flush().await,
556 : }
557 221370 : }
558 :
559 : /// Handle initial message from proposer: check its sanity and send my
560 : /// current term.
561 153540 : async fn handle_greeting(
562 153540 : &mut self,
563 153540 : msg: &ProposerGreeting,
564 153540 : ) -> Result<Option<AcceptorProposerMessage>> {
565 153540 : // Check protocol compatibility
566 153540 : if msg.protocol_version != SK_PROTOCOL_VERSION {
567 0 : bail!(
568 0 : "incompatible protocol version {}, expected {}",
569 0 : msg.protocol_version,
570 0 : SK_PROTOCOL_VERSION
571 0 : );
572 153540 : }
573 153540 : /* Postgres major version mismatch is treated as fatal error
574 153540 : * because safekeepers parse WAL headers and the format
575 153540 : * may change between versions.
576 153540 : */
577 153540 : if msg.pg_version / 10000 != self.state.server.pg_version / 10000
578 0 : && self.state.server.pg_version != UNKNOWN_SERVER_VERSION
579 : {
580 0 : bail!(
581 0 : "incompatible server version {}, expected {}",
582 0 : msg.pg_version,
583 0 : self.state.server.pg_version
584 0 : );
585 153540 : }
586 153540 :
587 153540 : if msg.tenant_id != self.state.tenant_id {
588 0 : bail!(
589 0 : "invalid tenant ID, got {}, expected {}",
590 0 : msg.tenant_id,
591 0 : self.state.tenant_id
592 0 : );
593 153540 : }
594 153540 : if msg.timeline_id != self.state.timeline_id {
595 0 : bail!(
596 0 : "invalid timeline ID, got {}, expected {}",
597 0 : msg.timeline_id,
598 0 : self.state.timeline_id
599 0 : );
600 153540 : }
601 153540 : if self.state.server.wal_seg_size != msg.wal_seg_size {
602 0 : bail!(
603 0 : "invalid wal_seg_size, got {}, expected {}",
604 0 : msg.wal_seg_size,
605 0 : self.state.server.wal_seg_size
606 0 : );
607 153540 : }
608 153540 :
609 153540 : // system_id will be updated on mismatch
610 153540 : // sync-safekeepers doesn't know sysid and sends 0, ignore it
611 153540 : if self.state.server.system_id != msg.system_id && msg.system_id != 0 {
612 0 : if self.state.server.system_id != 0 {
613 0 : warn!(
614 0 : "unexpected system ID arrived, got {}, expected {}",
615 0 : msg.system_id, self.state.server.system_id
616 0 : );
617 0 : }
618 :
619 0 : let mut state = self.state.start_change();
620 0 : state.server.system_id = msg.system_id;
621 0 : if msg.pg_version != UNKNOWN_SERVER_VERSION {
622 0 : state.server.pg_version = msg.pg_version;
623 0 : }
624 0 : self.state.finish_change(&state).await?;
625 153540 : }
626 :
627 302 : info!(
628 302 : "processed greeting from walproposer {}, sending term {:?}",
629 4832 : msg.proposer_id.map(|b| format!("{:X}", b)).join(""),
630 302 : self.state.acceptor_state.term
631 302 : );
632 153540 : Ok(Some(AcceptorProposerMessage::Greeting(AcceptorGreeting {
633 153540 : term: self.state.acceptor_state.term,
634 153540 : node_id: self.node_id,
635 153540 : })))
636 153540 : }
637 :
638 : /// Give vote for the given term, if we haven't done that previously.
639 22126 : async fn handle_vote_request(
640 22126 : &mut self,
641 22126 : msg: &VoteRequest,
642 22126 : ) -> Result<Option<AcceptorProposerMessage>> {
643 22126 : // Once voted, we won't accept data from older proposers; flush
644 22126 : // everything we've already received so that new proposer starts
645 22126 : // streaming at end of our WAL, without overlap. Currently we truncate
646 22126 : // WAL at streaming point, so this avoids truncating already committed
647 22126 : // WAL.
648 22126 : //
649 22126 : // TODO: it would be smoother to not truncate committed piece at
650 22126 : // handle_elected instead. Currently not a big deal, as proposer is the
651 22126 : // only source of WAL; with peer2peer recovery it would be more
652 22126 : // important.
653 22126 : self.wal_store.flush_wal().await?;
654 : // initialize with refusal
655 22126 : let mut resp = VoteResponse {
656 22126 : term: self.state.acceptor_state.term,
657 22126 : vote_given: false as u64,
658 22126 : flush_lsn: self.flush_lsn(),
659 22126 : truncate_lsn: self.state.inmem.peer_horizon_lsn,
660 22126 : term_history: self.get_term_history(),
661 22126 : timeline_start_lsn: self.state.timeline_start_lsn,
662 22126 : };
663 22126 : if self.state.acceptor_state.term < msg.term {
664 21021 : let mut state = self.state.start_change();
665 21021 : state.acceptor_state.term = msg.term;
666 21021 : // persist vote before sending it out
667 21021 : self.state.finish_change(&state).await?;
668 :
669 21021 : resp.term = self.state.acceptor_state.term;
670 21021 : resp.vote_given = true as u64;
671 1105 : }
672 236 : info!("processed VoteRequest for term {}: {:?}", msg.term, &resp);
673 22126 : Ok(Some(AcceptorProposerMessage::VoteResponse(resp)))
674 22126 : }
675 :
676 : /// Form AppendResponse from current state.
677 17831 : fn append_response(&self) -> AppendResponse {
678 17831 : let ar = AppendResponse {
679 17831 : term: self.state.acceptor_state.term,
680 17831 : flush_lsn: self.flush_lsn(),
681 17831 : commit_lsn: self.state.commit_lsn,
682 17831 : // will be filled by the upper code to avoid bothering safekeeper
683 17831 : hs_feedback: HotStandbyFeedback::empty(),
684 17831 : pageserver_feedback: PageserverFeedback::empty(),
685 17831 : };
686 17831 : trace!("formed AppendResponse {:?}", ar);
687 17831 : ar
688 17831 : }
689 :
690 6421 : async fn handle_elected(
691 6421 : &mut self,
692 6421 : msg: &ProposerElected,
693 6421 : ) -> Result<Option<AcceptorProposerMessage>> {
694 82 : info!("received ProposerElected {:?}", msg);
695 6421 : if self.state.acceptor_state.term < msg.term {
696 2 : let mut state = self.state.start_change();
697 2 : state.acceptor_state.term = msg.term;
698 2 : self.state.finish_change(&state).await?;
699 6419 : }
700 :
701 : // If our term is higher, ignore the message (next feedback will inform the compute)
702 6421 : if self.state.acceptor_state.term > msg.term {
703 0 : return Ok(None);
704 6421 : }
705 6421 :
706 6421 : // This might happen in a rare race when another (old) connection from
707 6421 : // the same walproposer writes + flushes WAL after this connection
708 6421 : // already sent flush_lsn in VoteRequest. It is generally safe to
709 6421 : // proceed, but to prevent commit_lsn surprisingly going down we should
710 6421 : // either refuse the session (simpler) or skip the part we already have
711 6421 : // from the stream (can be implemented).
712 6421 : if msg.term == self.get_epoch() && self.flush_lsn() > msg.start_streaming_at {
713 0 : bail!("refusing ProposerElected which is going to overwrite correct WAL: term={}, flush_lsn={}, start_streaming_at={}; restarting the handshake should help",
714 0 : msg.term, self.flush_lsn(), msg.start_streaming_at)
715 6421 : }
716 6421 : // Otherwise we must never attempt to truncate committed data.
717 6421 : assert!(
718 6421 : msg.start_streaming_at >= self.state.inmem.commit_lsn,
719 0 : "attempt to truncate committed data: start_streaming_at={}, commit_lsn={}",
720 : msg.start_streaming_at,
721 : self.state.inmem.commit_lsn
722 : );
723 :
724 : // TODO: cross check divergence point, check if msg.start_streaming_at corresponds to
725 : // intersection of our history and history from msg
726 :
727 : // truncate wal, update the LSNs
728 6421 : self.wal_store.truncate_wal(msg.start_streaming_at).await?;
729 :
730 : // and now adopt term history from proposer
731 : {
732 6421 : let mut state = self.state.start_change();
733 6421 :
734 6421 : // Here we learn the initial LSN for the first time, so set the fields
735 6421 : // that depend on it.
736 6421 :
737 6421 : if state.timeline_start_lsn == Lsn(0) {
738 : // Remember point where WAL begins globally.
739 984 : state.timeline_start_lsn = msg.timeline_start_lsn;
740 12 : info!(
741 12 : "setting timeline_start_lsn to {:?}",
742 12 : state.timeline_start_lsn
743 12 : );
744 5437 : }
745 6421 : if state.peer_horizon_lsn == Lsn(0) {
746 984 : // Update peer_horizon_lsn as soon as we know where timeline starts.
747 984 : // It means that peer_horizon_lsn cannot be zero after we know timeline_start_lsn.
748 984 : state.peer_horizon_lsn = msg.timeline_start_lsn;
749 5437 : }
750 6421 : if state.local_start_lsn == Lsn(0) {
751 984 : state.local_start_lsn = msg.start_streaming_at;
752 12 : info!("setting local_start_lsn to {:?}", state.local_start_lsn);
753 5437 : }
754 : // Initializing commit_lsn before acking first flushed record is
755 : // important to let find_end_of_wal skip the hole in the beginning
756 : // of the first segment.
757 : //
758 : // NB: on new clusters, this happens at the same time as
759 : // timeline_start_lsn initialization; it is kept outside that branch to
760 : // handle the upgrade of already existing timelines.
761 6421 : state.commit_lsn = max(state.commit_lsn, state.timeline_start_lsn);
762 6421 :
763 6421 : // Initializing backup_lsn is useful to avoid making the backup code think it should upload segment 0.
764 6421 : state.backup_lsn = max(state.backup_lsn, state.timeline_start_lsn);
765 6421 :
766 6421 : state.acceptor_state.term_history = msg.term_history.clone();
767 6421 : self.state.finish_change(&state).await?;
768 : }
769 :
770 82 : info!("start receiving WAL since {:?}", msg.start_streaming_at);
771 :
772 : // Cache the LSN where the term starts so we can immediately fsync the control
773 : // file with commit_lsn once we reach it -- sync-safekeepers finishes when the
774 : // persisted commit_lsn on a majority of safekeepers aligns.
775 6421 : self.epoch_start_lsn = match msg.term_history.0.last() {
776 0 : None => bail!("proposer elected with empty term history"),
777 6421 : Some(term_lsn_start) => term_lsn_start.lsn,
778 6421 : };
779 6421 :
780 6421 : Ok(None)
781 6421 : }
782 :
783 : /// Advance commit_lsn taking into account what we have locally.
784 : ///
785 : /// Note: it is assumed that 'WAL we have is from the right term' check has
786 : /// already been done outside.
787 10786 : async fn update_commit_lsn(&mut self, mut candidate: Lsn) -> Result<()> {
788 10786 : // Both peers and walproposer communicate this value, we might already
789 10786 : // have a fresher (higher) version.
790 10786 : candidate = max(candidate, self.state.inmem.commit_lsn);
791 10786 : let commit_lsn = min(candidate, self.flush_lsn());
792 10786 : assert!(
793 10786 : commit_lsn >= self.state.inmem.commit_lsn,
794 0 : "commit_lsn monotonicity violated: old={} new={}",
795 : self.state.inmem.commit_lsn,
796 : commit_lsn
797 : );
798 :
799 10786 : self.state.inmem.commit_lsn = commit_lsn;
800 10786 :
801 10786 : // If new commit_lsn reached epoch switch, force sync of control
802 10786 : // file: walproposer in sync mode is very interested when this
803 10786 : // happens. Note: this is for sync-safekeepers mode only, as
804 10786 : // otherwise commit_lsn might jump over epoch_start_lsn.
805 10786 : if commit_lsn >= self.epoch_start_lsn && self.state.commit_lsn < self.epoch_start_lsn {
806 834 : self.state.flush().await?;
807 9952 : }
808 :
809 10786 : Ok(())
810 10786 : }
811 :
812 : /// Persist control file if there is something to save and enough time
813 : /// passed after the last save.
814 0 : pub async fn maybe_persist_inmem_control_file(&mut self) -> Result<()> {
815 0 : const CF_SAVE_INTERVAL: Duration = Duration::from_secs(300);
816 0 : if self.state.pers.last_persist_at().elapsed() < CF_SAVE_INTERVAL {
817 0 : return Ok(());
818 0 : }
819 0 : let need_persist = self.state.inmem.commit_lsn > self.state.commit_lsn
820 0 : || self.state.inmem.backup_lsn > self.state.backup_lsn
821 0 : || self.state.inmem.peer_horizon_lsn > self.state.peer_horizon_lsn
822 0 : || self.state.inmem.remote_consistent_lsn > self.state.remote_consistent_lsn;
823 0 : if need_persist {
824 0 : self.state.flush().await?;
825 0 : trace!("saved control file: {CF_SAVE_INTERVAL:?} passed");
826 0 : }
827 0 : Ok(())
828 0 : }
829 :
830 : /// Handle request to append WAL.
831 : #[allow(clippy::comparison_chain)]
832 21456 : async fn handle_append_request(
833 21456 : &mut self,
834 21456 : msg: &AppendRequest,
835 21456 : require_flush: bool,
836 21456 : ) -> Result<Option<AcceptorProposerMessage>> {
837 21456 : if self.state.acceptor_state.term < msg.h.term {
838 0 : bail!("got AppendRequest before ProposerElected");
839 21456 : }
840 21456 :
841 21456 : // If our term is higher, immediately refuse the message.
842 21456 : if self.state.acceptor_state.term > msg.h.term {
843 1 : let resp = AppendResponse::term_only(self.state.acceptor_state.term);
844 1 : return Ok(Some(AcceptorProposerMessage::AppendResponse(resp)));
845 21455 : }
846 21455 :
847 21455 : // Now we know that we are in the same term as the proposer,
848 21455 : // processing the message.
849 21455 :
850 21455 : self.state.inmem.proposer_uuid = msg.h.proposer_uuid;
851 21455 :
852 21455 : // do the job
853 21455 : if !msg.wal_data.is_empty() {
854 3921 : self.wal_store
855 3921 : .write_wal(msg.h.begin_lsn, &msg.wal_data)
856 0 : .await?;
857 17534 : }
858 :
859 : // flush wal to the disk, if required
860 21455 : if require_flush {
861 4 : self.wal_store.flush_wal().await?;
862 21451 : }
863 :
864 : // Update commit_lsn.
865 21455 : if msg.h.commit_lsn != Lsn(0) {
866 10786 : self.update_commit_lsn(msg.h.commit_lsn).await?;
867 10669 : }
868 : // The value calculated by the walproposer can lag behind ours:
869 : // - safekeepers can forget the in-memory value and send the proposer a lower,
870 : // persisted one on restart;
871 : // - if we made safekeepers always send the persisted value,
872 : // any compute restart would pull it down.
873 : // Thus, take the max before adopting it.
874 21455 : self.state.inmem.peer_horizon_lsn =
875 21455 : max(self.state.inmem.peer_horizon_lsn, msg.h.truncate_lsn);
876 21455 :
877 21455 : // Update truncate and commit LSN in control file.
878 21455 : // To avoid negative impact on performance of extra fsync, do it only
879 21455 : // when commit_lsn delta exceeds WAL segment size.
880 21455 : if self.state.commit_lsn + (self.state.server.wal_seg_size as u64)
881 21455 : < self.state.inmem.commit_lsn
882 : {
883 0 : self.state.flush().await?;
884 21455 : }
885 :
886 0 : trace!(
887 0 : "processed AppendRequest of len {}, end_lsn={:?}, commit_lsn={:?}, truncate_lsn={:?}, flushed={:?}",
888 0 : msg.wal_data.len(),
889 0 : msg.h.end_lsn,
890 0 : msg.h.commit_lsn,
891 0 : msg.h.truncate_lsn,
892 0 : require_flush,
893 0 : );
894 :
895 : // If flush_lsn hasn't updated, AppendResponse is not very useful.
896 21455 : if !require_flush {
897 21451 : return Ok(None);
898 4 : }
899 4 :
900 4 : let resp = self.append_response();
901 4 : Ok(Some(AcceptorProposerMessage::AppendResponse(resp)))
902 21456 : }
903 :
904 : /// Flush WAL to disk. Return AppendResponse with latest LSNs.
905 17827 : async fn handle_flush(&mut self) -> Result<Option<AcceptorProposerMessage>> {
906 17827 : self.wal_store.flush_wal().await?;
907 17827 : Ok(Some(AcceptorProposerMessage::AppendResponse(
908 17827 : self.append_response(),
909 17827 : )))
910 17827 : }
911 :
912 : /// Update timeline state with peer safekeeper data.
913 0 : pub async fn record_safekeeper_info(&mut self, sk_info: &SafekeeperTimelineInfo) -> Result<()> {
914 0 : let mut sync_control_file = false;
915 0 :
916 0 : if (Lsn(sk_info.commit_lsn) != Lsn::INVALID) && (sk_info.last_log_term != INVALID_TERM) {
917 : // Note: the check is too restrictive; generally we can update the local
918 : // commit_lsn if our history matches (is a part of) the history of the
919 : // advanced commit_lsn provider.
920 0 : if sk_info.last_log_term == self.get_epoch() {
921 0 : self.update_commit_lsn(Lsn(sk_info.commit_lsn)).await?;
922 0 : }
923 0 : }
924 :
925 0 : self.state.inmem.backup_lsn = max(Lsn(sk_info.backup_lsn), self.state.inmem.backup_lsn);
926 0 : sync_control_file |= self.state.backup_lsn + (self.state.server.wal_seg_size as u64)
927 0 : < self.state.inmem.backup_lsn;
928 0 :
929 0 : self.state.inmem.remote_consistent_lsn = max(
930 0 : Lsn(sk_info.remote_consistent_lsn),
931 0 : self.state.inmem.remote_consistent_lsn,
932 0 : );
933 0 : sync_control_file |= self.state.remote_consistent_lsn
934 0 : + (self.state.server.wal_seg_size as u64)
935 0 : < self.state.inmem.remote_consistent_lsn;
936 0 :
937 0 : self.state.inmem.peer_horizon_lsn = max(
938 0 : Lsn(sk_info.peer_horizon_lsn),
939 0 : self.state.inmem.peer_horizon_lsn,
940 0 : );
941 0 : sync_control_file |= self.state.peer_horizon_lsn + (self.state.server.wal_seg_size as u64)
942 0 : < self.state.inmem.peer_horizon_lsn;
943 0 :
944 0 : if sync_control_file {
945 0 : self.state.flush().await?;
946 0 : }
947 0 : Ok(())
948 0 : }
949 : }
950 :
951 : #[cfg(test)]
952 : mod tests {
953 : use futures::future::BoxFuture;
954 : use postgres_ffi::{XLogSegNo, WAL_SEGMENT_SIZE};
955 :
956 : use super::*;
957 : use crate::{
958 : state::{PersistedPeers, TimelinePersistentState},
959 : wal_storage::Storage,
960 : };
961 : use std::{ops::Deref, str::FromStr, time::Instant};
962 :
963 : // fake storage for tests
964 : struct InMemoryState {
965 : persisted_state: TimelinePersistentState,
966 : }
967 :
968 : #[async_trait::async_trait]
969 : impl control_file::Storage for InMemoryState {
970 6 : async fn persist(&mut self, s: &TimelinePersistentState) -> Result<()> {
971 6 : self.persisted_state = s.clone();
972 6 : Ok(())
973 12 : }
974 :
975 0 : fn last_persist_at(&self) -> Instant {
976 0 : Instant::now()
977 0 : }
978 : }
979 :
980 : impl Deref for InMemoryState {
981 : type Target = TimelinePersistentState;
982 :
983 120 : fn deref(&self) -> &Self::Target {
984 120 : &self.persisted_state
985 120 : }
986 : }
987 :
988 4 : fn test_sk_state() -> TimelinePersistentState {
989 4 : let mut state = TimelinePersistentState::empty();
990 4 : state.server.wal_seg_size = WAL_SEGMENT_SIZE as u32;
991 4 : state.tenant_id = TenantId::from([1u8; 16]);
992 4 : state.timeline_id = TimelineId::from([1u8; 16]);
993 4 : state
994 4 : }
995 :
996 : struct DummyWalStore {
997 : lsn: Lsn,
998 : }
999 :
1000 : #[async_trait::async_trait]
1001 : impl wal_storage::Storage for DummyWalStore {
1002 18 : fn flush_lsn(&self) -> Lsn {
1003 18 : self.lsn
1004 18 : }
1005 :
1006 4 : async fn write_wal(&mut self, startpos: Lsn, buf: &[u8]) -> Result<()> {
1007 4 : self.lsn = startpos + buf.len() as u64;
1008 4 : Ok(())
1009 8 : }
1010 :
1011 4 : async fn truncate_wal(&mut self, end_pos: Lsn) -> Result<()> {
1012 4 : self.lsn = end_pos;
1013 4 : Ok(())
1014 8 : }
1015 :
1016 8 : async fn flush_wal(&mut self) -> Result<()> {
1017 8 : Ok(())
1018 16 : }
1019 :
1020 0 : fn remove_up_to(&self, _segno_up_to: XLogSegNo) -> BoxFuture<'static, anyhow::Result<()>> {
1021 0 : Box::pin(async { Ok(()) })
1022 0 : }
1023 :
1024 0 : fn get_metrics(&self) -> crate::metrics::WalStorageMetrics {
1025 0 : crate::metrics::WalStorageMetrics::default()
1026 0 : }
1027 : }
1028 :
1029 2 : #[tokio::test]
1030 2 : async fn test_voting() {
1031 2 : let storage = InMemoryState {
1032 2 : persisted_state: test_sk_state(),
1033 2 : };
1034 2 : let wal_store = DummyWalStore { lsn: Lsn(0) };
1035 2 : let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();
1036 2 :
1037 2 : // check voting for 1 is ok
1038 2 : let vote_request = ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 1 });
1039 2 : let mut vote_resp = sk.process_msg(&vote_request).await;
1040 2 : match vote_resp.unwrap() {
1041 2 : Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given != 0),
1042 2 : r => panic!("unexpected response: {:?}", r),
1043 2 : }
1044 2 :
1045 2 : // reboot...
1046 2 : let state = sk.state.deref().clone();
1047 2 : let storage = InMemoryState {
1048 2 : persisted_state: state,
1049 2 : };
1050 2 :
1051 2 : sk = SafeKeeper::new(storage, sk.wal_store, NodeId(0)).unwrap();
1052 2 :
1053 2 : // and ensure voting second time for 1 is not ok
1054 2 : vote_resp = sk.process_msg(&vote_request).await;
1055 2 : match vote_resp.unwrap() {
1056 2 : Some(AcceptorProposerMessage::VoteResponse(resp)) => assert!(resp.vote_given == 0),
1057 2 : r => panic!("unexpected response: {:?}", r),
1058 2 : }
1059 2 : }
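
    // A small check added for illustration: the Greeting reply should come out
    // exactly as the 'g' arm of AcceptorProposerMessage::serialize above writes
    // it -- a u64 tag followed by term and node_id, all little-endian. The
    // concrete term and node id values here are arbitrary.
    #[test]
    fn test_acceptor_greeting_serialize_layout() {
        let msg = AcceptorProposerMessage::Greeting(AcceptorGreeting {
            term: 42,
            node_id: NodeId(1),
        });
        let mut buf = BytesMut::new();
        msg.serialize(&mut buf).unwrap();
        // 'g' tag + term + node_id, each packed as a little-endian u64.
        assert_eq!(buf.len(), 24);
        assert_eq!(buf[0], b'g');
        assert_eq!(buf[8..16], 42u64.to_le_bytes());
        assert_eq!(buf[16..24], 1u64.to_le_bytes());
    }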
1060 :
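    // A sketch of the refusal path in handle_append_request: after the safekeeper
    // has voted for a higher term, an AppendRequest from an older term gets back a
    // bare AppendResponse carrying the current term (see AppendResponse::term_only).
    // The terms and LSNs used here are arbitrary.
    #[tokio::test]
    async fn test_append_request_from_stale_term() {
        let storage = InMemoryState {
            persisted_state: test_sk_state(),
        };
        let wal_store = DummyWalStore { lsn: Lsn(0) };
        let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();

        // vote for term 2, so term 1 below is stale
        sk.process_msg(&ProposerAcceptorMessage::VoteRequest(VoteRequest { term: 2 }))
            .await
            .unwrap();

        let append_request = AppendRequest {
            h: AppendRequestHeader {
                term: 1,
                epoch_start_lsn: Lsn(0),
                begin_lsn: Lsn(1),
                end_lsn: Lsn(1),
                commit_lsn: Lsn(0),
                truncate_lsn: Lsn(0),
                proposer_uuid: [0; 16],
            },
            wal_data: Bytes::new(),
        };
        match sk
            .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request))
            .await
            .unwrap()
        {
            Some(AcceptorProposerMessage::AppendResponse(resp)) => assert_eq!(resp.term, 2),
            r => panic!("unexpected response: {:?}", r),
        }
    }
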
1061 2 : #[tokio::test]
1062 2 : async fn test_epoch_switch() {
1063 2 : let storage = InMemoryState {
1064 2 : persisted_state: test_sk_state(),
1065 2 : };
1066 2 : let wal_store = DummyWalStore { lsn: Lsn(0) };
1067 2 :
1068 2 : let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();
1069 2 :
1070 2 : let mut ar_hdr = AppendRequestHeader {
1071 2 : term: 1,
1072 2 : epoch_start_lsn: Lsn(3),
1073 2 : begin_lsn: Lsn(1),
1074 2 : end_lsn: Lsn(2),
1075 2 : commit_lsn: Lsn(0),
1076 2 : truncate_lsn: Lsn(0),
1077 2 : proposer_uuid: [0; 16],
1078 2 : };
1079 2 : let mut append_request = AppendRequest {
1080 2 : h: ar_hdr.clone(),
1081 2 : wal_data: Bytes::from_static(b"b"),
1082 2 : };
1083 2 :
1084 2 : let pem = ProposerElected {
1085 2 : term: 1,
1086 2 : start_streaming_at: Lsn(1),
1087 2 : term_history: TermHistory(vec![TermLsn {
1088 2 : term: 1,
1089 2 : lsn: Lsn(3),
1090 2 : }]),
1091 2 : timeline_start_lsn: Lsn(0),
1092 2 : };
1093 2 : sk.process_msg(&ProposerAcceptorMessage::Elected(pem))
1094 2 : .await
1095 2 : .unwrap();
1096 2 :
1097 2 : // check that AppendRequest before epochStartLsn doesn't switch epoch
1098 2 : let resp = sk
1099 2 : .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request))
1100 2 : .await;
1101 2 : assert!(resp.is_ok());
1102 2 : assert_eq!(sk.get_epoch(), 0);
1103 2 :
1104 2 : // but record at epochStartLsn does the switch
1105 2 : ar_hdr.begin_lsn = Lsn(2);
1106 2 : ar_hdr.end_lsn = Lsn(3);
1107 2 : append_request = AppendRequest {
1108 2 : h: ar_hdr,
1109 2 : wal_data: Bytes::from_static(b"b"),
1110 2 : };
1111 2 : let resp = sk
1112 2 : .process_msg(&ProposerAcceptorMessage::AppendRequest(append_request))
1113 2 : .await;
1114 2 : assert!(resp.is_ok());
1115 2 : sk.wal_store.truncate_wal(Lsn(3)).await.unwrap(); // imitate the complete record at 3 %)
1116 2 : assert_eq!(sk.get_epoch(), 1);
1117 2 : }
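
    // A minimal sketch of the first sanity check in handle_greeting: an
    // unsupported protocol version is rejected before any other field is
    // inspected. All fields besides protocol_version are placeholder values.
    #[tokio::test]
    async fn test_greeting_protocol_version_check() {
        let storage = InMemoryState {
            persisted_state: test_sk_state(),
        };
        let wal_store = DummyWalStore { lsn: Lsn(0) };
        let mut sk = SafeKeeper::new(storage, wal_store, NodeId(0)).unwrap();

        let greeting = ProposerGreeting {
            protocol_version: SK_PROTOCOL_VERSION + 1, // deliberately unsupported
            pg_version: UNKNOWN_SERVER_VERSION,
            proposer_id: [0; 16],
            system_id: 0,
            timeline_id: TimelineId::from([1u8; 16]),
            tenant_id: TenantId::from([1u8; 16]),
            tli: 0,
            wal_seg_size: WAL_SEGMENT_SIZE as u32,
        };
        let res = sk
            .process_msg(&ProposerAcceptorMessage::Greeting(greeting))
            .await;
        assert!(res.is_err());
    }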
1118 :
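    // A sketch of parsing a hand-built ProposerElected frame. The byte layout
    // mirrors the 'e' arm of ProposerAcceptorMessage::parse above: a u64 tag,
    // term, start_streaming_at, a u32-prefixed term history, and
    // timeline_start_lsn, all little-endian. The values themselves are made up.
    #[test]
    fn test_parse_proposer_elected() {
        let mut buf = BytesMut::new();
        buf.put_u64_le('e' as u64); // message tag
        buf.put_u64_le(2); // term
        buf.put_u64_le(0x20); // start_streaming_at
        buf.put_u32_le(1); // one term history entry
        buf.put_u64_le(2); // entry term
        buf.put_u64_le(0x10); // entry LSN
        buf.put_u64_le(0x10); // timeline_start_lsn

        match ProposerAcceptorMessage::parse(buf.freeze()).unwrap() {
            ProposerAcceptorMessage::Elected(msg) => {
                assert_eq!(msg.term, 2);
                assert_eq!(msg.start_streaming_at, Lsn(0x20));
                assert_eq!(msg.term_history, TermHistory(vec![(2, Lsn(0x10)).into()]));
                assert_eq!(msg.timeline_start_lsn, Lsn(0x10));
            }
            r => panic!("unexpected message: {:?}", r),
        }
    }
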
1119 2 : #[test]
1120 2 : fn test_find_highest_common_point_none() {
1121 2 : let prop_th = TermHistory(vec![(0, Lsn(1)).into()]);
1122 2 : let sk_th = TermHistory(vec![(1, Lsn(1)).into(), (2, Lsn(2)).into()]);
1123 2 : assert_eq!(
1124 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(3),),
1125 2 : None
1126 2 : );
1127 2 : }
1128 :
1129 2 : #[test]
1130 2 : fn test_find_highest_common_point_middle() {
1131 2 : let prop_th = TermHistory(vec![
1132 2 : (1, Lsn(10)).into(),
1133 2 : (2, Lsn(20)).into(),
1134 2 : (4, Lsn(40)).into(),
1135 2 : ]);
1136 2 : let sk_th = TermHistory(vec![
1137 2 : (1, Lsn(10)).into(),
1138 2 : (2, Lsn(20)).into(),
1139 2 : (3, Lsn(30)).into(), // sk ends last common term 2 at 30
1140 2 : ]);
1141 2 : assert_eq!(
1142 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(40),),
1143 2 : Some(TermLsn {
1144 2 : term: 2,
1145 2 : lsn: Lsn(30),
1146 2 : })
1147 2 : );
1148 2 : }
1149 :
1150 2 : #[test]
1151 2 : fn test_find_highest_common_point_sk_end() {
1152 2 : let prop_th = TermHistory(vec![
1153 2 : (1, Lsn(10)).into(),
1154 2 : (2, Lsn(20)).into(), // last common term 2, sk will end it at 32 sk_end_lsn
1155 2 : (4, Lsn(40)).into(),
1156 2 : ]);
1157 2 : let sk_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1158 2 : assert_eq!(
1159 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(32),),
1160 2 : Some(TermLsn {
1161 2 : term: 2,
1162 2 : lsn: Lsn(32),
1163 2 : })
1164 2 : );
1165 2 : }
1166 :
1167 2 : #[test]
1168 2 : fn test_find_highest_common_point_walprop() {
1169 2 : let prop_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1170 2 : let sk_th = TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()]);
1171 2 : assert_eq!(
1172 2 : TermHistory::find_highest_common_point(&prop_th, &sk_th, Lsn(32),),
1173 2 : Some(TermLsn {
1174 2 : term: 2,
1175 2 : lsn: Lsn(32),
1176 2 : })
1177 2 : );
1178 2 : }
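
    // Illustrates the doc comments on TermHistory::up_to and
    // AcceptorState::get_epoch: term switches starting strictly after the given
    // LSN are dropped, and the epoch is the term of the last remaining entry
    // (or 0 if there is none). A sketch with made-up terms and LSNs.
    #[test]
    fn test_term_history_up_to_and_epoch() {
        let th = TermHistory(vec![
            (1, Lsn(10)).into(),
            (2, Lsn(20)).into(),
            (3, Lsn(30)).into(),
        ]);
        // The switch to term 3 at LSN 30 is strictly after 25, so it is dropped.
        assert_eq!(
            th.up_to(Lsn(25)),
            TermHistory(vec![(1, Lsn(10)).into(), (2, Lsn(20)).into()])
        );

        let acc = AcceptorState {
            term: 3,
            term_history: th,
        };
        // With WAL flushed up to 25, the highest entry is the switch to term 2.
        assert_eq!(acc.get_epoch(Lsn(25)), 2);
        // With no WAL at all there are no entries, so the epoch is 0.
        assert_eq!(acc.get_epoch(Lsn(0)), 0);
    }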
1179 :
1180 2 : #[test]
1181 2 : fn test_sk_state_bincode_serde_roundtrip() {
1182 2 : use utils::Hex;
1183 2 : let tenant_id = TenantId::from_str("cf0480929707ee75372337efaa5ecf96").unwrap();
1184 2 : let timeline_id = TimelineId::from_str("112ded66422aa5e953e5440fa5427ac4").unwrap();
1185 2 : let state = TimelinePersistentState {
1186 2 : tenant_id,
1187 2 : timeline_id,
1188 2 : acceptor_state: AcceptorState {
1189 2 : term: 42,
1190 2 : term_history: TermHistory(vec![TermLsn {
1191 2 : lsn: Lsn(0x1),
1192 2 : term: 41,
1193 2 : }]),
1194 2 : },
1195 2 : server: ServerInfo {
1196 2 : pg_version: 14,
1197 2 : system_id: 0x1234567887654321,
1198 2 : wal_seg_size: 0x12345678,
1199 2 : },
1200 2 : proposer_uuid: {
1201 2 : let mut arr = timeline_id.as_arr();
1202 2 : arr.reverse();
1203 2 : arr
1204 2 : },
1205 2 : timeline_start_lsn: Lsn(0x12345600),
1206 2 : local_start_lsn: Lsn(0x12),
1207 2 : commit_lsn: Lsn(1234567800),
1208 2 : backup_lsn: Lsn(1234567300),
1209 2 : peer_horizon_lsn: Lsn(9999999),
1210 2 : remote_consistent_lsn: Lsn(1234560000),
1211 2 : peers: PersistedPeers(vec![(
1212 2 : NodeId(1),
1213 2 : PersistedPeerInfo {
1214 2 : backup_lsn: Lsn(1234567000),
1215 2 : term: 42,
1216 2 : flush_lsn: Lsn(1234567800 - 8),
1217 2 : commit_lsn: Lsn(1234567600),
1218 2 : },
1219 2 : )]),
1220 2 : };
1221 2 :
1222 2 : let ser = state.ser().unwrap();
1223 2 :
1224 2 : #[rustfmt::skip]
1225 2 : let expected = [
1226 2 : // tenant_id as length prefixed hex
1227 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1228 2 : 0x63, 0x66, 0x30, 0x34, 0x38, 0x30, 0x39, 0x32, 0x39, 0x37, 0x30, 0x37, 0x65, 0x65, 0x37, 0x35, 0x33, 0x37, 0x32, 0x33, 0x33, 0x37, 0x65, 0x66, 0x61, 0x61, 0x35, 0x65, 0x63, 0x66, 0x39, 0x36,
1229 2 : // timeline_id as length prefixed hex
1230 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1231 2 : 0x31, 0x31, 0x32, 0x64, 0x65, 0x64, 0x36, 0x36, 0x34, 0x32, 0x32, 0x61, 0x61, 0x35, 0x65, 0x39, 0x35, 0x33, 0x65, 0x35, 0x34, 0x34, 0x30, 0x66, 0x61, 0x35, 0x34, 0x32, 0x37, 0x61, 0x63, 0x34,
1232 2 : // term
1233 2 : 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1234 2 : // length prefix
1235 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1236 2 : // unsure why this order is swapped
1237 2 : 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1238 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1239 2 : // pg_version
1240 2 : 0x0e, 0x00, 0x00, 0x00,
1241 2 : // systemid
1242 2 : 0x21, 0x43, 0x65, 0x87, 0x78, 0x56, 0x34, 0x12,
1243 2 : // wal_seg_size
1244 2 : 0x78, 0x56, 0x34, 0x12,
1245 2 : // pguuid as length prefixed hex
1246 2 : 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1247 2 : 0x63, 0x34, 0x37, 0x61, 0x34, 0x32, 0x61, 0x35, 0x30, 0x66, 0x34, 0x34, 0x65, 0x35, 0x35, 0x33, 0x65, 0x39, 0x61, 0x35, 0x32, 0x61, 0x34, 0x32, 0x36, 0x36, 0x65, 0x64, 0x32, 0x64, 0x31, 0x31,
1248 2 :
1249 2 : // timeline_start_lsn
1250 2 : 0x00, 0x56, 0x34, 0x12, 0x00, 0x00, 0x00, 0x00,
1251 2 : 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1252 2 : 0x78, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1253 2 : 0x84, 0x00, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1254 2 : 0x7f, 0x96, 0x98, 0x00, 0x00, 0x00, 0x00, 0x00,
1255 2 : 0x00, 0xe4, 0x95, 0x49, 0x00, 0x00, 0x00, 0x00,
1256 2 : // length prefix for persistentpeers
1257 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1258 2 : // nodeid
1259 2 : 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1260 2 : // backuplsn
1261 2 : 0x58, 0xff, 0x95, 0x49, 0x00, 0x00, 0x00, 0x00,
1262 2 : 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1263 2 : 0x70, 0x02, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1264 2 : 0xb0, 0x01, 0x96, 0x49, 0x00, 0x00, 0x00, 0x00,
1265 2 : ];
1266 2 :
1267 2 : assert_eq!(Hex(&ser), Hex(&expected));
1268 :
1269 2 : let deser = TimelinePersistentState::des(&ser).unwrap();
1270 2 :
1271 2 : assert_eq!(deser, state);
1272 2 : }
1273 : }