use std::{ffi::CString, sync::Arc};

use byteorder::{LittleEndian, WriteBytesExt};
use crc32c::crc32c_append;
use parking_lot::{Mutex, MutexGuard};
use postgres_ffi::{
    pg_constants::{
        RM_LOGICALMSG_ID, XLOG_LOGICAL_MESSAGE, XLP_LONG_HEADER, XLR_BLOCK_ID_DATA_LONG,
        XLR_BLOCK_ID_DATA_SHORT,
    },
    v16::{
        wal_craft_test_export::{XLogLongPageHeaderData, XLogPageHeaderData, XLOG_PAGE_MAGIC},
        xlog_utils::{
            XLogSegNoOffsetToRecPtr, XlLogicalMessage, XLOG_RECORD_CRC_OFFS,
            XLOG_SIZE_OF_XLOG_LONG_PHD, XLOG_SIZE_OF_XLOG_RECORD, XLOG_SIZE_OF_XLOG_SHORT_PHD,
            XLP_FIRST_IS_CONTRECORD,
        },
        XLogRecord,
    },
    WAL_SEGMENT_SIZE, XLOG_BLCKSZ,
};
use utils::lsn::Lsn;

use super::block_storage::BlockStorage;

/// Simulation implementation of walproposer WAL storage.
pub struct DiskWalProposer {
    state: Mutex<State>,
}

impl DiskWalProposer {
    pub fn new() -> Arc<DiskWalProposer> {
        Arc::new(DiskWalProposer {
            state: Mutex::new(State {
                internal_available_lsn: Lsn(0),
                prev_lsn: Lsn(0),
                disk: BlockStorage::new(),
            }),
        })
    }

    pub fn lock(&self) -> MutexGuard<State> {
        self.state.lock()
    }
}

pub struct State {
    // current insert/flush position, reported as flush_lsn
    internal_available_lsn: Lsn,
    // start of the previously inserted record (xl_prev), needed for WAL generation
    prev_lsn: Lsn,
    // actual WAL storage
    disk: BlockStorage,
}

impl State {
    pub fn read(&self, pos: u64, buf: &mut [u8]) {
        self.disk.read(pos, buf);
        // TODO: fail on reading uninitialized data
    }

    pub fn write(&mut self, pos: u64, buf: &[u8]) {
        self.disk.write(pos, buf);
    }

    /// Update the internal available LSN to the given value.
    pub fn reset_to(&mut self, lsn: Lsn) {
        self.internal_available_lsn = lsn;
    }

    /// Get the current LSN.
    pub fn flush_rec_ptr(&self) -> Lsn {
        self.internal_available_lsn
    }

    /// Generate a new logical message WAL record at the current LSN.
    pub fn insert_logical_message(&mut self, prefix: &str, msg: &[u8]) -> anyhow::Result<()> {
        let prefix_cstr = CString::new(prefix)?;
        let prefix_bytes = prefix_cstr.as_bytes_with_nul();

        let lm = XlLogicalMessage {
            db_id: 0,
            transactional: 0,
            prefix_size: prefix_bytes.len() as ::std::os::raw::c_ulong,
            message_size: msg.len() as ::std::os::raw::c_ulong,
        };

        let record_bytes = lm.encode();
        let rdatas: Vec<&[u8]> = vec![&record_bytes, prefix_bytes, msg];
        insert_wal_record(self, rdatas, RM_LOGICALMSG_ID, XLOG_LOGICAL_MESSAGE)
    }
}

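/// Assemble an XLOG record from `rdatas` and write it at the current insert
/// position: prepend the main-data block id and length, compute the CRC32C over
/// the payload and the header prefix, then hand the finished record to
/// `write_walrecord_to_disk`. On success, `internal_available_lsn` and
/// `prev_lsn` are updated. (A simplified take on PostgreSQL's record assembly;
/// block references and full-page images are not supported here.)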
fn insert_wal_record(
    state: &mut State,
    rdatas: Vec<&[u8]>,
    rmid: u8,
    info: u8,
) -> anyhow::Result<()> {
    // bytes right after the header, in the same rdata block
    let mut scratch = Vec::new();
    let mainrdata_len: usize = rdatas.iter().map(|rdata| rdata.len()).sum();

    if mainrdata_len > 0 {
        if mainrdata_len > 255 {
            scratch.push(XLR_BLOCK_ID_DATA_LONG);
            // TODO: verify endianness
            let _ = scratch.write_u32::<LittleEndian>(mainrdata_len as u32);
        } else {
            scratch.push(XLR_BLOCK_ID_DATA_SHORT);
            scratch.push(mainrdata_len as u8);
        }
    }

    let total_len: u32 = (XLOG_SIZE_OF_XLOG_RECORD + scratch.len() + mainrdata_len) as u32;
    let size = maxalign(total_len);
    assert!(size as usize > XLOG_SIZE_OF_XLOG_RECORD);

    let start_bytepos = recptr_to_bytepos(state.internal_available_lsn);
    let end_bytepos = start_bytepos + size as u64;

    let start_recptr = bytepos_to_recptr(start_bytepos);
    let end_recptr = bytepos_to_recptr(end_bytepos);

    assert!(recptr_to_bytepos(start_recptr) == start_bytepos);
    assert!(recptr_to_bytepos(end_recptr) == end_bytepos);

    let mut crc = crc32c_append(0, &scratch);
    for rdata in &rdatas {
        crc = crc32c_append(crc, rdata);
    }

    let mut header = XLogRecord {
        xl_tot_len: total_len,
        xl_xid: 0,
        xl_prev: state.prev_lsn.0,
        xl_info: info,
        xl_rmid: rmid,
        __bindgen_padding_0: [0u8; 2usize],
        xl_crc: crc,
    };

    // now we have the header and can finish the crc
    let header_bytes = header.encode()?;
    let crc = crc32c_append(crc, &header_bytes[0..XLOG_RECORD_CRC_OFFS]);
    header.xl_crc = crc;

    let mut header_bytes = header.encode()?.to_vec();
    assert!(header_bytes.len() == XLOG_SIZE_OF_XLOG_RECORD);

    header_bytes.extend_from_slice(&scratch);

    // finish rdatas
    let mut rdatas = rdatas;
    rdatas.insert(0, &header_bytes);

    write_walrecord_to_disk(state, total_len as u64, rdatas, start_recptr, end_recptr)?;

    state.internal_available_lsn = end_recptr;
    state.prev_lsn = start_recptr;
    Ok(())
}

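/// Copy the assembled record into the simulated WAL, page by page. Whenever the
/// record crosses a page boundary, a page header is emitted for the new page
/// (a long header at a segment start, a short one otherwise), with
/// XLP_FIRST_IS_CONTRECORD set if the record continues onto that page.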
fn write_walrecord_to_disk(
    state: &mut State,
    total_len: u64,
    rdatas: Vec<&[u8]>,
    start: Lsn,
    end: Lsn,
) -> anyhow::Result<()> {
    let mut curr_ptr = start;
    let mut freespace = insert_freespace(curr_ptr);
    let mut written: usize = 0;

    assert!(freespace >= size_of::<u32>());

    for mut rdata in rdatas {
        while rdata.len() >= freespace {
            assert!(
                curr_ptr.segment_offset(WAL_SEGMENT_SIZE) >= XLOG_SIZE_OF_XLOG_SHORT_PHD
                    || freespace == 0
            );

            state.write(curr_ptr.0, &rdata[..freespace]);
            rdata = &rdata[freespace..];
            written += freespace;
            curr_ptr = Lsn(curr_ptr.0 + freespace as u64);

            let mut new_page = XLogPageHeaderData {
                xlp_magic: XLOG_PAGE_MAGIC as u16,
                xlp_info: XLP_BKP_REMOVABLE,
                xlp_tli: 1,
                xlp_pageaddr: curr_ptr.0,
                xlp_rem_len: (total_len - written as u64) as u32,
                ..Default::default() // Put 0 in padding fields.
            };
            if new_page.xlp_rem_len > 0 {
                new_page.xlp_info |= XLP_FIRST_IS_CONTRECORD;
            }

            if curr_ptr.segment_offset(WAL_SEGMENT_SIZE) == 0 {
                new_page.xlp_info |= XLP_LONG_HEADER;
                let long_page = XLogLongPageHeaderData {
                    std: new_page,
                    xlp_sysid: 0,
                    xlp_seg_size: WAL_SEGMENT_SIZE as u32,
                    xlp_xlog_blcksz: XLOG_BLCKSZ as u32,
                };
                let header_bytes = long_page.encode()?;
                assert!(header_bytes.len() == XLOG_SIZE_OF_XLOG_LONG_PHD);
                state.write(curr_ptr.0, &header_bytes);
                curr_ptr = Lsn(curr_ptr.0 + header_bytes.len() as u64);
            } else {
                let header_bytes = new_page.encode()?;
                assert!(header_bytes.len() == XLOG_SIZE_OF_XLOG_SHORT_PHD);
                state.write(curr_ptr.0, &header_bytes);
                curr_ptr = Lsn(curr_ptr.0 + header_bytes.len() as u64);
            }
            freespace = insert_freespace(curr_ptr);
        }

        assert!(
            curr_ptr.segment_offset(WAL_SEGMENT_SIZE) >= XLOG_SIZE_OF_XLOG_SHORT_PHD
                || rdata.is_empty()
        );
        state.write(curr_ptr.0, rdata);
        curr_ptr = Lsn(curr_ptr.0 + rdata.len() as u64);
        written += rdata.len();
        freespace -= rdata.len();
    }

    assert!(written == total_len as usize);
    curr_ptr.0 = maxalign(curr_ptr.0);
    assert!(curr_ptr == end);
    Ok(())
}

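/// Round `size` up to the next multiple of 8 bytes, the alignment PostgreSQL
/// applies to WAL records (MAXALIGN on typical platforms).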
fn maxalign<T>(size: T) -> T
where
    T: std::ops::BitAnd<Output = T>
        + std::ops::Add<Output = T>
        + std::ops::Not<Output = T>
        + From<u8>,
{
    (size + T::from(7)) & !T::from(7)
}

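/// Usable bytes remaining on the WAL page containing `ptr`; returns 0 exactly at
/// a page boundary, where a page header must be written before any record data.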
fn insert_freespace(ptr: Lsn) -> usize {
    if ptr.block_offset() == 0 {
        0
    } else {
        (XLOG_BLCKSZ as u64 - ptr.block_offset()) as usize
    }
}

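// XLP_BKP_REMOVABLE mirrors the PostgreSQL page-header flag of the same name.
// The USABLE_BYTES_* constants give the record bytes that fit on one WAL page /
// in one WAL segment once per-page header overhead is subtracted; they drive
// the byte-position <-> LSN conversions below.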
const XLP_BKP_REMOVABLE: u16 = 0x0004;
const USABLE_BYTES_IN_PAGE: u64 = (XLOG_BLCKSZ - XLOG_SIZE_OF_XLOG_SHORT_PHD) as u64;
const USABLE_BYTES_IN_SEGMENT: u64 = ((WAL_SEGMENT_SIZE / XLOG_BLCKSZ) as u64
    * USABLE_BYTES_IN_PAGE)
    - (XLOG_SIZE_OF_XLOG_RECORD - XLOG_SIZE_OF_XLOG_SHORT_PHD) as u64;

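/// Convert a usable byte position (a count of record bytes that excludes page
/// headers) into an LSN. Inverse of `recptr_to_bytepos`; the round-trip is
/// asserted in `insert_wal_record`.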
fn bytepos_to_recptr(bytepos: u64) -> Lsn {
    let fullsegs = bytepos / USABLE_BYTES_IN_SEGMENT;
    let mut bytesleft = bytepos % USABLE_BYTES_IN_SEGMENT;

    let seg_offset = if bytesleft < (XLOG_BLCKSZ - XLOG_SIZE_OF_XLOG_SHORT_PHD) as u64 {
        // fits on first page of segment
        bytesleft + XLOG_SIZE_OF_XLOG_SHORT_PHD as u64
    } else {
        // account for the first page on segment with long header
        bytesleft -= (XLOG_BLCKSZ - XLOG_SIZE_OF_XLOG_SHORT_PHD) as u64;
        let fullpages = bytesleft / USABLE_BYTES_IN_PAGE;
        bytesleft %= USABLE_BYTES_IN_PAGE;

        XLOG_BLCKSZ as u64
            + fullpages * XLOG_BLCKSZ as u64
            + bytesleft
            + XLOG_SIZE_OF_XLOG_SHORT_PHD as u64
    };

    Lsn(XLogSegNoOffsetToRecPtr(
        fullsegs,
        seg_offset as u32,
        WAL_SEGMENT_SIZE,
    ))
}

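/// Convert an LSN into the corresponding usable byte position, i.e. the number
/// of record bytes preceding `ptr` once page headers are excluded.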
fn recptr_to_bytepos(ptr: Lsn) -> u64 {
    let fullsegs = ptr.segment_number(WAL_SEGMENT_SIZE);
    let offset = ptr.segment_offset(WAL_SEGMENT_SIZE) as u64;

    let fullpages = offset / XLOG_BLCKSZ as u64;
    let offset = offset % XLOG_BLCKSZ as u64;

    if fullpages == 0 {
        fullsegs * USABLE_BYTES_IN_SEGMENT
            + if offset > 0 {
                assert!(offset >= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64);
                offset - XLOG_SIZE_OF_XLOG_SHORT_PHD as u64
            } else {
                0
            }
    } else {
        fullsegs * USABLE_BYTES_IN_SEGMENT
            + (XLOG_BLCKSZ - XLOG_SIZE_OF_XLOG_SHORT_PHD) as u64
            + (fullpages - 1) * USABLE_BYTES_IN_PAGE
            + if offset > 0 {
                assert!(offset >= XLOG_SIZE_OF_XLOG_SHORT_PHD as u64);
                offset - XLOG_SIZE_OF_XLOG_SHORT_PHD as u64
            } else {
                0
            }
    }
}
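
// A minimal usage sketch (not part of the original module): it assumes the
// `BlockStorage`-backed `State` starts at `Lsn(0)` as constructed above, and
// only checks that inserting a logical message advances the flush pointer.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn logical_message_advances_flush_lsn() {
        let proposer = DiskWalProposer::new();
        let mut state = proposer.lock();

        let before = state.flush_rec_ptr();
        state
            .insert_logical_message("test-prefix", b"hello")
            .expect("inserting a logical message should succeed");

        // The flush pointer must move forward after a successful insert.
        assert!(state.flush_rec_ptr() > before);
    }
}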