Line data Source code
1 : //!
2 : //! Parse PostgreSQL WAL records and store them in a neon Timeline.
3 : //!
4 : //! The pipeline for ingesting WAL looks like this:
5 : //!
6 : //! WAL receiver -> WalIngest -> Repository
7 : //!
8 : //! The WAL receiver receives a stream of WAL from the WAL safekeepers,
9 : //! and decodes it to individual WAL records. It feeds the WAL records
10 : //! to WalIngest, which parses them and stores them in the Repository.
11 : //!
12 : //! The neon Repository can store page versions in two formats: as
13 : //! page images, or as WAL records. WalIngest::ingest_record() extracts
14 : //! page images out of some WAL records, but stores most of them as WAL
15 : //! records. If a WAL record modifies multiple pages, WalIngest
16 : //! calls the Repository::put_wal_record or put_page_image function
17 : //! separately for each modified page.
18 : //!
19 : //! To reconstruct a page using a WAL record, the Repository calls the
20 : //! code in walredo.rs. walredo.rs passes most WAL records to the WAL
21 : //! redo Postgres process, but it can handle some records directly with
22 : //! bespoke Rust code.
23 :
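The split between page images and WAL records described above is the key design point of the repository. A minimal, self-contained sketch of that model (all types here are simplified stand-ins, not the pageserver's real API):

```rust
// Toy model of the two page-version formats: a page version is either a
// ready-made image (no redo needed) or a raw WAL record that must be
// replayed over the previous version to reconstruct the page.
use std::collections::BTreeMap;

type Lsn = u64;

enum PageVersion {
    Image(Vec<u8>),
    WalRecord(Vec<u8>),
}

struct ToyRepository {
    // (page id, LSN) -> version; one "put" per modified page, mirroring
    // how WalIngest calls put_wal_record / put_page_image per block.
    versions: BTreeMap<(u32, Lsn), PageVersion>,
}

impl ToyRepository {
    fn put_page_image(&mut self, page: u32, lsn: Lsn, img: Vec<u8>) {
        self.versions.insert((page, lsn), PageVersion::Image(img));
    }
    fn put_wal_record(&mut self, page: u32, lsn: Lsn, rec: Vec<u8>) {
        self.versions.insert((page, lsn), PageVersion::WalRecord(rec));
    }
}

fn main() {
    let mut repo = ToyRepository { versions: BTreeMap::new() };
    // A record touching two pages is "put" separately for each page.
    repo.put_wal_record(1, 0x16B9188, b"xlog record".to_vec());
    repo.put_wal_record(2, 0x16B9188, b"xlog record".to_vec());
    // An FPI record yields a ready-made image instead.
    repo.put_page_image(3, 0x16B91C0, vec![0u8; 8192]);
}
```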
24 : use pageserver_api::shard::ShardIdentity;
25 : use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes;
26 : use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment;
27 : use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn};
28 :
29 : use anyhow::{bail, Context, Result};
30 : use bytes::{Buf, Bytes, BytesMut};
31 : use tracing::*;
32 : use utils::failpoint_support;
33 :
34 : use crate::context::RequestContext;
35 : use crate::metrics::WAL_INGEST;
36 : use crate::pgdatadir_mapping::{DatadirModification, Version};
37 : use crate::tenant::PageReconstructError;
38 : use crate::tenant::Timeline;
39 : use crate::walrecord::*;
40 : use crate::ZERO_PAGE;
41 : use pageserver_api::key::rel_block_to_key;
42 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
43 : use postgres_ffi::pg_constants;
44 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
45 : use postgres_ffi::v14::nonrelfile_utils::mx_offset_to_member_segment;
46 : use postgres_ffi::v14::xlog_utils::*;
47 : use postgres_ffi::v14::CheckPoint;
48 : use postgres_ffi::TransactionId;
49 : use postgres_ffi::BLCKSZ;
50 : use utils::lsn::Lsn;
51 :
52 : pub struct WalIngest {
53 : shard: ShardIdentity,
54 : checkpoint: CheckPoint,
55 : checkpoint_modified: bool,
56 : }
57 :
58 : impl WalIngest {
59 12 : pub async fn new(
60 12 : timeline: &Timeline,
61 12 : startpoint: Lsn,
62 12 : ctx: &RequestContext,
63 12 : ) -> anyhow::Result<WalIngest> {
64 : // Fetch the latest checkpoint into memory, so that we can compare with it
65 : // quickly in `ingest_record` and update it when it changes.
66 12 : let checkpoint_bytes = timeline.get_checkpoint(startpoint, ctx).await?;
67 12 : let checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
68 0 : trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
69 :
70 12 : Ok(WalIngest {
71 12 : shard: *timeline.get_shard_identity(),
72 12 : checkpoint,
73 12 : checkpoint_modified: false,
74 12 : })
75 12 : }
76 :
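`ingest_record` mutates this cached checkpoint freely while decoding and only writes it back when `checkpoint_modified` is set, at most once per record. A minimal sketch of that dirty-flag pattern, with invented names (not the real API):

```rust
// Generic dirty-flag cache in the spirit of the `checkpoint` /
// `checkpoint_modified` pair: update in memory, flush back lazily.
struct Cached<T: Clone + PartialEq> {
    value: T,
    modified: bool,
}

impl<T: Clone + PartialEq> Cached<T> {
    fn update(&mut self, new: T) {
        if self.value != new {
            self.value = new;
            self.modified = true;
        }
    }
    // Write back at most once, then clear the flag.
    fn flush(&mut self, mut store: impl FnMut(&T)) {
        if self.modified {
            store(&self.value);
            self.modified = false;
        }
    }
}

fn main() {
    let mut next_xid = Cached { value: 100u32, modified: false };
    next_xid.update(100); // no change: stays clean
    next_xid.update(101); // change: marked dirty
    next_xid.flush(|v| println!("persisting nextXid = {v}"));
}
```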
77 : ///
78 : /// Decode a PostgreSQL WAL record and store it in the repository, in the given timeline.
79 : ///
80 : /// This function updates the `lsn` field of the `DatadirModification`.
81 : ///
82 : /// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
83 : /// relations/pages that the record affects.
84 : ///
85 : /// This function returns `true` if the record was ingested, and `false` if it was filtered out.
86 : ///
87 145852 : pub async fn ingest_record(
88 145852 : &mut self,
89 145852 : recdata: Bytes,
90 145852 : lsn: Lsn,
91 145852 : modification: &mut DatadirModification<'_>,
92 145852 : decoded: &mut DecodedWALRecord,
93 145852 : ctx: &RequestContext,
94 145852 : ) -> anyhow::Result<bool> {
95 145852 : WAL_INGEST.records_received.inc();
96 145852 : let pg_version = modification.tline.pg_version;
97 145852 : let prev_len = modification.len();
98 145852 :
99 145852 : modification.set_lsn(lsn)?;
100 145852 : decode_wal_record(recdata, decoded, pg_version)?;
101 :
102 145852 : let mut buf = decoded.record.clone();
103 145852 : buf.advance(decoded.main_data_offset);
104 145852 :
105 145852 : assert!(!self.checkpoint_modified);
106 145852 : if decoded.xl_xid != pg_constants::INVALID_TRANSACTION_ID
107 145834 : && self.checkpoint.update_next_xid(decoded.xl_xid)
108 2 : {
109 2 : self.checkpoint_modified = true;
110 145850 : }
111 :
112 145852 : match decoded.xl_rmid {
113 : pg_constants::RM_HEAP_ID | pg_constants::RM_HEAP2_ID => {
114 : // Heap AM records need some special handling, because they modify VM pages
115 : // without registering them with the standard mechanism.
116 145474 : self.ingest_heapam_record(&mut buf, modification, decoded, ctx)
117 0 : .await?;
118 : }
119 : pg_constants::RM_NEON_ID => {
120 0 : self.ingest_neonrmgr_record(&mut buf, modification, decoded, ctx)
121 0 : .await?;
122 : }
123 : // Handle other special record types
124 : pg_constants::RM_SMGR_ID => {
125 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
126 16 :
127 16 : if info == pg_constants::XLOG_SMGR_CREATE {
128 16 : let create = XlSmgrCreate::decode(&mut buf);
129 16 : self.ingest_xlog_smgr_create(modification, &create, ctx)
130 6 : .await?;
131 0 : } else if info == pg_constants::XLOG_SMGR_TRUNCATE {
132 0 : let truncate = XlSmgrTruncate::decode(&mut buf);
133 0 : self.ingest_xlog_smgr_truncate(modification, &truncate, ctx)
134 0 : .await?;
135 0 : }
136 : }
137 : pg_constants::RM_DBASE_ID => {
138 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
139 0 : debug!(%info, %pg_version, "handle RM_DBASE_ID");
140 :
141 0 : if pg_version == 14 {
142 0 : if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
143 0 : let createdb = XlCreateDatabase::decode(&mut buf);
144 0 : debug!("XLOG_DBASE_CREATE v14");
145 :
146 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
147 0 : .await?;
148 0 : } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
149 0 : let dropdb = XlDropDatabase::decode(&mut buf);
150 0 : for tablespace_id in dropdb.tablespace_ids {
151 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
152 0 : modification
153 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
154 0 : .await?;
155 : }
156 0 : }
157 0 : } else if pg_version == 15 {
158 0 : if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
159 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
160 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
161 : // The XLOG record was renamed between v14 and v15,
162 : // but the record format is the same.
163 : // So we can reuse XlCreateDatabase here.
164 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
165 0 : let createdb = XlCreateDatabase::decode(&mut buf);
166 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
167 0 : .await?;
168 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
169 0 : let dropdb = XlDropDatabase::decode(&mut buf);
170 0 : for tablespace_id in dropdb.tablespace_ids {
171 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
172 0 : modification
173 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
174 0 : .await?;
175 : }
176 0 : }
177 0 : } else if pg_version == 16 {
178 0 : if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
179 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
180 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
181 : // The XLOG record was renamed between v14 and v15,
182 : // but the record format is the same.
183 : // So we can reuse XlCreateDatabase here.
184 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
185 0 : let createdb = XlCreateDatabase::decode(&mut buf);
186 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
187 0 : .await?;
188 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
189 0 : let dropdb = XlDropDatabase::decode(&mut buf);
190 0 : for tablespace_id in dropdb.tablespace_ids {
191 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
192 0 : modification
193 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
194 0 : .await?;
195 : }
196 0 : }
197 0 : }
198 : }
199 : pg_constants::RM_TBLSPC_ID => {
200 0 : trace!("XLOG_TBLSPC_CREATE/DROP is not handled yet");
201 : }
202 : pg_constants::RM_CLOG_ID => {
203 0 : let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
204 0 :
205 0 : if info == pg_constants::CLOG_ZEROPAGE {
206 0 : let pageno = buf.get_u32_le();
207 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
208 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
209 0 : self.put_slru_page_image(
210 0 : modification,
211 0 : SlruKind::Clog,
212 0 : segno,
213 0 : rpageno,
214 0 : ZERO_PAGE.clone(),
215 0 : ctx,
216 0 : )
217 0 : .await?;
218 : } else {
219 0 : assert!(info == pg_constants::CLOG_TRUNCATE);
220 0 : let xlrec = XlClogTruncate::decode(&mut buf);
221 0 : self.ingest_clog_truncate_record(modification, &xlrec, ctx)
222 0 : .await?;
223 : }
224 : }
225 : pg_constants::RM_XACT_ID => {
226 24 : let info = decoded.xl_info & pg_constants::XLOG_XACT_OPMASK;
227 24 :
228 24 : if info == pg_constants::XLOG_XACT_COMMIT || info == pg_constants::XLOG_XACT_ABORT {
229 8 : let parsed_xact =
230 8 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
231 8 : self.ingest_xact_record(
232 8 : modification,
233 8 : &parsed_xact,
234 8 : info == pg_constants::XLOG_XACT_COMMIT,
235 8 : ctx,
236 8 : )
237 0 : .await?;
238 16 : } else if info == pg_constants::XLOG_XACT_COMMIT_PREPARED
239 16 : || info == pg_constants::XLOG_XACT_ABORT_PREPARED
240 : {
241 0 : let parsed_xact =
242 0 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
243 0 : self.ingest_xact_record(
244 0 : modification,
245 0 : &parsed_xact,
246 0 : info == pg_constants::XLOG_XACT_COMMIT_PREPARED,
247 0 : ctx,
248 0 : )
249 0 : .await?;
250 : // Remove the twophase file. See RemoveTwoPhaseFile() in the postgres code.
251 0 : trace!(
252 0 : "Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
253 0 : decoded.xl_xid,
254 0 : parsed_xact.xid,
255 0 : lsn,
256 0 : );
257 0 : modification
258 0 : .drop_twophase_file(parsed_xact.xid, ctx)
259 0 : .await?;
260 16 : } else if info == pg_constants::XLOG_XACT_PREPARE {
261 0 : modification
262 0 : .put_twophase_file(decoded.xl_xid, Bytes::copy_from_slice(&buf[..]), ctx)
263 0 : .await?;
264 16 : }
265 : }
266 : pg_constants::RM_MULTIXACT_ID => {
267 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
268 0 :
269 0 : if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE {
270 0 : let pageno = buf.get_u32_le();
271 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
272 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
273 0 : self.put_slru_page_image(
274 0 : modification,
275 0 : SlruKind::MultiXactOffsets,
276 0 : segno,
277 0 : rpageno,
278 0 : ZERO_PAGE.clone(),
279 0 : ctx,
280 0 : )
281 0 : .await?;
282 0 : } else if info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE {
283 0 : let pageno = buf.get_u32_le();
284 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
285 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
286 0 : self.put_slru_page_image(
287 0 : modification,
288 0 : SlruKind::MultiXactMembers,
289 0 : segno,
290 0 : rpageno,
291 0 : ZERO_PAGE.clone(),
292 0 : ctx,
293 0 : )
294 0 : .await?;
295 0 : } else if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
296 0 : let xlrec = XlMultiXactCreate::decode(&mut buf);
297 0 : self.ingest_multixact_create_record(modification, &xlrec)?;
298 0 : } else if info == pg_constants::XLOG_MULTIXACT_TRUNCATE_ID {
299 0 : let xlrec = XlMultiXactTruncate::decode(&mut buf);
300 0 : self.ingest_multixact_truncate_record(modification, &xlrec, ctx)
301 0 : .await?;
302 0 : }
303 : }
304 : pg_constants::RM_RELMAP_ID => {
305 0 : let xlrec = XlRelmapUpdate::decode(&mut buf);
306 0 : self.ingest_relmap_page(modification, &xlrec, decoded, ctx)
307 0 : .await?;
308 : }
309 : pg_constants::RM_XLOG_ID => {
310 30 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
311 30 :
312 30 : if info == pg_constants::XLOG_NEXTOID {
313 2 : let next_oid = buf.get_u32_le();
314 2 : if self.checkpoint.nextOid != next_oid {
315 2 : self.checkpoint.nextOid = next_oid;
316 2 : self.checkpoint_modified = true;
317 2 : }
318 28 : } else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
319 28 : || info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
320 : {
321 2 : let mut checkpoint_bytes = [0u8; SIZEOF_CHECKPOINT];
322 2 : buf.copy_to_slice(&mut checkpoint_bytes);
323 2 : let xlog_checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
324 0 : trace!(
325 0 : "xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
326 0 : xlog_checkpoint.oldestXid,
327 0 : self.checkpoint.oldestXid
328 0 : );
329 2 : if (self
330 2 : .checkpoint
331 2 : .oldestXid
332 2 : .wrapping_sub(xlog_checkpoint.oldestXid) as i32)
333 2 : < 0
334 0 : {
335 0 : self.checkpoint.oldestXid = xlog_checkpoint.oldestXid;
336 2 : }
337 0 : trace!(
338 0 : "xlog_checkpoint.oldestActiveXid={}, checkpoint.oldestActiveXid={}",
339 0 : xlog_checkpoint.oldestActiveXid,
340 0 : self.checkpoint.oldestActiveXid
341 0 : );
342 2 : self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid;
343 2 :
344 2 : // Write a new checkpoint key-value pair on every checkpoint record, even
345 2 : // if nothing really changed. Not strictly required, but it seems nice to
346 2 : // have some trace of the checkpoint records in the layer files at the same
347 2 : // LSNs.
348 2 : self.checkpoint_modified = true;
349 26 : }
350 : }
351 : pg_constants::RM_LOGICALMSG_ID => {
352 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
353 0 :
354 0 : if info == pg_constants::XLOG_LOGICAL_MESSAGE {
355 0 : let xlrec = crate::walrecord::XlLogicalMessage::decode(&mut buf);
356 0 : let prefix = std::str::from_utf8(&buf[0..xlrec.prefix_size - 1])?;
357 0 : let message = &buf[xlrec.prefix_size..xlrec.prefix_size + xlrec.message_size];
358 0 : if prefix == "neon-test" {
359 : // This is a convenient way to make the WAL ingestion pause at a
360 : // particular point in the WAL. For more fine-grained control,
361 : // we could peek into the message and only pause if it contains
362 : // a particular string, for example, but this is enough for now.
363 0 : failpoint_support::sleep_millis_async!("wal-ingest-logical-message-sleep");
364 0 : } else if let Some(path) = prefix.strip_prefix("neon-file:") {
365 0 : modification.put_file(path, message, ctx).await?;
366 0 : }
367 0 : }
368 : }
369 : pg_constants::RM_STANDBY_ID => {
370 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
371 16 : if info == pg_constants::XLOG_RUNNING_XACTS {
372 0 : let xlrec = crate::walrecord::XlRunningXacts::decode(&mut buf);
373 0 : self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid;
374 16 : }
375 : }
376 292 : _x => {
377 292 : // TODO: should probably log & fail here instead of blindly
378 292 : // doing something without understanding the protocol
379 292 : }
380 : }
381 :
382 : // Iterate through all the blocks that the record modifies, and
383 : // "put" a separate copy of the record for each block.
384 145852 : for blk in decoded.blocks.iter() {
385 145642 : let rel = RelTag {
386 145642 : spcnode: blk.rnode_spcnode,
387 145642 : dbnode: blk.rnode_dbnode,
388 145642 : relnode: blk.rnode_relnode,
389 145642 : forknum: blk.forknum,
390 145642 : };
391 145642 :
392 145642 : let key = rel_block_to_key(rel, blk.blkno);
393 145642 : let key_is_local = self.shard.is_key_local(&key);
394 :
395 0 : tracing::debug!(
396 0 : lsn=%lsn,
397 0 : key=%key,
398 0 : "ingest: shard decision {} (checkpoint={})",
399 0 : if !key_is_local { "drop" } else { "keep" },
400 0 : self.checkpoint_modified
401 0 : );
402 :
403 145642 : if !key_is_local {
404 0 : if self.shard.is_zero() {
405 : // Shard 0 tracks relation sizes. Although we will not store this block, we will observe
406 : // its blkno in case it implicitly extends a relation.
407 0 : self.observe_decoded_block(modification, blk, ctx).await?;
408 0 : }
409 :
410 0 : continue;
411 145642 : }
412 145642 : self.ingest_decoded_block(modification, lsn, decoded, blk, ctx)
413 95 : .await?;
414 : }
415 :
416 : // If checkpoint data was updated, store the new version in the repository
417 145852 : if self.checkpoint_modified {
418 6 : let new_checkpoint_bytes = self.checkpoint.encode()?;
419 :
420 6 : modification.put_checkpoint(new_checkpoint_bytes)?;
421 6 : self.checkpoint_modified = false;
422 145846 : }
423 :
424 : // Note that at this point this record is only cached in the modification
425 : // until commit() is called to flush the data into the repository and update
426 : // the latest LSN.
427 :
428 145852 : Ok(modification.len() > prev_len)
429 145852 : }
430 :
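Most arms of the big `match` above start by masking `xl_info`: the low nibble is reserved for xlog-level flags set by XLogInsert itself, while the high nibble carries the resource manager's opcode. A self-contained illustration (the mask values mirror PostgreSQL's xlogrecord.h):

```rust
// How xl_info packs two bitfields.
const XLR_INFO_MASK: u8 = 0x0F; // low bits: reserved for the xlog machinery
const XLR_RMGR_INFO_MASK: u8 = 0xF0; // high bits: resource-manager opcode

fn main() {
    let xl_info: u8 = 0x30 | 0x01; // rmgr opcode 0x30 plus an xlog-level flag
    assert_eq!(xl_info & XLR_RMGR_INFO_MASK, 0x30);
    assert_eq!(xl_info & XLR_INFO_MASK, 0x01);
}
```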
431 : /// Do not store this block, but observe it for the purposes of updating our relation size state.
432 0 : async fn observe_decoded_block(
433 0 : &mut self,
434 0 : modification: &mut DatadirModification<'_>,
435 0 : blk: &DecodedBkpBlock,
436 0 : ctx: &RequestContext,
437 0 : ) -> Result<(), PageReconstructError> {
438 0 : let rel = RelTag {
439 0 : spcnode: blk.rnode_spcnode,
440 0 : dbnode: blk.rnode_dbnode,
441 0 : relnode: blk.rnode_relnode,
442 0 : forknum: blk.forknum,
443 0 : };
444 0 : self.handle_rel_extend(modification, rel, blk.blkno, ctx)
445 0 : .await
446 0 : }
447 :
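Every decoded block goes through the shard filter seen in `ingest_record`: non-local keys are dropped, except that shard 0 still observes them for relation-size tracking. A hedged sketch of the shape of that decision (the real `ShardIdentity` uses its own hashing and stripe layout; this is only illustrative):

```rust
// Simplified stand-in for `is_key_local`: map a key hash to a shard and
// keep the block only if it lands on this shard.
fn shard_of(key_hash: u64, shard_count: u8) -> u8 {
    (key_hash % shard_count as u64) as u8
}

fn is_key_local(key_hash: u64, my_shard: u8, shard_count: u8) -> bool {
    shard_count <= 1 || shard_of(key_hash, shard_count) == my_shard
}

fn main() {
    // With 4 shards, each block lands on exactly one of them; the others
    // skip it (shard 0 still observes the blkno for relation sizes).
    assert!(is_key_local(7, 3, 4));
    assert!(!is_key_local(7, 0, 4));
}
```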
448 145642 : async fn ingest_decoded_block(
449 145642 : &mut self,
450 145642 : modification: &mut DatadirModification<'_>,
451 145642 : lsn: Lsn,
452 145642 : decoded: &DecodedWALRecord,
453 145642 : blk: &DecodedBkpBlock,
454 145642 : ctx: &RequestContext,
455 145642 : ) -> Result<(), PageReconstructError> {
456 145642 : let rel = RelTag {
457 145642 : spcnode: blk.rnode_spcnode,
458 145642 : dbnode: blk.rnode_dbnode,
459 145642 : relnode: blk.rnode_relnode,
460 145642 : forknum: blk.forknum,
461 145642 : };
462 145642 :
463 145642 : //
464 145642 : // Instead of storing the full-page-image WAL record,
465 145642 : // it is better to store the extracted image: we can skip wal-redo
466 145642 : // in this case. Also, some FPI records may contain multiple (up to 32) pages,
467 145642 : // so they would have to be copied multiple times.
468 145642 : //
469 145642 : if blk.apply_image
470 60 : && blk.has_image
471 60 : && decoded.xl_rmid == pg_constants::RM_XLOG_ID
472 24 : && (decoded.xl_info == pg_constants::XLOG_FPI
473 0 : || decoded.xl_info == pg_constants::XLOG_FPI_FOR_HINT)
474 : // compression of WAL is not yet supported: fall back to storing the original WAL record
475 24 : && !postgres_ffi::bkpimage_is_compressed(blk.bimg_info, modification.tline.pg_version)?
476 : // do not materialize null pages because they will most likely soon be replaced with real data
477 24 : && blk.bimg_len != 0
478 : {
479 : // Extract page image from FPI record
480 24 : let img_len = blk.bimg_len as usize;
481 24 : let img_offs = blk.bimg_offset as usize;
482 24 : let mut image = BytesMut::with_capacity(BLCKSZ as usize);
483 24 : image.extend_from_slice(&decoded.record[img_offs..img_offs + img_len]);
484 24 :
485 24 : if blk.hole_length != 0 {
486 0 : let tail = image.split_off(blk.hole_offset as usize);
487 0 : image.resize(image.len() + blk.hole_length as usize, 0u8);
488 0 : image.unsplit(tail);
489 24 : }
490 : //
491 : // Match the logic of XLogReadBufferForRedoExtended:
492 : // The page may be uninitialized. If so, we can't set the LSN because
493 : // that would corrupt the page.
494 : //
495 24 : if !page_is_new(&image) {
496 18 : page_set_lsn(&mut image, lsn)
497 6 : }
498 24 : assert_eq!(image.len(), BLCKSZ as usize);
499 24 : self.put_rel_page_image(modification, rel, blk.blkno, image.freeze(), ctx)
500 0 : .await?;
501 : } else {
502 145618 : let rec = NeonWalRecord::Postgres {
503 145618 : will_init: blk.will_init || blk.apply_image,
504 145618 : rec: decoded.record.clone(),
505 145618 : };
506 145618 : self.put_rel_wal_record(modification, rel, blk.blkno, rec, ctx)
507 95 : .await?;
508 : }
509 145642 : Ok(())
510 145642 : }
511 :
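The hole handling above mirrors PostgreSQL's full-page-image layout: the FPI omits the "hole" of free space between pd_lower and pd_upper, so reconstruction zero-fills it back in. A worked example of the same split/resize/unsplit steps, using a plain `Vec` for self-containment:

```rust
// Restore a page from an FPI whose hole was elided.
fn restore_hole(mut img: Vec<u8>, hole_offset: usize, hole_length: usize) -> Vec<u8> {
    let tail = img.split_off(hole_offset); // bytes after the hole
    img.extend(std::iter::repeat(0u8).take(hole_length)); // zero-fill the hole
    img.extend(tail); // re-attach the tail
    img
}

fn main() {
    let compacted = vec![1, 2, 7, 8]; // page with bytes at offsets 2..6 elided
    let full = restore_hole(compacted, 2, 4);
    assert_eq!(full, vec![1, 2, 0, 0, 0, 0, 7, 8]);
}
```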
512 145474 : async fn ingest_heapam_record(
513 145474 : &mut self,
514 145474 : buf: &mut Bytes,
515 145474 : modification: &mut DatadirModification<'_>,
516 145474 : decoded: &DecodedWALRecord,
517 145474 : ctx: &RequestContext,
518 145474 : ) -> anyhow::Result<()> {
519 145474 : // Handle VM bit updates that are implicitly part of heap records.
520 145474 :
521 145474 : // First, look at the record to determine which VM bits need
522 145474 : // to be cleared. If either of these variables is set, we
523 145474 : // need to clear the corresponding bits in the visibility map.
524 145474 : let mut new_heap_blkno: Option<u32> = None;
525 145474 : let mut old_heap_blkno: Option<u32> = None;
526 145474 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
527 145474 :
528 145474 : match modification.tline.pg_version {
529 : 14 => {
530 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
531 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
532 0 :
533 0 : if info == pg_constants::XLOG_HEAP_INSERT {
534 0 : let xlrec = v14::XlHeapInsert::decode(buf);
535 0 : assert_eq!(0, buf.remaining());
536 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
537 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
538 0 : }
539 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
540 0 : let xlrec = v14::XlHeapDelete::decode(buf);
541 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
542 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
543 0 : }
544 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
545 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
546 : {
547 0 : let xlrec = v14::XlHeapUpdate::decode(buf);
548 0 : // the size of tuple data is inferred from the size of the record.
549 0 : // we can't validate the remaining number of bytes without parsing
550 0 : // the tuple data.
551 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
552 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
553 0 : }
554 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
555 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
556 0 : // non-HOT update where the new tuple goes to a different page than
557 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
558 0 : // set.
559 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
560 0 : }
561 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
562 0 : let xlrec = v14::XlHeapLock::decode(buf);
563 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
564 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
565 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
566 0 : }
567 0 : }
568 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
569 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
570 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
571 0 : let xlrec = v14::XlHeapMultiInsert::decode(buf);
572 :
573 0 : let offset_array_len =
574 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
575 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
576 0 : 0
577 : } else {
578 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
579 : };
580 0 : assert_eq!(offset_array_len, buf.remaining());
581 :
582 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
583 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
584 0 : }
585 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
586 0 : let xlrec = v14::XlHeapLockUpdated::decode(buf);
587 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
588 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
589 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
590 0 : }
591 0 : }
592 : } else {
593 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
594 : }
595 : }
596 : 15 => {
597 145474 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
598 145286 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
599 145286 :
600 145286 : if info == pg_constants::XLOG_HEAP_INSERT {
601 145276 : let xlrec = v15::XlHeapInsert::decode(buf);
602 145276 : assert_eq!(0, buf.remaining());
603 145276 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
604 4 : new_heap_blkno = Some(decoded.blocks[0].blkno);
605 145272 : }
606 10 : } else if info == pg_constants::XLOG_HEAP_DELETE {
607 0 : let xlrec = v15::XlHeapDelete::decode(buf);
608 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
609 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
610 0 : }
611 10 : } else if info == pg_constants::XLOG_HEAP_UPDATE
612 2 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
613 : {
614 8 : let xlrec = v15::XlHeapUpdate::decode(buf);
615 8 : // the size of tuple data is inferred from the size of the record.
616 8 : // we can't validate the remaining number of bytes without parsing
617 8 : // the tuple data.
618 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
619 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
620 8 : }
621 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
622 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
623 0 : // non-HOT update where the new tuple goes to a different page than
624 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
625 0 : // set.
626 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
627 8 : }
628 2 : } else if info == pg_constants::XLOG_HEAP_LOCK {
629 0 : let xlrec = v15::XlHeapLock::decode(buf);
630 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
631 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
632 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
633 0 : }
634 2 : }
635 188 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
636 188 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
637 188 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
638 42 : let xlrec = v15::XlHeapMultiInsert::decode(buf);
639 :
640 42 : let offset_array_len =
641 42 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
642 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
643 2 : 0
644 : } else {
645 40 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
646 : };
647 42 : assert_eq!(offset_array_len, buf.remaining());
648 :
649 42 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
650 8 : new_heap_blkno = Some(decoded.blocks[0].blkno);
651 34 : }
652 146 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
653 0 : let xlrec = v15::XlHeapLockUpdated::decode(buf);
654 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
655 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
656 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
657 0 : }
658 146 : }
659 : } else {
660 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
661 : }
662 : }
663 : 16 => {
664 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
665 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
666 0 :
667 0 : if info == pg_constants::XLOG_HEAP_INSERT {
668 0 : let xlrec = v16::XlHeapInsert::decode(buf);
669 0 : assert_eq!(0, buf.remaining());
670 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
671 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
672 0 : }
673 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
674 0 : let xlrec = v16::XlHeapDelete::decode(buf);
675 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
676 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
677 0 : }
678 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
679 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
680 : {
681 0 : let xlrec = v16::XlHeapUpdate::decode(buf);
682 0 : // the size of tuple data is inferred from the size of the record.
683 0 : // we can't validate the remaining number of bytes without parsing
684 0 : // the tuple data.
685 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
686 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
687 0 : }
688 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
689 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
690 0 : // non-HOT update where the new tuple goes to a different page than
691 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
692 0 : // set.
693 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
694 0 : }
695 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
696 0 : let xlrec = v16::XlHeapLock::decode(buf);
697 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
698 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
699 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
700 0 : }
701 0 : }
702 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
703 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
704 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
705 0 : let xlrec = v16::XlHeapMultiInsert::decode(buf);
706 :
707 0 : let offset_array_len =
708 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
709 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
710 0 : 0
711 : } else {
712 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
713 : };
714 0 : assert_eq!(offset_array_len, buf.remaining());
715 :
716 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
717 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
718 0 : }
719 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
720 0 : let xlrec = v16::XlHeapLockUpdated::decode(buf);
721 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
722 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
723 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
724 0 : }
725 0 : }
726 : } else {
727 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
728 : }
729 : }
730 0 : _ => {}
731 : }
732 :
733 : // Clear the VM bits if required.
734 145474 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
735 12 : let vm_rel = RelTag {
736 12 : forknum: VISIBILITYMAP_FORKNUM,
737 12 : spcnode: decoded.blocks[0].rnode_spcnode,
738 12 : dbnode: decoded.blocks[0].rnode_dbnode,
739 12 : relnode: decoded.blocks[0].rnode_relnode,
740 12 : };
741 12 :
742 12 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
743 12 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
744 :
745 : // Sometimes, Postgres seems to create heap WAL records with the
746 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
747 : // not set. In fact, it's possible that the VM page does not exist at all.
748 : // In that case, we don't want to store a record to clear the VM bit;
749 : // replaying it would fail to find the previous image of the page, because
750 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
751 : // record if they don't.
752 12 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
753 12 : if let Some(blknum) = new_vm_blk {
754 12 : if blknum >= vm_size {
755 0 : new_vm_blk = None;
756 12 : }
757 0 : }
758 12 : if let Some(blknum) = old_vm_blk {
759 0 : if blknum >= vm_size {
760 0 : old_vm_blk = None;
761 0 : }
762 12 : }
763 :
764 12 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
765 12 : if new_vm_blk == old_vm_blk {
766 : // An UPDATE record that needs to clear the bits for both old and the
767 : // new page, both of which reside on the same VM page.
768 0 : self.put_rel_wal_record(
769 0 : modification,
770 0 : vm_rel,
771 0 : new_vm_blk.unwrap(),
772 0 : NeonWalRecord::ClearVisibilityMapFlags {
773 0 : new_heap_blkno,
774 0 : old_heap_blkno,
775 0 : flags,
776 0 : },
777 0 : ctx,
778 0 : )
779 0 : .await?;
780 : } else {
781 : // Clear VM bits for one heap page, or for two pages that reside on
782 : // different VM pages.
783 12 : if let Some(new_vm_blk) = new_vm_blk {
784 12 : self.put_rel_wal_record(
785 12 : modification,
786 12 : vm_rel,
787 12 : new_vm_blk,
788 12 : NeonWalRecord::ClearVisibilityMapFlags {
789 12 : new_heap_blkno,
790 12 : old_heap_blkno: None,
791 12 : flags,
792 12 : },
793 12 : ctx,
794 12 : )
795 0 : .await?;
796 0 : }
797 12 : if let Some(old_vm_blk) = old_vm_blk {
798 0 : self.put_rel_wal_record(
799 0 : modification,
800 0 : vm_rel,
801 0 : old_vm_blk,
802 0 : NeonWalRecord::ClearVisibilityMapFlags {
803 0 : new_heap_blkno: None,
804 0 : old_heap_blkno,
805 0 : flags,
806 0 : },
807 0 : ctx,
808 0 : )
809 0 : .await?;
810 12 : }
811 : }
812 0 : }
813 145462 : }
814 :
815 145474 : Ok(())
816 145474 : }
817 :
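The heap-block to VM-block mapping used above packs two bits per heap page (ALL_VISIBLE and ALL_FROZEN, together `VISIBILITYMAP_VALID_BITS`) into the visibility map, so with 8 KB pages one VM page covers 32672 heap pages. A worked version of the `HEAPBLK_TO_MAPBLOCK` arithmetic (constants follow PostgreSQL's visibilitymap.c for 8 KB pages):

```rust
const BLCKSZ: u32 = 8192;
const SIZE_OF_PAGE_HEADER_DATA: u32 = 24;
const MAPSIZE: u32 = BLCKSZ - SIZE_OF_PAGE_HEADER_DATA;
const BITS_PER_HEAPBLOCK: u32 = 2; // ALL_VISIBLE + ALL_FROZEN
const HEAPBLOCKS_PER_BYTE: u32 = 8 / BITS_PER_HEAPBLOCK;
const HEAPBLOCKS_PER_PAGE: u32 = MAPSIZE * HEAPBLOCKS_PER_BYTE;

fn heapblk_to_mapblock(heap_blkno: u32) -> u32 {
    heap_blkno / HEAPBLOCKS_PER_PAGE
}

fn main() {
    assert_eq!(HEAPBLOCKS_PER_PAGE, 32672);
    assert_eq!(heapblk_to_mapblock(32671), 0); // last heap page on VM page 0
    assert_eq!(heapblk_to_mapblock(32672), 1); // first heap page on VM page 1
}
```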
818 0 : async fn ingest_neonrmgr_record(
819 0 : &mut self,
820 0 : buf: &mut Bytes,
821 0 : modification: &mut DatadirModification<'_>,
822 0 : decoded: &DecodedWALRecord,
823 0 : ctx: &RequestContext,
824 0 : ) -> anyhow::Result<()> {
825 0 : // Handle VM bit updates that are implicitly part of heap records.
826 0 :
827 0 : // First, look at the record to determine which VM bits need
828 0 : // to be cleared. If either of these variables is set, we
829 0 : // need to clear the corresponding bits in the visibility map.
830 0 : let mut new_heap_blkno: Option<u32> = None;
831 0 : let mut old_heap_blkno: Option<u32> = None;
832 0 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
833 0 : let pg_version = modification.tline.pg_version;
834 0 :
835 0 : assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
836 :
837 0 : match pg_version {
838 : 16 => {
839 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
840 0 :
841 0 : match info {
842 : pg_constants::XLOG_NEON_HEAP_INSERT => {
843 0 : let xlrec = v16::rm_neon::XlNeonHeapInsert::decode(buf);
844 0 : assert_eq!(0, buf.remaining());
845 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
846 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
847 0 : }
848 : }
849 : pg_constants::XLOG_NEON_HEAP_DELETE => {
850 0 : let xlrec = v16::rm_neon::XlNeonHeapDelete::decode(buf);
851 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
852 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
853 0 : }
854 : }
855 : pg_constants::XLOG_NEON_HEAP_UPDATE
856 : | pg_constants::XLOG_NEON_HEAP_HOT_UPDATE => {
857 0 : let xlrec = v16::rm_neon::XlNeonHeapUpdate::decode(buf);
858 0 : // the size of tuple data is inferred from the size of the record.
859 0 : // we can't validate the remaining number of bytes without parsing
860 0 : // the tuple data.
861 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
862 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
863 0 : }
864 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
865 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
866 0 : // non-HOT update where the new tuple goes to different page than
867 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
868 0 : // set.
869 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
870 0 : }
871 : }
872 : pg_constants::XLOG_NEON_HEAP_MULTI_INSERT => {
873 0 : let xlrec = v16::rm_neon::XlNeonHeapMultiInsert::decode(buf);
874 :
875 0 : let offset_array_len =
876 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
877 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
878 0 : 0
879 : } else {
880 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
881 : };
882 0 : assert_eq!(offset_array_len, buf.remaining());
883 :
884 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
885 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
886 0 : }
887 : }
888 : pg_constants::XLOG_NEON_HEAP_LOCK => {
889 0 : let xlrec = v16::rm_neon::XlNeonHeapLock::decode(buf);
890 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
891 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
892 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
893 0 : }
894 : }
895 0 : info => bail!("Unknown WAL record type for Neon RMGR: {}", info),
896 : }
897 : }
898 0 : _ => bail!(
899 0 : "Neon RMGR has no known compatibility with PostgreSQL version {}",
900 0 : pg_version
901 0 : ),
902 : }
903 :
904 : // Clear the VM bits if required.
905 0 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
906 0 : let vm_rel = RelTag {
907 0 : forknum: VISIBILITYMAP_FORKNUM,
908 0 : spcnode: decoded.blocks[0].rnode_spcnode,
909 0 : dbnode: decoded.blocks[0].rnode_dbnode,
910 0 : relnode: decoded.blocks[0].rnode_relnode,
911 0 : };
912 0 :
913 0 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
914 0 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
915 :
916 : // Sometimes, Postgres seems to create heap WAL records with the
917 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
918 : // not set. In fact, it's possible that the VM page does not exist at all.
919 : // In that case, we don't want to store a record to clear the VM bit;
920 : // replaying it would fail to find the previous image of the page, because
921 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
922 : // record if they don't.
923 0 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
924 0 : if let Some(blknum) = new_vm_blk {
925 0 : if blknum >= vm_size {
926 0 : new_vm_blk = None;
927 0 : }
928 0 : }
929 0 : if let Some(blknum) = old_vm_blk {
930 0 : if blknum >= vm_size {
931 0 : old_vm_blk = None;
932 0 : }
933 0 : }
934 :
935 0 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
936 0 : if new_vm_blk == old_vm_blk {
937 : // An UPDATE record that needs to clear the bits for both old and the
938 : // new page, both of which reside on the same VM page.
939 0 : self.put_rel_wal_record(
940 0 : modification,
941 0 : vm_rel,
942 0 : new_vm_blk.unwrap(),
943 0 : NeonWalRecord::ClearVisibilityMapFlags {
944 0 : new_heap_blkno,
945 0 : old_heap_blkno,
946 0 : flags,
947 0 : },
948 0 : ctx,
949 0 : )
950 0 : .await?;
951 : } else {
952 : // Clear VM bits for one heap page, or for two pages that reside on
953 : // different VM pages.
954 0 : if let Some(new_vm_blk) = new_vm_blk {
955 0 : self.put_rel_wal_record(
956 0 : modification,
957 0 : vm_rel,
958 0 : new_vm_blk,
959 0 : NeonWalRecord::ClearVisibilityMapFlags {
960 0 : new_heap_blkno,
961 0 : old_heap_blkno: None,
962 0 : flags,
963 0 : },
964 0 : ctx,
965 0 : )
966 0 : .await?;
967 0 : }
968 0 : if let Some(old_vm_blk) = old_vm_blk {
969 0 : self.put_rel_wal_record(
970 0 : modification,
971 0 : vm_rel,
972 0 : old_vm_blk,
973 0 : NeonWalRecord::ClearVisibilityMapFlags {
974 0 : new_heap_blkno: None,
975 0 : old_heap_blkno,
976 0 : flags,
977 0 : },
978 0 : ctx,
979 0 : )
980 0 : .await?;
981 0 : }
982 : }
983 0 : }
984 0 : }
985 :
986 0 : Ok(())
987 0 : }
988 :
989 : /// Subroutine of ingest_record(), to handle an XLOG_DBASE_CREATE record.
990 0 : async fn ingest_xlog_dbase_create(
991 0 : &mut self,
992 0 : modification: &mut DatadirModification<'_>,
993 0 : rec: &XlCreateDatabase,
994 0 : ctx: &RequestContext,
995 0 : ) -> anyhow::Result<()> {
996 0 : let db_id = rec.db_id;
997 0 : let tablespace_id = rec.tablespace_id;
998 0 : let src_db_id = rec.src_db_id;
999 0 : let src_tablespace_id = rec.src_tablespace_id;
1000 :
1001 0 : let rels = modification
1002 0 : .tline
1003 0 : .list_rels(
1004 0 : src_tablespace_id,
1005 0 : src_db_id,
1006 0 : Version::Modified(modification),
1007 0 : ctx,
1008 0 : )
1009 0 : .await?;
1010 :
1011 0 : debug!("ingest_xlog_dbase_create: {} rels", rels.len());
1012 :
1013 : // Copy relfilemap
1014 0 : let filemap = modification
1015 0 : .tline
1016 0 : .get_relmap_file(
1017 0 : src_tablespace_id,
1018 0 : src_db_id,
1019 0 : Version::Modified(modification),
1020 0 : ctx,
1021 0 : )
1022 0 : .await?;
1023 0 : modification
1024 0 : .put_relmap_file(tablespace_id, db_id, filemap, ctx)
1025 0 : .await?;
1026 :
1027 0 : let mut num_rels_copied = 0;
1028 0 : let mut num_blocks_copied = 0;
1029 0 : for src_rel in rels {
1030 0 : assert_eq!(src_rel.spcnode, src_tablespace_id);
1031 0 : assert_eq!(src_rel.dbnode, src_db_id);
1032 :
1033 0 : let nblocks = modification
1034 0 : .tline
1035 0 : .get_rel_size(src_rel, Version::Modified(modification), true, ctx)
1036 0 : .await?;
1037 0 : let dst_rel = RelTag {
1038 0 : spcnode: tablespace_id,
1039 0 : dbnode: db_id,
1040 0 : relnode: src_rel.relnode,
1041 0 : forknum: src_rel.forknum,
1042 0 : };
1043 0 :
1044 0 : modification.put_rel_creation(dst_rel, nblocks, ctx).await?;
1045 :
1046 : // Copy content
1047 0 : debug!("copying rel {} to {}, {} blocks", src_rel, dst_rel, nblocks);
1048 0 : for blknum in 0..nblocks {
1049 : // Sharding:
1050 : // - src and dst are always on the same shard, because they differ only by dbNode, and
1051 : // dbNode is not included in the hash inputs for sharding.
1052 : // - This WAL command is replayed on all shards, but each shard only copies the blocks
1053 : // that belong to it.
1054 0 : let src_key = rel_block_to_key(src_rel, blknum);
1055 0 : if !self.shard.is_key_local(&src_key) {
1056 0 : debug!(
1057 0 : "Skipping non-local key {} during XLOG_DBASE_CREATE",
1058 0 : src_key
1059 0 : );
1060 0 : continue;
1061 0 : }
1062 0 : debug!(
1063 0 : "copying block {} from {} ({}) to {}",
1064 0 : blknum, src_rel, src_key, dst_rel
1065 0 : );
1066 :
1067 0 : let content = modification
1068 0 : .tline
1069 0 : .get_rel_page_at_lsn(
1070 0 : src_rel,
1071 0 : blknum,
1072 0 : Version::Modified(modification),
1073 0 : true,
1074 0 : ctx,
1075 0 : )
1076 0 : .await?;
1077 0 : modification.put_rel_page_image(dst_rel, blknum, content)?;
1078 0 : num_blocks_copied += 1;
1079 : }
1080 :
1081 0 : num_rels_copied += 1;
1082 : }
1083 :
1084 0 : info!(
1085 0 : "Created database {}/{}, copied {} blocks in {} rels",
1086 0 : tablespace_id, db_id, num_blocks_copied, num_rels_copied
1087 0 : );
1088 0 : Ok(())
1089 0 : }
1090 :
1091 16 : async fn ingest_xlog_smgr_create(
1092 16 : &mut self,
1093 16 : modification: &mut DatadirModification<'_>,
1094 16 : rec: &XlSmgrCreate,
1095 16 : ctx: &RequestContext,
1096 16 : ) -> anyhow::Result<()> {
1097 16 : let rel = RelTag {
1098 16 : spcnode: rec.rnode.spcnode,
1099 16 : dbnode: rec.rnode.dbnode,
1100 16 : relnode: rec.rnode.relnode,
1101 16 : forknum: rec.forknum,
1102 16 : };
1103 16 : self.put_rel_creation(modification, rel, ctx).await?;
1104 16 : Ok(())
1105 16 : }
1106 :
1107 : /// Subroutine of ingest_record(), to handle an XLOG_SMGR_TRUNCATE record.
1108 : ///
1109 : /// This is the same logic as in PostgreSQL's smgr_redo() function.
1110 0 : async fn ingest_xlog_smgr_truncate(
1111 0 : &mut self,
1112 0 : modification: &mut DatadirModification<'_>,
1113 0 : rec: &XlSmgrTruncate,
1114 0 : ctx: &RequestContext,
1115 0 : ) -> anyhow::Result<()> {
1116 0 : let spcnode = rec.rnode.spcnode;
1117 0 : let dbnode = rec.rnode.dbnode;
1118 0 : let relnode = rec.rnode.relnode;
1119 0 :
1120 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
1121 0 : let rel = RelTag {
1122 0 : spcnode,
1123 0 : dbnode,
1124 0 : relnode,
1125 0 : forknum: MAIN_FORKNUM,
1126 0 : };
1127 0 : self.put_rel_truncation(modification, rel, rec.blkno, ctx)
1128 0 : .await?;
1129 0 : }
1130 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_FSM) != 0 {
1131 0 : let rel = RelTag {
1132 0 : spcnode,
1133 0 : dbnode,
1134 0 : relnode,
1135 0 : forknum: FSM_FORKNUM,
1136 0 : };
1137 0 :
1138 0 : let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE;
1139 0 : let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
1140 0 : if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 {
1141 : // The tail of the last remaining FSM page has to be zeroed.
1142 : // We are not precise here: instead of digging into the FSM bitmap format, we just clear the whole page.
1143 0 : modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?;
1144 0 : fsm_physical_page_no += 1;
1145 0 : }
1146 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1147 0 : if nblocks > fsm_physical_page_no {
1148 : // check if there is something to do: the FSM is larger than the truncate position
1149 0 : self.put_rel_truncation(modification, rel, fsm_physical_page_no, ctx)
1150 0 : .await?;
1151 0 : }
1152 0 : }
1153 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
1154 0 : let rel = RelTag {
1155 0 : spcnode,
1156 0 : dbnode,
1157 0 : relnode,
1158 0 : forknum: VISIBILITYMAP_FORKNUM,
1159 0 : };
1160 0 :
1161 0 : let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE;
1162 0 : if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 {
1163 : // The tail of the last remaining VM page has to be zeroed.
1164 : // We are not precise here: instead of digging into the VM bitmap format, we just clear the whole page.
1165 0 : modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?;
1166 0 : vm_page_no += 1;
1167 0 : }
1168 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1169 0 : if nblocks > vm_page_no {
1170 : // check if there is something to do: the VM is larger than the truncate position
1171 0 : self.put_rel_truncation(modification, rel, vm_page_no, ctx)
1172 0 : .await?;
1173 0 : }
1174 0 : }
1175 0 : Ok(())
1176 0 : }
1177 :
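Both the FSM and VM branches above apply the same truncation arithmetic: find the fork page containing the new end of the heap, zero its tail (imprecisely, by clearing the whole page) if the boundary falls mid-page, and truncate the fork after it. A sketch of that arithmetic with an invented helper name, ignoring the FSM's logical-to-physical tree addressing and using a placeholder slots-per-page value (the real SLOTS_PER_FSM_PAGE / VM_HEAPBLOCKS_PER_PAGE depend on the page layout):

```rust
// Returns (first fork page to drop, whether the page before it is partial
// and must be zero-filled rather than dropped).
fn fork_truncate_point(new_heap_nblocks: u32, slots_per_page: u32) -> (u32, bool) {
    let mut page = new_heap_nblocks / slots_per_page;
    let partial = new_heap_nblocks % slots_per_page != 0;
    if partial {
        page += 1; // the partial page is kept (zeroed); truncate after it
    }
    (page, partial)
}

fn main() {
    // Clean boundary: drop from page 2 onward, nothing to zero.
    assert_eq!(fork_truncate_point(8192, 4096), (2, false));
    // Mid-page: zero page 2's contents, then truncate from page 3.
    assert_eq!(fork_truncate_point(8200, 4096), (3, true));
}
```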
1178 : /// Subroutine of ingest_record(), to handle XLOG_XACT_* records.
1179 : ///
1180 8 : async fn ingest_xact_record(
1181 8 : &mut self,
1182 8 : modification: &mut DatadirModification<'_>,
1183 8 : parsed: &XlXactParsedRecord,
1184 8 : is_commit: bool,
1185 8 : ctx: &RequestContext,
1186 8 : ) -> anyhow::Result<()> {
1187 8 : // Record update of CLOG pages
1188 8 : let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
1189 8 : let mut segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1190 8 : let mut rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1191 8 : let mut page_xids: Vec<TransactionId> = vec![parsed.xid];
1192 :
1193 8 : for subxact in &parsed.subxacts {
1194 0 : let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
1195 0 : if subxact_pageno != pageno {
1196 : // This subxact goes to different page. Write the record
1197 : // for all the XIDs on the previous page, and continue
1198 : // accumulating XIDs on this new page.
1199 0 : modification.put_slru_wal_record(
1200 0 : SlruKind::Clog,
1201 0 : segno,
1202 0 : rpageno,
1203 0 : if is_commit {
1204 0 : NeonWalRecord::ClogSetCommitted {
1205 0 : xids: page_xids,
1206 0 : timestamp: parsed.xact_time,
1207 0 : }
1208 : } else {
1209 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1210 : },
1211 0 : )?;
1212 0 : page_xids = Vec::new();
1213 0 : }
1214 0 : pageno = subxact_pageno;
1215 0 : segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1216 0 : rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1217 0 : page_xids.push(*subxact);
1218 : }
1219 8 : modification.put_slru_wal_record(
1220 8 : SlruKind::Clog,
1221 8 : segno,
1222 8 : rpageno,
1223 8 : if is_commit {
1224 8 : NeonWalRecord::ClogSetCommitted {
1225 8 : xids: page_xids,
1226 8 : timestamp: parsed.xact_time,
1227 8 : }
1228 : } else {
1229 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1230 : },
1231 0 : )?;
1232 :
1233 8 : for xnode in &parsed.xnodes {
1234 0 : for forknum in MAIN_FORKNUM..=INIT_FORKNUM {
1235 0 : let rel = RelTag {
1236 0 : forknum,
1237 0 : spcnode: xnode.spcnode,
1238 0 : dbnode: xnode.dbnode,
1239 0 : relnode: xnode.relnode,
1240 0 : };
1241 0 : if modification
1242 0 : .tline
1243 0 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1244 0 : .await?
1245 : {
1246 0 : self.put_rel_drop(modification, rel, ctx).await?;
1247 0 : }
1248 : }
1249 : }
1250 8 : Ok(())
1251 8 : }
1252 :
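The CLOG addressing used above gives each transaction 2 status bits, so an 8 KB page covers 32768 XIDs, and each SLRU segment holds 32 pages. Worked out (values match `pg_constants` for 8 KB pages):

```rust
const CLOG_XACTS_PER_PAGE: u32 = 32768;
const SLRU_PAGES_PER_SEGMENT: u32 = 32;

// Map an XID to its (segno, rpageno) within the CLOG SLRU, as
// ingest_xact_record does for the main XID and each subxact.
fn clog_position(xid: u32) -> (u32, u32) {
    let pageno = xid / CLOG_XACTS_PER_PAGE;
    (pageno / SLRU_PAGES_PER_SEGMENT, pageno % SLRU_PAGES_PER_SEGMENT)
}

fn main() {
    assert_eq!(clog_position(1), (0, 0));
    assert_eq!(clog_position(32768), (0, 1)); // next page, same segment
    assert_eq!(clog_position(32768 * 32), (1, 0)); // first page of segment 1
}
```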
1253 0 : async fn ingest_clog_truncate_record(
1254 0 : &mut self,
1255 0 : modification: &mut DatadirModification<'_>,
1256 0 : xlrec: &XlClogTruncate,
1257 0 : ctx: &RequestContext,
1258 0 : ) -> anyhow::Result<()> {
1259 0 : info!(
1260 0 : "RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {}",
1261 0 : xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db
1262 0 : );
1263 :
1264 : // Here we treat oldestXid and oldestXidDB
1265 : // differently from the postgres redo routines.
1266 : // In postgres, checkpoint.oldestXid lags behind xlrec.oldest_xid
1267 : // until a checkpoint happens and updates the value.
1268 : // Here we can use the most recent value.
1269 : // It's just an optimization, though, and could be deleted.
1270 : // TODO Figure out if there will be any issues with replicas.
1271 0 : self.checkpoint.oldestXid = xlrec.oldest_xid;
1272 0 : self.checkpoint.oldestXidDB = xlrec.oldest_xid_db;
1273 0 : self.checkpoint_modified = true;
1274 0 :
1275 0 : // TODO Handle AdvanceOldestClogXid() or write a comment explaining why we don't need it
1276 0 :
1277 0 : let latest_page_number =
1278 0 : self.checkpoint.nextXid.value as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
1279 0 :
1280 0 : // Now delete all segments containing pages between xlrec.pageno
1281 0 : // and latest_page_number.
1282 0 :
1283 0 : // First, make an important safety check:
1284 0 : // the current endpoint page must not be eligible for removal.
1285 0 : // See SimpleLruTruncate() in slru.c
1286 0 : if clogpage_precedes(latest_page_number, xlrec.pageno) {
1287 0 : info!("could not truncate directory pg_xact apparent wraparound");
1288 0 : return Ok(());
1289 0 : }
1290 :
1291 : // Iterate over the SLRU CLOG segments and drop those that we're ready to truncate
1292 : //
1293 : // We cannot pass 'lsn' to the Timeline.list_nonrels(), or it
1294 : // will block waiting for the last valid LSN to advance up to
1295 : // it. So we use the previous record's LSN in the get calls
1296 : // instead.
1297 0 : for segno in modification
1298 0 : .tline
1299 0 : .list_slru_segments(SlruKind::Clog, Version::Modified(modification), ctx)
1300 0 : .await?
1301 : {
1302 0 : let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
1303 0 : if slru_may_delete_clogsegment(segpage, xlrec.pageno) {
1304 0 : modification
1305 0 : .drop_slru_segment(SlruKind::Clog, segno, ctx)
1306 0 : .await?;
1307 0 : trace!("Drop CLOG segment {:>04X}", segno);
1308 0 : }
1309 : }
1310 :
1311 0 : Ok(())
1312 0 : }
1313 :
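Both the safety check here (`clogpage_precedes`) and the `oldestXid` update in `ingest_record` rely on the same wraparound-aware comparison: subtract in wrapping arithmetic and test the sign of the difference. In isolation:

```rust
// Circular "precedes" test, the same sign-of-difference trick as
// `oldestXid.wrapping_sub(...) as i32 < 0` in ingest_record.
fn precedes(a: u32, b: u32) -> bool {
    (a.wrapping_sub(b) as i32) < 0
}

fn main() {
    assert!(precedes(1, 2)); // ordinary case
    assert!(precedes(u32::MAX, 1)); // across the wraparound, MAX is "older"
    assert!(!precedes(2, 1));
}
```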
1314 0 : fn ingest_multixact_create_record(
1315 0 : &mut self,
1316 0 : modification: &mut DatadirModification,
1317 0 : xlrec: &XlMultiXactCreate,
1318 0 : ) -> Result<()> {
1319 0 : // Create WAL record for updating the multixact-offsets page
1320 0 : let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
1321 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1322 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1323 0 :
1324 0 : modification.put_slru_wal_record(
1325 0 : SlruKind::MultiXactOffsets,
1326 0 : segno,
1327 0 : rpageno,
1328 0 : NeonWalRecord::MultixactOffsetCreate {
1329 0 : mid: xlrec.mid,
1330 0 : moff: xlrec.moff,
1331 0 : },
1332 0 : )?;
1333 :
1334 : // Create WAL records for the update of each affected multixact-members page
1335 0 : let mut members = xlrec.members.iter();
1336 0 : let mut offset = xlrec.moff;
1337 : loop {
1338 0 : let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1339 0 :
1340 0 : // How many members fit on this page?
1341 0 : let page_remain = pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
1342 0 : - offset % pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1343 0 :
1344 0 : let mut this_page_members: Vec<MultiXactMember> = Vec::new();
1345 0 : for _ in 0..page_remain {
1346 0 : if let Some(m) = members.next() {
1347 0 : this_page_members.push(m.clone());
1348 0 : } else {
1349 0 : break;
1350 : }
1351 : }
1352 0 : if this_page_members.is_empty() {
1353 : // all done
1354 0 : break;
1355 0 : }
1356 0 : let n_this_page = this_page_members.len();
1357 0 :
1358 0 : modification.put_slru_wal_record(
1359 0 : SlruKind::MultiXactMembers,
1360 0 : pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
1361 0 : pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
1362 0 : NeonWalRecord::MultixactMembersCreate {
1363 0 : moff: offset,
1364 0 : members: this_page_members,
1365 0 : },
1366 0 : )?;
1367 :
1368 : // Note: The multixact members can wrap around, even within one WAL record.
1369 0 : offset = offset.wrapping_add(n_this_page as u32);
1370 : }
1371 0 : if xlrec.mid >= self.checkpoint.nextMulti {
1372 0 : self.checkpoint.nextMulti = xlrec.mid + 1;
1373 0 : self.checkpoint_modified = true;
1374 0 : }
1375 0 : if xlrec.moff + xlrec.nmembers > self.checkpoint.nextMultiOffset {
1376 0 : self.checkpoint.nextMultiOffset = xlrec.moff + xlrec.nmembers;
1377 0 : self.checkpoint_modified = true;
1378 0 : }
1379 0 : let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| {
1380 0 : if let Some(max_xid) = acc {
1381 0 : if mbr.xid.wrapping_sub(max_xid) as i32 > 0 {
1382 0 : Some(mbr.xid)
1383 : } else {
1384 0 : acc
1385 : }
1386 : } else {
1387 0 : Some(mbr.xid)
1388 : }
1389 0 : });
1390 :
1391 0 : if let Some(max_xid) = max_mbr_xid {
1392 0 : if self.checkpoint.update_next_xid(max_xid) {
1393 0 : self.checkpoint_modified = true;
1394 0 : }
1395 0 : }
1396 0 : Ok(())
1397 0 : }
1398 :
1399 0 : async fn ingest_multixact_truncate_record(
1400 0 : &mut self,
1401 0 : modification: &mut DatadirModification<'_>,
1402 0 : xlrec: &XlMultiXactTruncate,
1403 0 : ctx: &RequestContext,
1404 0 : ) -> Result<()> {
1405 0 : self.checkpoint.oldestMulti = xlrec.end_trunc_off;
1406 0 : self.checkpoint.oldestMultiDB = xlrec.oldest_multi_db;
1407 0 : self.checkpoint_modified = true;
1408 0 :
1409 0 : // PerformMembersTruncation
1410 0 : let maxsegment: i32 = mx_offset_to_member_segment(pg_constants::MAX_MULTIXACT_OFFSET);
1411 0 : let startsegment: i32 = mx_offset_to_member_segment(xlrec.start_trunc_memb);
1412 0 : let endsegment: i32 = mx_offset_to_member_segment(xlrec.end_trunc_memb);
1413 0 : let mut segment: i32 = startsegment;
1414 :
1415 : // Delete all the segments except the last one. The last segment can still
1416 : // contain, possibly partially, valid data.
1417 0 : while segment != endsegment {
1418 0 : modification
1419 0 : .drop_slru_segment(SlruKind::MultiXactMembers, segment as u32, ctx)
1420 0 : .await?;
1421 :
1422 : /* move to next segment, handling wraparound correctly */
1423 0 : if segment == maxsegment {
1424 0 : segment = 0;
1425 0 : } else {
1426 0 : segment += 1;
1427 0 : }
1428 : }
1429 :
1430 : // Truncate offsets
1431 : // FIXME: this did not handle wraparound correctly
1432 :
1433 0 : Ok(())
1434 0 : }
1435 :
1436 0 : async fn ingest_relmap_page(
1437 0 : &mut self,
1438 0 : modification: &mut DatadirModification<'_>,
1439 0 : xlrec: &XlRelmapUpdate,
1440 0 : decoded: &DecodedWALRecord,
1441 0 : ctx: &RequestContext,
1442 0 : ) -> Result<()> {
1443 0 : let mut buf = decoded.record.clone();
1444 0 : buf.advance(decoded.main_data_offset);
1445 0 : // skip xl_relmap_update
1446 0 : buf.advance(12);
1447 0 :
1448 0 : modification
1449 0 : .put_relmap_file(
1450 0 : xlrec.tsid,
1451 0 : xlrec.dbid,
1452 0 : Bytes::copy_from_slice(&buf[..]),
1453 0 : ctx,
1454 0 : )
1455 0 : .await
1456 0 : }
1457 :
1458 18 : async fn put_rel_creation(
1459 18 : &mut self,
1460 18 : modification: &mut DatadirModification<'_>,
1461 18 : rel: RelTag,
1462 18 : ctx: &RequestContext,
1463 18 : ) -> Result<()> {
1464 18 : modification.put_rel_creation(rel, 0, ctx).await?;
1465 18 : Ok(())
1466 18 : }
1467 :
1468 272426 : async fn put_rel_page_image(
1469 272426 : &mut self,
1470 272426 : modification: &mut DatadirModification<'_>,
1471 272426 : rel: RelTag,
1472 272426 : blknum: BlockNumber,
1473 272426 : img: Bytes,
1474 272426 : ctx: &RequestContext,
1475 272426 : ) -> Result<(), PageReconstructError> {
1476 272426 : self.handle_rel_extend(modification, rel, blknum, ctx)
1477 6174 : .await?;
1478 272426 : modification.put_rel_page_image(rel, blknum, img)?;
1479 272426 : Ok(())
1480 272426 : }
1481 :
1482 145630 : async fn put_rel_wal_record(
1483 145630 : &mut self,
1484 145630 : modification: &mut DatadirModification<'_>,
1485 145630 : rel: RelTag,
1486 145630 : blknum: BlockNumber,
1487 145630 : rec: NeonWalRecord,
1488 145630 : ctx: &RequestContext,
1489 145630 : ) -> Result<()> {
1490 145630 : self.handle_rel_extend(modification, rel, blknum, ctx)
1491 95 : .await?;
1492 145630 : modification.put_rel_wal_record(rel, blknum, rec)?;
1493 145630 : Ok(())
1494 145630 : }
1495 :
1496 6012 : async fn put_rel_truncation(
1497 6012 : &mut self,
1498 6012 : modification: &mut DatadirModification<'_>,
1499 6012 : rel: RelTag,
1500 6012 : nblocks: BlockNumber,
1501 6012 : ctx: &RequestContext,
1502 6012 : ) -> anyhow::Result<()> {
1503 6012 : modification.put_rel_truncation(rel, nblocks, ctx).await?;
1504 6012 : Ok(())
1505 6012 : }
1506 :
1507 2 : async fn put_rel_drop(
1508 2 : &mut self,
1509 2 : modification: &mut DatadirModification<'_>,
1510 2 : rel: RelTag,
1511 2 : ctx: &RequestContext,
1512 2 : ) -> Result<()> {
1513 2 : modification.put_rel_drop(rel, ctx).await?;
1514 2 : Ok(())
1515 2 : }
1516 :
1517 418056 : async fn handle_rel_extend(
1518 418056 : &mut self,
1519 418056 : modification: &mut DatadirModification<'_>,
1520 418056 : rel: RelTag,
1521 418056 : blknum: BlockNumber,
1522 418056 : ctx: &RequestContext,
1523 418056 : ) -> Result<(), PageReconstructError> {
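: // Block numbers are zero-based, so a record touching block `blknum` implies
: // the relation is at least `blknum + 1` blocks long.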
1524 418056 : let new_nblocks = blknum + 1;
1525 : // Check if the relation exists. We implicitly create relations on the
1526 : // first record that touches them.
1527 : // TODO: it would be nice to be more explicit about this.
1528 :
1529 : // Get the current size, and record the relation's creation if it
1530 : // doesn't exist yet.
1531 : //
1532 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size
1533 : // would check the cache too. Eagerly checking the cache is less work overall
1534 : // and about 10% faster; a cache miss costs more, but misses are rare.
1535 418056 : let old_nblocks = if let Some(nblocks) = modification
1536 418056 : .tline
1537 418056 : .get_cached_rel_size(&rel, modification.get_lsn())
1538 : {
1539 418046 : nblocks
1540 10 : } else if !modification
1541 10 : .tline
1542 10 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1543 0 : .await?
1544 : {
1545 : // Create it with size 0 initially; the logic below will extend it.
1546 10 : modification
1547 10 : .put_rel_creation(rel, 0, ctx)
1548 0 : .await
1549 10 : .context("Relation Error")?;
1550 10 : 0
1551 : } else {
1552 0 : modification
1553 0 : .tline
1554 0 : .get_rel_size(rel, Version::Modified(modification), true, ctx)
1555 0 : .await?
1556 : };
1557 :
1558 418056 : if new_nblocks > old_nblocks {
1559 : //info!("extending {} {} to {}", rel, old_nblocks, new_nblocks);
1560 274788 : modification.put_rel_extend(rel, new_nblocks, ctx).await?;
1561 :
1562 274788 : let mut key = rel_block_to_key(rel, blknum);
1563 : // fill the gap with zeros
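: // For example (hypothetical numbers): if old_nblocks = 3 and blknum = 7,
: // blocks 3..=6 are zero-filled here, while block 7 itself gets the actual
: // image or WAL record from the caller.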
1564 274788 : for gap_blknum in old_nblocks..blknum {
1565 2998 : key.field6 = gap_blknum;
1566 2998 :
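: // Gap pages are shard-filtered: each shard stores only the keys it owns,
: // so zero pages that belong to other shards are skipped.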
1567 2998 : if self.shard.get_shard_number(&key) != self.shard.number {
1568 0 : continue;
1569 2998 : }
1570 2998 :
1571 2998 : modification.put_rel_page_image(rel, gap_blknum, ZERO_PAGE.clone())?;
1572 : }
1573 143268 : }
1574 418056 : Ok(())
1575 418056 : }
1576 :
1577 0 : async fn put_slru_page_image(
1578 0 : &mut self,
1579 0 : modification: &mut DatadirModification<'_>,
1580 0 : kind: SlruKind,
1581 0 : segno: u32,
1582 0 : blknum: BlockNumber,
1583 0 : img: Bytes,
1584 0 : ctx: &RequestContext,
1585 0 : ) -> Result<()> {
1586 0 : self.handle_slru_extend(modification, kind, segno, blknum, ctx)
1587 0 : .await?;
1588 0 : modification.put_slru_page_image(kind, segno, blknum, img)?;
1589 0 : Ok(())
1590 0 : }
1591 :
1592 0 : async fn handle_slru_extend(
1593 0 : &mut self,
1594 0 : modification: &mut DatadirModification<'_>,
1595 0 : kind: SlruKind,
1596 0 : segno: u32,
1597 0 : blknum: BlockNumber,
1598 0 : ctx: &RequestContext,
1599 0 : ) -> anyhow::Result<()> {
1600 0 : // We don't use a cache for this like we do for relations. SLRUs are
1601 0 : // explicitly extended with ZEROPAGE records, not with commit records, so
1602 0 : // extension happens a lot less frequently.
1603 0 :
1604 0 : let new_nblocks = blknum + 1;
1605 : // Check if the segment exists. We implicitly create SLRU segments on the
1606 : // first record that touches them.
1607 : // TODO: it would be nice to be more explicit about this.
1608 0 : let old_nblocks = if !modification
1609 0 : .tline
1610 0 : .get_slru_segment_exists(kind, segno, Version::Modified(modification), ctx)
1611 0 : .await?
1612 : {
1613 : // Create it with size 0 initially; the logic below will extend it.
1614 0 : modification
1615 0 : .put_slru_segment_creation(kind, segno, 0, ctx)
1616 0 : .await?;
1617 0 : 0
1618 : } else {
1619 0 : modification
1620 0 : .tline
1621 0 : .get_slru_segment_size(kind, segno, Version::Modified(modification), ctx)
1622 0 : .await?
1623 : };
1624 :
1625 0 : if new_nblocks > old_nblocks {
1626 0 : trace!(
1627 0 : "extending SLRU {:?} seg {} from {} to {} blocks",
1628 0 : kind,
1629 0 : segno,
1630 0 : old_nblocks,
1631 0 : new_nblocks
1632 0 : );
1633 0 : modification.put_slru_extend(kind, segno, new_nblocks)?;
1634 :
1635 : // fill the gap with zeros
1636 0 : for gap_blknum in old_nblocks..blknum {
1637 0 : modification.put_slru_page_image(kind, segno, gap_blknum, ZERO_PAGE.clone())?;
1638 : }
1639 0 : }
1640 0 : Ok(())
1641 0 : }
1642 : }
1643 :
1644 12 : async fn get_relsize(
1645 12 : modification: &DatadirModification<'_>,
1646 12 : rel: RelTag,
1647 12 : ctx: &RequestContext,
1648 12 : ) -> anyhow::Result<BlockNumber> {
1649 12 : let nblocks = if !modification
1650 12 : .tline
1651 12 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1652 0 : .await?
1653 : {
1654 0 : 0
1655 : } else {
1656 12 : modification
1657 12 : .tline
1658 12 : .get_rel_size(rel, Version::Modified(modification), true, ctx)
1659 0 : .await?
1660 : };
1661 12 : Ok(nblocks)
1662 12 : }
1663 :
1664 : #[allow(clippy::bool_assert_comparison)]
1665 : #[cfg(test)]
1666 : mod tests {
1667 : use super::*;
1668 : use crate::tenant::harness::*;
1669 : use crate::tenant::remote_timeline_client::{remote_initdb_archive_path, INITDB_PATH};
1670 : use crate::tenant::Timeline;
1671 : use postgres_ffi::v14::xlog_utils::SIZEOF_CHECKPOINT;
1672 : use postgres_ffi::RELSEG_SIZE;
1673 :
1674 : use crate::DEFAULT_PG_VERSION;
1675 :
1676 : /// Arbitrary relation tag, for testing.
1677 : const TESTREL_A: RelTag = RelTag {
1678 : spcnode: 0,
1679 : dbnode: 111,
1680 : relnode: 1000,
1681 : forknum: 0,
1682 : };
1683 :
1684 12 : fn assert_current_logical_size(_timeline: &Timeline, _lsn: Lsn) {
1685 12 : // TODO
1686 12 : }
1687 :
1688 : static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
1689 :
1690 8 : async fn init_walingest_test(tline: &Timeline, ctx: &RequestContext) -> Result<WalIngest> {
1691 8 : let mut m = tline.begin_modification(Lsn(0x10));
1692 8 : m.put_checkpoint(ZERO_CHECKPOINT.clone())?;
1693 16 : m.put_relmap_file(0, 111, Bytes::from(""), ctx).await?; // dummy relmapper file
1694 8 : m.commit(ctx).await?;
1695 8 : let walingest = WalIngest::new(tline, Lsn(0x10), ctx).await?;
1696 :
1697 8 : Ok(walingest)
1698 8 : }
1699 :
1700 2 : #[tokio::test]
1701 2 : async fn test_relsize() -> Result<()> {
1702 2 : let (tenant, ctx) = TenantHarness::create("test_relsize")?.load().await;
1703 2 : let tline = tenant
1704 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1705 6 : .await?;
1706 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1707 2 :
1708 2 : let mut m = tline.begin_modification(Lsn(0x20));
1709 2 : walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
1710 2 : walingest
1711 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1712 2 : .await?;
1713 2 : m.commit(&ctx).await?;
1714 2 : let mut m = tline.begin_modification(Lsn(0x30));
1715 2 : walingest
1716 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 3"), &ctx)
1717 2 : .await?;
1718 2 : m.commit(&ctx).await?;
1719 2 : let mut m = tline.begin_modification(Lsn(0x40));
1720 2 : walingest
1721 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1 at 4"), &ctx)
1722 2 : .await?;
1723 2 : m.commit(&ctx).await?;
1724 2 : let mut m = tline.begin_modification(Lsn(0x50));
1725 2 : walingest
1726 2 : .put_rel_page_image(&mut m, TESTREL_A, 2, test_img("foo blk 2 at 5"), &ctx)
1727 2 : .await?;
1728 2 : m.commit(&ctx).await?;
1729 2 :
1730 2 : assert_current_logical_size(&tline, Lsn(0x50));
1731 2 :
1732 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
1733 2 : assert_eq!(
1734 2 : tline
1735 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
1736 2 : .await?,
1737 2 : false
1738 2 : );
1739 2 : assert!(tline
1740 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
1741 2 : .await
1742 2 : .is_err());
1743 2 : assert_eq!(
1744 2 : tline
1745 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1746 2 : .await?,
1747 2 : true
1748 2 : );
1749 2 : assert_eq!(
1750 2 : tline
1751 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1752 2 : .await?,
1753 2 : 1
1754 2 : );
1755 2 : assert_eq!(
1756 2 : tline
1757 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
1758 2 : .await?,
1759 2 : 3
1760 2 : );
1761 2 :
1762 2 : // Check page contents at each LSN
1763 2 : assert_eq!(
1764 2 : tline
1765 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x20)), false, &ctx)
1766 2 : .await?,
1767 2 : test_img("foo blk 0 at 2")
1768 2 : );
1769 2 :
1770 2 : assert_eq!(
1771 2 : tline
1772 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x30)), false, &ctx)
1773 2 : .await?,
1774 2 : test_img("foo blk 0 at 3")
1775 2 : );
1776 2 :
1777 2 : assert_eq!(
1778 2 : tline
1779 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x40)), false, &ctx)
1780 2 : .await?,
1781 2 : test_img("foo blk 0 at 3")
1782 2 : );
1783 2 : assert_eq!(
1784 2 : tline
1785 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x40)), false, &ctx)
1786 2 : .await?,
1787 2 : test_img("foo blk 1 at 4")
1788 2 : );
1789 2 :
1790 2 : assert_eq!(
1791 2 : tline
1792 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x50)), false, &ctx)
1793 2 : .await?,
1794 2 : test_img("foo blk 0 at 3")
1795 2 : );
1796 2 : assert_eq!(
1797 2 : tline
1798 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x50)), false, &ctx)
1799 2 : .await?,
1800 2 : test_img("foo blk 1 at 4")
1801 2 : );
1802 2 : assert_eq!(
1803 2 : tline
1804 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), false, &ctx)
1805 2 : .await?,
1806 2 : test_img("foo blk 2 at 5")
1807 2 : );
1808 2 :
1809 2 : // Truncate last block
1810 2 : let mut m = tline.begin_modification(Lsn(0x60));
1811 2 : walingest
1812 2 : .put_rel_truncation(&mut m, TESTREL_A, 2, &ctx)
1813 2 : .await?;
1814 2 : m.commit(&ctx).await?;
1815 2 : assert_current_logical_size(&tline, Lsn(0x60));
1816 2 :
1817 2 : // Check reported size and contents after truncation
1818 2 : assert_eq!(
1819 2 : tline
1820 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), false, &ctx)
1821 2 : .await?,
1822 2 : 2
1823 2 : );
1824 2 : assert_eq!(
1825 2 : tline
1826 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x60)), false, &ctx)
1827 2 : .await?,
1828 2 : test_img("foo blk 0 at 3")
1829 2 : );
1830 2 : assert_eq!(
1831 2 : tline
1832 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x60)), false, &ctx)
1833 2 : .await?,
1834 2 : test_img("foo blk 1 at 4")
1835 2 : );
1836 2 :
1837 2 : // should still see the truncated block with older LSN
1838 2 : assert_eq!(
1839 2 : tline
1840 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
1841 2 : .await?,
1842 2 : 3
1843 2 : );
1844 2 : assert_eq!(
1845 2 : tline
1846 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), false, &ctx)
1847 2 : .await?,
1848 2 : test_img("foo blk 2 at 5")
1849 2 : );
1850 2 :
1851 2 : // Truncate to zero length
1852 2 : let mut m = tline.begin_modification(Lsn(0x68));
1853 2 : walingest
1854 2 : .put_rel_truncation(&mut m, TESTREL_A, 0, &ctx)
1855 2 : .await?;
1856 2 : m.commit(&ctx).await?;
1857 2 : assert_eq!(
1858 2 : tline
1859 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x68)), false, &ctx)
1860 2 : .await?,
1861 2 : 0
1862 2 : );
1863 2 :
1864 2 : // Extend from 0 to 2 blocks, leaving a gap
1865 2 : let mut m = tline.begin_modification(Lsn(0x70));
1866 2 : walingest
1867 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1"), &ctx)
1868 2 : .await?;
1869 2 : m.commit(&ctx).await?;
1870 2 : assert_eq!(
1871 2 : tline
1872 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x70)), false, &ctx)
1873 2 : .await?,
1874 2 : 2
1875 2 : );
1876 2 : assert_eq!(
1877 2 : tline
1878 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x70)), false, &ctx)
1879 2 : .await?,
1880 2 : ZERO_PAGE
1881 2 : );
1882 2 : assert_eq!(
1883 2 : tline
1884 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x70)), false, &ctx)
1885 2 : .await?,
1886 2 : test_img("foo blk 1")
1887 2 : );
1888 2 :
1889 2 : // Extend a lot more, leaving a big gap that spans across segments
1890 2 : let mut m = tline.begin_modification(Lsn(0x80));
1891 2 : walingest
1892 2 : .put_rel_page_image(&mut m, TESTREL_A, 1500, test_img("foo blk 1500"), &ctx)
1893 2 : .await?;
1894 71 : m.commit(&ctx).await?;
1895 2 : assert_eq!(
1896 2 : tline
1897 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
1898 2 : .await?,
1899 2 : 1501
1900 2 : );
1901 2998 : for blk in 2..1500 {
1902 2996 : assert_eq!(
1903 2996 : tline
1904 2996 : .get_rel_page_at_lsn(TESTREL_A, blk, Version::Lsn(Lsn(0x80)), false, &ctx)
1905 3009 : .await?,
1906 2996 : ZERO_PAGE
1907 2 : );
1908 2 : }
1909 2 : assert_eq!(
1910 2 : tline
1911 2 : .get_rel_page_at_lsn(TESTREL_A, 1500, Version::Lsn(Lsn(0x80)), false, &ctx)
1912 2 : .await?,
1913 2 : test_img("foo blk 1500")
1914 2 : );
1915 2 :
1916 2 : Ok(())
1917 2 : }
1918 :
1919 : // Test what happens if we drop a relation
1920 : // and then re-create it within the same layer.
1921 2 : #[tokio::test]
1922 2 : async fn test_drop_extend() -> Result<()> {
1923 2 : let (tenant, ctx) = TenantHarness::create("test_drop_extend")?.load().await;
1924 2 : let tline = tenant
1925 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1926 6 : .await?;
1927 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1928 2 :
1929 2 : let mut m = tline.begin_modification(Lsn(0x20));
1930 2 : walingest
1931 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1932 2 : .await?;
1933 2 : m.commit(&ctx).await?;
1934 2 :
1935 2 : // Check that rel exists and size is correct
1936 2 : assert_eq!(
1937 2 : tline
1938 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1939 2 : .await?,
1940 2 : true
1941 2 : );
1942 2 : assert_eq!(
1943 2 : tline
1944 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1945 2 : .await?,
1946 2 : 1
1947 2 : );
1948 2 :
1949 2 : // Drop rel
1950 2 : let mut m = tline.begin_modification(Lsn(0x30));
1951 2 : walingest.put_rel_drop(&mut m, TESTREL_A, &ctx).await?;
1952 2 : m.commit(&ctx).await?;
1953 2 :
1954 2 : // Check that rel is not visible anymore
1955 2 : assert_eq!(
1956 2 : tline
1957 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x30)), false, &ctx)
1958 2 : .await?,
1959 2 : false
1960 2 : );
1961 2 :
1962 2 : // FIXME: should fail
1963 2 : //assert!(tline.get_rel_size(TESTREL_A, Lsn(0x30), false)?.is_none());
1964 2 :
1965 2 : // Re-create it
1966 2 : let mut m = tline.begin_modification(Lsn(0x40));
1967 2 : walingest
1968 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 4"), &ctx)
1969 2 : .await?;
1970 2 : m.commit(&ctx).await?;
1971 2 :
1972 2 : // Check that rel exists and size is correct
1973 2 : assert_eq!(
1974 2 : tline
1975 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x40)), false, &ctx)
1976 2 : .await?,
1977 2 : true
1978 2 : );
1979 2 : assert_eq!(
1980 2 : tline
1981 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x40)), false, &ctx)
1982 2 : .await?,
1983 2 : 1
1984 2 : );
1985 2 :
1986 2 : Ok(())
1987 2 : }
1988 :
1989 : // Test what happens if we truncate a relation
1990 : // so that one of its segments is dropped,
1991 : // and then extend it again within the same layer.
1992 2 : #[tokio::test]
1993 2 : async fn test_truncate_extend() -> Result<()> {
1994 2 : let (tenant, ctx) = TenantHarness::create("test_truncate_extend")?.load().await;
1995 2 : let tline = tenant
1996 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1997 6 : .await?;
1998 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1999 2 :
2000 2 : // Create a 20 MB relation (the size is arbitrary)
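: // (20 MB / 8 KB per block = 2560 blocks)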
2001 2 : let relsize = 20 * 1024 * 1024 / 8192;
2002 2 : let mut m = tline.begin_modification(Lsn(0x20));
2003 5120 : for blkno in 0..relsize {
2004 5120 : let data = format!("foo blk {} at {}", blkno, Lsn(0x20));
2005 5120 : walingest
2006 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2007 2 : .await?;
2008 2 : }
2009 2 : m.commit(&ctx).await?;
2010 2 :
2011 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
2012 2 : assert_eq!(
2013 2 : tline
2014 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
2015 2 : .await?,
2016 2 : false
2017 2 : );
2018 2 : assert!(tline
2019 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
2020 2 : .await
2021 2 : .is_err());
2022 2 :
2023 2 : assert_eq!(
2024 2 : tline
2025 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
2026 2 : .await?,
2027 2 : true
2028 2 : );
2029 2 : assert_eq!(
2030 2 : tline
2031 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
2032 2 : .await?,
2033 2 : relsize
2034 2 : );
2035 2 :
2036 2 : // Check relation content
2037 5120 : for blkno in 0..relsize {
2038 5120 : let lsn = Lsn(0x20);
2039 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2040 5120 : assert_eq!(
2041 5120 : tline
2042 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(lsn), false, &ctx)
2043 201 : .await?,
2044 5120 : test_img(&data)
2045 2 : );
2046 2 : }
2047 2 :
2048 2 : // Truncate the relation so that its second segment is dropped,
2049 2 : // leaving only one page.
2050 2 : let mut m = tline.begin_modification(Lsn(0x60));
2051 2 : walingest
2052 2 : .put_rel_truncation(&mut m, TESTREL_A, 1, &ctx)
2053 2 : .await?;
2054 2 : m.commit(&ctx).await?;
2055 2 :
2056 2 : // Check reported size and contents after truncation
2057 2 : assert_eq!(
2058 2 : tline
2059 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), false, &ctx)
2060 2 : .await?,
2061 2 : 1
2062 2 : );
2063 2 :
2064 4 : for blkno in 0..1 {
2065 2 : let lsn = Lsn(0x20);
2066 2 : let data = format!("foo blk {} at {}", blkno, lsn);
2067 2 : assert_eq!(
2068 2 : tline
2069 2 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x60)), false, &ctx)
2070 2 : .await?,
2071 2 : test_img(&data)
2072 2 : );
2073 2 : }
2074 2 :
2075 2 : // should still see all blocks with older LSN
2076 2 : assert_eq!(
2077 2 : tline
2078 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
2079 2 : .await?,
2080 2 : relsize
2081 2 : );
2082 5120 : for blkno in 0..relsize {
2083 5120 : let lsn = Lsn(0x20);
2084 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2085 5120 : assert_eq!(
2086 5120 : tline
2087 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x50)), false, &ctx)
2088 400 : .await?,
2089 5120 : test_img(&data)
2090 2 : );
2091 2 : }
2092 2 :
2093 2 : // Extend relation again.
2094 2 : // Add enough blocks to create second segment
2095 2 : let lsn = Lsn(0x80);
2096 2 : let mut m = tline.begin_modification(lsn);
2097 5120 : for blkno in 0..relsize {
2098 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2099 5120 : walingest
2100 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2101 2 : .await?;
2102 2 : }
2103 2 : m.commit(&ctx).await?;
2104 2 :
2105 2 : assert_eq!(
2106 2 : tline
2107 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
2108 2 : .await?,
2109 2 : true
2110 2 : );
2111 2 : assert_eq!(
2112 2 : tline
2113 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
2114 2 : .await?,
2115 2 : relsize
2116 2 : );
2117 2 : // Check relation content
2118 5120 : for blkno in 0..relsize {
2119 5120 : let lsn = Lsn(0x80);
2120 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2121 5120 : assert_eq!(
2122 5120 : tline
2123 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x80)), false, &ctx)
2124 201 : .await?,
2125 5120 : test_img(&data)
2126 2 : );
2127 2 : }
2128 2 :
2129 2 : Ok(())
2130 2 : }
2131 :
2132 : /// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
2133 : /// split into multiple 1 GB segments in Postgres.
2134 2 : #[tokio::test]
2135 2 : async fn test_large_rel() -> Result<()> {
2136 2 : let (tenant, ctx) = TenantHarness::create("test_large_rel")?.load().await;
2137 2 : let tline = tenant
2138 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2139 6 : .await?;
2140 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2141 2 :
2142 2 : let mut lsn = 0x10;
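: // RELSEG_SIZE is the number of 8 KB blocks in one 1 GB Postgres segment
: // (131072), so writing RELSEG_SIZE + 1 blocks forces a second segment.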
2143 262146 : for blknum in 0..RELSEG_SIZE + 1 {
2144 262146 : lsn += 0x10;
2145 262146 : let mut m = tline.begin_modification(Lsn(lsn));
2146 262146 : let img = test_img(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
2147 262146 : walingest
2148 262146 : .put_rel_page_image(&mut m, TESTREL_A, blknum as BlockNumber, img, &ctx)
2149 6174 : .await?;
2150 262146 : m.commit(&ctx).await?;
2151 2 : }
2152 2 :
2153 2 : assert_current_logical_size(&tline, Lsn(lsn));
2154 2 :
2155 2 : assert_eq!(
2156 2 : tline
2157 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2158 2 : .await?,
2159 2 : RELSEG_SIZE + 1
2160 2 : );
2161 2 :
2162 2 : // Truncate one block
2163 2 : lsn += 0x10;
2164 2 : let mut m = tline.begin_modification(Lsn(lsn));
2165 2 : walingest
2166 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE, &ctx)
2167 2 : .await?;
2168 2 : m.commit(&ctx).await?;
2169 2 : assert_eq!(
2170 2 : tline
2171 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2172 2 : .await?,
2173 2 : RELSEG_SIZE
2174 2 : );
2175 2 : assert_current_logical_size(&tline, Lsn(lsn));
2176 2 :
2177 2 : // Truncate another block
2178 2 : lsn += 0x10;
2179 2 : let mut m = tline.begin_modification(Lsn(lsn));
2180 2 : walingest
2181 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE - 1, &ctx)
2182 2 : .await?;
2183 2 : m.commit(&ctx).await?;
2184 2 : assert_eq!(
2185 2 : tline
2186 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2187 2 : .await?,
2188 2 : RELSEG_SIZE - 1
2189 2 : );
2190 2 : assert_current_logical_size(&tline, Lsn(lsn));
2191 2 :
2192 2 : // Truncate to 3000, and then truncate all the way down to 0, one block at
2193 2 : // a time. This tests the behavior at segment boundaries.
2194 2 : let mut size: i32 = 3000;
2195 6004 : while size >= 0 {
2196 6002 : lsn += 0x10;
2197 6002 : let mut m = tline.begin_modification(Lsn(lsn));
2198 6002 : walingest
2199 6002 : .put_rel_truncation(&mut m, TESTREL_A, size as BlockNumber, &ctx)
2200 144 : .await?;
2201 6002 : m.commit(&ctx).await?;
2202 6002 : assert_eq!(
2203 6002 : tline
2204 6002 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2205 2 : .await?,
2206 6002 : size as BlockNumber
2207 2 : );
2208 2 :
2209 6002 : size -= 1;
2210 2 : }
2211 2 : assert_current_logical_size(&tline, Lsn(lsn));
2212 2 :
2213 2 : Ok(())
2214 2 : }
2215 :
2216 : /// Replay a WAL segment file taken directly from safekeepers.
2217 : ///
2218 : /// This test is useful for benchmarking since it allows us to profile only
2219 : /// the walingest code in a single-threaded executor, and iterate more quickly
2220 : /// without waiting for unrelated steps.
2221 2 : #[tokio::test]
2222 2 : async fn test_ingest_real_wal() {
2223 2 : use crate::tenant::harness::*;
2224 2 : use postgres_ffi::waldecoder::WalStreamDecoder;
2225 2 : use postgres_ffi::WAL_SEGMENT_SIZE;
2226 2 :
2227 2 : // Define test data path and constants.
2228 2 : //
2229 2 : // Steps to reconstruct the data, if needed:
2230 2 : // 1. Run the pgbench python test
2231 2 : // 2. Take the first wal segment file from safekeeper
2232 2 : // 3. Compress it using `zstd --long input_file`
2233 2 : // 4. Copy initdb.tar.zst from local_fs_remote_storage
2234 2 : // 5. Grep sk logs for "restart decoder" to get startpoint
2235 2 : // 6. Run just the decoder from this test to get the endpoint.
2236 2 : // It's the last LSN the decoder will output.
2237 2 : let pg_version = 15; // The test data was generated by pg15
2238 2 : let path = "test_data/sk_wal_segment_from_pgbench";
2239 2 : let wal_segment_path = format!("{path}/000000010000000000000001.zst");
2240 2 : let source_initdb_path = format!("{path}/{INITDB_PATH}");
2241 2 : let startpoint = Lsn::from_hex("14AEC08").unwrap();
2242 2 : let _endpoint = Lsn::from_hex("1FFFF98").unwrap();
2243 2 :
2244 2 : let harness = TenantHarness::create("test_ingest_real_wal").unwrap();
2245 2 : let (tenant, ctx) = harness.load().await;
2246 2 :
2247 2 : let remote_initdb_path =
2248 2 : remote_initdb_archive_path(&tenant.tenant_shard_id().tenant_id, &TIMELINE_ID);
2249 2 : let initdb_path = harness.remote_fs_dir.join(remote_initdb_path.get_path());
2250 2 :
2251 2 : std::fs::create_dir_all(initdb_path.parent().unwrap())
2252 2 : .expect("creating test dir should work");
2253 2 : std::fs::copy(source_initdb_path, initdb_path).expect("copying the initdb.tar.zst works");
2254 2 :
2255 2 : // Bootstrap a real timeline. We can't use create_test_timeline because
2256 2 : // it doesn't create a real checkpoint, and WalIngest::new would try to
2257 2 : // parse the garbage data.
2258 2 : let tline = tenant
2259 2 : .bootstrap_timeline_test(TIMELINE_ID, pg_version, Some(TIMELINE_ID), &ctx)
2260 20216 : .await
2261 2 : .unwrap();
2262 2 :
2263 2 : // We fully read and decompress this into memory before decoding
2264 2 : // to get a more accurate perf profile of the decoder.
2265 2 : let bytes = {
2266 2 : use async_compression::tokio::bufread::ZstdDecoder;
2267 2 : let file = tokio::fs::File::open(wal_segment_path).await.unwrap();
2268 2 : let reader = tokio::io::BufReader::new(file);
2269 2 : let decoder = ZstdDecoder::new(reader);
2270 2 : let mut reader = tokio::io::BufReader::new(decoder);
2271 2 : let mut buffer = Vec::new();
2272 222 : tokio::io::copy_buf(&mut reader, &mut buffer).await.unwrap();
2273 2 : buffer
2274 2 : };
2275 2 :
2276 2 : // TODO start a profiler too
2277 2 : let started_at = std::time::Instant::now();
2278 2 :
2279 2 : // Initialize walingest
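: // `xlogoff` is startpoint's byte offset within the segment; the bytes
: // before it are skipped when feeding the decoder below.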
2280 2 : let xlogoff: usize = startpoint.segment_offset(WAL_SEGMENT_SIZE);
2281 2 : let mut decoder = WalStreamDecoder::new(startpoint, pg_version);
2282 2 : let mut walingest = WalIngest::new(tline.as_ref(), startpoint, &ctx)
2283 5 : .await
2284 2 : .unwrap();
2285 2 : let mut modification = tline.begin_modification(startpoint);
2286 2 : let mut decoded = DecodedWALRecord::default();
2287 2 : println!("decoding {} bytes", bytes.len() - xlogoff);
2288 2 :
2289 2 : // Decode and ingest WAL. We process the WAL in chunks because
2290 2 : // that's how the bytes arrive from the safekeepers.
2291 474686 : for chunk in bytes[xlogoff..].chunks(50) {
2292 474686 : decoder.feed_bytes(chunk);
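: // WalStreamDecoder buffers partial input internally, and poll_decode()
: // yields only complete records, so feeding arbitrary 50-byte chunks is safe.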
2293 620536 : while let Some((lsn, recdata)) = decoder.poll_decode().unwrap() {
2294 145850 : walingest
2295 145850 : .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
2296 101 : .await
2297 145850 : .unwrap();
2298 2 : }
2299 474686 : modification.commit(&ctx).await.unwrap();
2300 2 : }
2301 2 :
2302 2 : let duration = started_at.elapsed();
2303 2 : println!("done in {:?}", duration);
2304 2 : }
2305 : }