1 : //!
2 : //! Parse PostgreSQL WAL records and store them in a neon Timeline.
3 : //!
4 : //! The pipeline for ingesting WAL looks like this:
5 : //!
6 : //! WAL receiver -> WalIngest -> Repository
7 : //!
8 : //! The WAL receiver receives a stream of WAL from the WAL safekeepers,
9 : //! and decodes it to individual WAL records. It feeds the WAL records
10 : //! to WalIngest, which parses them and stores them in the Repository.
11 : //!
12 : //! The neon Repository can store page versions in two formats: as
13 : //! page images, or as WAL records. WalIngest::ingest_record() extracts
14 : //! page images out of some WAL records, but stores most of them as WAL
15 : //! records. If a WAL record modifies multiple pages, WalIngest
16 : //! calls the Repository::put_wal_record or put_page_image function
17 : //! separately for each modified page.
18 : //!
19 : //! To reconstruct a page using a WAL record, the Repository calls the
20 : //! code in walredo.rs. walredo.rs passes most WAL records to the WAL
21 : //! redo Postgres process, but it can handle some records directly with
22 : //! bespoke Rust code.
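//!
//! A minimal sketch of how a caller drives ingestion (hypothetical driver
//! loop for illustration; `next_wal_record()` is an assumed WAL source, and
//! the real WAL receiver adds batching, flushing, and error handling):
//!
//! ```ignore
//! let mut walingest = WalIngest::new(&timeline, startpoint, &ctx).await?;
//! let mut decoded = DecodedWALRecord::default();
//! while let Some((lsn, recdata)) = next_wal_record() {
//!     let mut modification = timeline.begin_modification(lsn);
//!     walingest
//!         .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
//!         .await?;
//!     modification.commit(&ctx).await?;
//! }
//! ```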
23 :
24 : use pageserver_api::shard::ShardIdentity;
25 : use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes;
26 : use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment;
27 : use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn};
28 :
29 : use anyhow::{bail, Context, Result};
30 : use bytes::{Buf, Bytes, BytesMut};
31 : use tracing::*;
32 : use utils::failpoint_support;
33 :
34 : use crate::context::RequestContext;
35 : use crate::metrics::WAL_INGEST;
36 : use crate::pgdatadir_mapping::{DatadirModification, Version};
37 : use crate::tenant::PageReconstructError;
38 : use crate::tenant::Timeline;
39 : use crate::walrecord::*;
40 : use crate::ZERO_PAGE;
41 : use pageserver_api::key::rel_block_to_key;
42 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
43 : use postgres_ffi::pg_constants;
44 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
45 : use postgres_ffi::v14::nonrelfile_utils::mx_offset_to_member_segment;
46 : use postgres_ffi::v14::xlog_utils::*;
47 : use postgres_ffi::v14::CheckPoint;
48 : use postgres_ffi::TransactionId;
49 : use postgres_ffi::BLCKSZ;
50 : use utils::lsn::Lsn;
51 :
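/// State carried across `ingest_record` calls: the shard identity used to
/// filter out keys that belong to other shards, plus an in-memory copy of the
/// latest CheckPoint and a dirty flag, so the checkpoint is written back to
/// the repository only when it actually changed.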
52 : pub struct WalIngest {
53 : shard: ShardIdentity,
54 : checkpoint: CheckPoint,
55 : checkpoint_modified: bool,
56 : }
57 :
58 : impl WalIngest {
59 12 : pub async fn new(
60 12 : timeline: &Timeline,
61 12 : startpoint: Lsn,
62 12 : ctx: &RequestContext,
63 12 : ) -> anyhow::Result<WalIngest> {
64 : // Fetch the latest checkpoint into memory, so that we can compare with it
65 : // quickly in `ingest_record` and update it when it changes.
66 12 : let checkpoint_bytes = timeline.get_checkpoint(startpoint, ctx).await?;
67 12 : let checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
68 12 : trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
69 :
70 12 : Ok(WalIngest {
71 12 : shard: *timeline.get_shard_identity(),
72 12 : checkpoint,
73 12 : checkpoint_modified: false,
74 12 : })
75 12 : }
76 :
77 : ///
78 : /// Decode a PostgreSQL WAL record and store it in the repository, in the given timeline.
79 : ///
80 : /// This function updates the `lsn` field of the `DatadirModification`.
81 : ///
82 : /// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
83 : /// relations/pages that the record affects.
84 : ///
85 : /// This function returns `true` if the record was ingested, and `false` if it was filtered out
86 : ///
87 145852 : pub async fn ingest_record(
88 145852 : &mut self,
89 145852 : recdata: Bytes,
90 145852 : lsn: Lsn,
91 145852 : modification: &mut DatadirModification<'_>,
92 145852 : decoded: &mut DecodedWALRecord,
93 145852 : ctx: &RequestContext,
94 145852 : ) -> anyhow::Result<bool> {
95 145852 : WAL_INGEST.records_received.inc();
96 145852 : let pg_version = modification.tline.pg_version;
97 145852 : let prev_len = modification.len();
98 145852 :
99 145852 : modification.set_lsn(lsn)?;
100 145852 : decode_wal_record(recdata, decoded, pg_version)?;
101 :
102 145852 : let mut buf = decoded.record.clone();
103 145852 : buf.advance(decoded.main_data_offset);
104 145852 :
105 145852 : assert!(!self.checkpoint_modified);
106 145852 : if decoded.xl_xid != pg_constants::INVALID_TRANSACTION_ID
107 145834 : && self.checkpoint.update_next_xid(decoded.xl_xid)
108 2 : {
109 2 : self.checkpoint_modified = true;
110 145850 : }
111 :
112 145852 : failpoint_support::sleep_millis_async!("wal-ingest-record-sleep");
113 :
114 145852 : match decoded.xl_rmid {
115 : pg_constants::RM_HEAP_ID | pg_constants::RM_HEAP2_ID => {
116 : // Heap AM records need some special handling, because they modify VM pages
117 : // without registering them with the standard mechanism.
118 145474 : self.ingest_heapam_record(&mut buf, modification, decoded, ctx)
119 0 : .await?;
120 : }
121 : pg_constants::RM_NEON_ID => {
122 0 : self.ingest_neonrmgr_record(&mut buf, modification, decoded, ctx)
123 0 : .await?;
124 : }
125 : // Handle other special record types
126 : pg_constants::RM_SMGR_ID => {
127 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
128 16 :
129 16 : if info == pg_constants::XLOG_SMGR_CREATE {
130 16 : let create = XlSmgrCreate::decode(&mut buf);
131 16 : self.ingest_xlog_smgr_create(modification, &create, ctx)
132 5 : .await?;
133 0 : } else if info == pg_constants::XLOG_SMGR_TRUNCATE {
134 0 : let truncate = XlSmgrTruncate::decode(&mut buf);
135 0 : self.ingest_xlog_smgr_truncate(modification, &truncate, ctx)
136 0 : .await?;
137 0 : }
138 : }
139 : pg_constants::RM_DBASE_ID => {
140 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
141 0 : debug!(%info, %pg_version, "handle RM_DBASE_ID");
142 :
143 0 : if pg_version == 14 {
144 0 : if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
145 0 : let createdb = XlCreateDatabase::decode(&mut buf);
146 0 : debug!("XLOG_DBASE_CREATE v14");
147 :
148 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
149 0 : .await?;
150 0 : } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
151 0 : let dropdb = XlDropDatabase::decode(&mut buf);
152 0 : for tablespace_id in dropdb.tablespace_ids {
153 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
154 0 : modification
155 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
156 0 : .await?;
157 : }
158 0 : }
159 0 : } else if pg_version == 15 {
160 0 : if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
161 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
162 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
163 : // The XLOG record was renamed between v14 and v15,
164 : // but the record format is the same.
165 : // So we can reuse XlCreateDatabase here.
166 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
167 0 : let createdb = XlCreateDatabase::decode(&mut buf);
168 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
169 0 : .await?;
170 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
171 0 : let dropdb = XlDropDatabase::decode(&mut buf);
172 0 : for tablespace_id in dropdb.tablespace_ids {
173 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
174 0 : modification
175 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
176 0 : .await?;
177 : }
178 0 : }
179 0 : } else if pg_version == 16 {
180 0 : if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
181 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
182 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
183 : // The XLOG record was renamed between v14 and v15,
184 : // but the record format is the same.
185 : // So we can reuse XlCreateDatabase here.
186 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
187 0 : let createdb = XlCreateDatabase::decode(&mut buf);
188 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
189 0 : .await?;
190 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
191 0 : let dropdb = XlDropDatabase::decode(&mut buf);
192 0 : for tablespace_id in dropdb.tablespace_ids {
193 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
194 0 : modification
195 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
196 0 : .await?;
197 : }
198 0 : }
199 0 : }
200 : }
201 : pg_constants::RM_TBLSPC_ID => {
202 0 : trace!("XLOG_TBLSPC_CREATE/DROP is not handled yet");
203 : }
204 : pg_constants::RM_CLOG_ID => {
205 0 : let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
206 0 :
207 0 : if info == pg_constants::CLOG_ZEROPAGE {
208 0 : let pageno = buf.get_u32_le();
209 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
210 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
211 0 : self.put_slru_page_image(
212 0 : modification,
213 0 : SlruKind::Clog,
214 0 : segno,
215 0 : rpageno,
216 0 : ZERO_PAGE.clone(),
217 0 : ctx,
218 0 : )
219 0 : .await?;
220 : } else {
221 0 : assert!(info == pg_constants::CLOG_TRUNCATE);
222 0 : let xlrec = XlClogTruncate::decode(&mut buf);
223 0 : self.ingest_clog_truncate_record(modification, &xlrec, ctx)
224 0 : .await?;
225 : }
226 : }
227 : pg_constants::RM_XACT_ID => {
228 24 : let info = decoded.xl_info & pg_constants::XLOG_XACT_OPMASK;
229 24 :
230 24 : if info == pg_constants::XLOG_XACT_COMMIT || info == pg_constants::XLOG_XACT_ABORT {
231 8 : let parsed_xact =
232 8 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
233 8 : self.ingest_xact_record(
234 8 : modification,
235 8 : &parsed_xact,
236 8 : info == pg_constants::XLOG_XACT_COMMIT,
237 8 : ctx,
238 8 : )
239 0 : .await?;
240 16 : } else if info == pg_constants::XLOG_XACT_COMMIT_PREPARED
241 16 : || info == pg_constants::XLOG_XACT_ABORT_PREPARED
242 : {
243 0 : let parsed_xact =
244 0 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
245 0 : self.ingest_xact_record(
246 0 : modification,
247 0 : &parsed_xact,
248 0 : info == pg_constants::XLOG_XACT_COMMIT_PREPARED,
249 0 : ctx,
250 0 : )
251 0 : .await?;
252 : // Remove twophase file. see RemoveTwoPhaseFile() in postgres code
253 0 : trace!(
254 0 : "Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
255 0 : decoded.xl_xid,
256 0 : parsed_xact.xid,
257 0 : lsn,
258 0 : );
259 0 : modification
260 0 : .drop_twophase_file(parsed_xact.xid, ctx)
261 0 : .await?;
262 16 : } else if info == pg_constants::XLOG_XACT_PREPARE {
263 0 : modification
264 0 : .put_twophase_file(decoded.xl_xid, Bytes::copy_from_slice(&buf[..]), ctx)
265 0 : .await?;
266 16 : }
267 : }
268 : pg_constants::RM_MULTIXACT_ID => {
269 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
270 0 :
271 0 : if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE {
272 0 : let pageno = buf.get_u32_le();
273 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
274 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
275 0 : self.put_slru_page_image(
276 0 : modification,
277 0 : SlruKind::MultiXactOffsets,
278 0 : segno,
279 0 : rpageno,
280 0 : ZERO_PAGE.clone(),
281 0 : ctx,
282 0 : )
283 0 : .await?;
284 0 : } else if info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE {
285 0 : let pageno = buf.get_u32_le();
286 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
287 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
288 0 : self.put_slru_page_image(
289 0 : modification,
290 0 : SlruKind::MultiXactMembers,
291 0 : segno,
292 0 : rpageno,
293 0 : ZERO_PAGE.clone(),
294 0 : ctx,
295 0 : )
296 0 : .await?;
297 0 : } else if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
298 0 : let xlrec = XlMultiXactCreate::decode(&mut buf);
299 0 : self.ingest_multixact_create_record(modification, &xlrec)?;
300 0 : } else if info == pg_constants::XLOG_MULTIXACT_TRUNCATE_ID {
301 0 : let xlrec = XlMultiXactTruncate::decode(&mut buf);
302 0 : self.ingest_multixact_truncate_record(modification, &xlrec, ctx)
303 0 : .await?;
304 0 : }
305 : }
306 : pg_constants::RM_RELMAP_ID => {
307 0 : let xlrec = XlRelmapUpdate::decode(&mut buf);
308 0 : self.ingest_relmap_page(modification, &xlrec, decoded, ctx)
309 0 : .await?;
310 : }
311 : pg_constants::RM_XLOG_ID => {
312 30 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
313 30 :
314 30 : if info == pg_constants::XLOG_NEXTOID {
315 2 : let next_oid = buf.get_u32_le();
316 2 : if self.checkpoint.nextOid != next_oid {
317 2 : self.checkpoint.nextOid = next_oid;
318 2 : self.checkpoint_modified = true;
319 2 : }
320 28 : } else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
321 28 : || info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
322 : {
323 2 : let mut checkpoint_bytes = [0u8; SIZEOF_CHECKPOINT];
324 2 : buf.copy_to_slice(&mut checkpoint_bytes);
325 2 : let xlog_checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
326 2 : trace!(
327 0 : "xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
328 0 : xlog_checkpoint.oldestXid,
329 0 : self.checkpoint.oldestXid
330 0 : );
331 2 : if (self
332 2 : .checkpoint
333 2 : .oldestXid
334 2 : .wrapping_sub(xlog_checkpoint.oldestXid) as i32)
335 2 : < 0
336 0 : {
337 0 : self.checkpoint.oldestXid = xlog_checkpoint.oldestXid;
338 2 : }
339 2 : trace!(
340 0 : "xlog_checkpoint.oldestActiveXid={}, checkpoint.oldestActiveXid={}",
341 0 : xlog_checkpoint.oldestActiveXid,
342 0 : self.checkpoint.oldestActiveXid
343 0 : );
344 2 : self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid;
345 2 :
346 2 : // Write a new checkpoint key-value pair on every checkpoint record, even
347 2 : // if nothing really changed. Not strictly required, but it seems nice to
348 2 : // have some trace of the checkpoint records in the layer files at the same
349 2 : // LSNs.
350 2 : self.checkpoint_modified = true;
351 26 : }
352 : }
353 : pg_constants::RM_LOGICALMSG_ID => {
354 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
355 0 :
356 0 : if info == pg_constants::XLOG_LOGICAL_MESSAGE {
357 0 : let xlrec = crate::walrecord::XlLogicalMessage::decode(&mut buf);
358 0 : let prefix = std::str::from_utf8(&buf[0..xlrec.prefix_size - 1])?;
359 0 : let message = &buf[xlrec.prefix_size..xlrec.prefix_size + xlrec.message_size];
360 0 : if prefix == "neon-test" {
361 : // This is a convenient way to make the WAL ingestion pause at
362 : // a particular point in the WAL. For more fine-grained control,
363 : // we could peek into the message and only pause if it contains
364 : // a particular string, for example, but this is enough for now.
365 0 : failpoint_support::sleep_millis_async!("wal-ingest-logical-message-sleep");
366 0 : } else if let Some(path) = prefix.strip_prefix("neon-file:") {
367 0 : modification.put_file(path, message, ctx).await?;
368 0 : }
369 0 : }
370 : }
371 : pg_constants::RM_STANDBY_ID => {
372 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
373 16 : if info == pg_constants::XLOG_RUNNING_XACTS {
374 0 : let xlrec = crate::walrecord::XlRunningXacts::decode(&mut buf);
375 0 : self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid;
376 16 : }
377 : }
378 292 : _x => {
379 292 : // TODO: should probably log & fail here instead of blindly
380 292 : // doing something without understanding the protocol
381 292 : }
382 : }
383 :
384 : // Iterate through all the blocks that the record modifies, and
385 : // "put" a separate copy of the record for each block.
386 145852 : for blk in decoded.blocks.iter() {
387 145642 : let rel = RelTag {
388 145642 : spcnode: blk.rnode_spcnode,
389 145642 : dbnode: blk.rnode_dbnode,
390 145642 : relnode: blk.rnode_relnode,
391 145642 : forknum: blk.forknum,
392 145642 : };
393 145642 :
394 145642 : let key = rel_block_to_key(rel, blk.blkno);
395 145642 : let key_is_local = self.shard.is_key_local(&key);
396 145642 :
397 145642 : tracing::debug!(
398 0 : lsn=%lsn,
399 0 : key=%key,
400 0 : "ingest: shard decision {} (checkpoint={})",
401 0 : if !key_is_local { "drop" } else { "keep" },
402 0 : self.checkpoint_modified
403 0 : );
404 :
405 145642 : if !key_is_local {
406 0 : if self.shard.is_shard_zero() {
407 : // Shard 0 tracks relation sizes. Although we will not store this block, we will observe
408 : // its blkno in case it implicitly extends a relation.
409 0 : self.observe_decoded_block(modification, blk, ctx).await?;
410 0 : }
411 :
412 0 : continue;
413 145642 : }
414 145642 : self.ingest_decoded_block(modification, lsn, decoded, blk, ctx)
415 84 : .await?;
416 : }
417 :
418 : // If checkpoint data was updated, store the new version in the repository
419 145852 : if self.checkpoint_modified {
420 6 : let new_checkpoint_bytes = self.checkpoint.encode()?;
421 :
422 6 : modification.put_checkpoint(new_checkpoint_bytes)?;
423 6 : self.checkpoint_modified = false;
424 145846 : }
425 :
426 : // Note that at this point this record is only cached in the modification
427 : // until commit() is called to flush the data into the repository and update
428 : // the latest LSN.
429 :
430 145852 : Ok(modification.len() > prev_len)
431 145852 : }
432 :
433 : /// Do not store this block, but observe it for the purposes of updating our relation size state.
434 0 : async fn observe_decoded_block(
435 0 : &mut self,
436 0 : modification: &mut DatadirModification<'_>,
437 0 : blk: &DecodedBkpBlock,
438 0 : ctx: &RequestContext,
439 0 : ) -> Result<(), PageReconstructError> {
440 0 : let rel = RelTag {
441 0 : spcnode: blk.rnode_spcnode,
442 0 : dbnode: blk.rnode_dbnode,
443 0 : relnode: blk.rnode_relnode,
444 0 : forknum: blk.forknum,
445 0 : };
446 0 : self.handle_rel_extend(modification, rel, blk.blkno, ctx)
447 0 : .await
448 0 : }
449 :
450 145642 : async fn ingest_decoded_block(
451 145642 : &mut self,
452 145642 : modification: &mut DatadirModification<'_>,
453 145642 : lsn: Lsn,
454 145642 : decoded: &DecodedWALRecord,
455 145642 : blk: &DecodedBkpBlock,
456 145642 : ctx: &RequestContext,
457 145642 : ) -> Result<(), PageReconstructError> {
458 145642 : let rel = RelTag {
459 145642 : spcnode: blk.rnode_spcnode,
460 145642 : dbnode: blk.rnode_dbnode,
461 145642 : relnode: blk.rnode_relnode,
462 145642 : forknum: blk.forknum,
463 145642 : };
464 145642 :
465 145642 : //
466 145642 : // Instead of storing a full-page-image WAL record,
467 145642 : // it is better to store the extracted image: we can skip wal-redo
468 145642 : // in this case. Also, an FPI record may contain multiple (up to 32) pages,
469 145642 : // so it would have to be copied multiple times.
470 145642 : //
471 145642 : if blk.apply_image
472 60 : && blk.has_image
473 60 : && decoded.xl_rmid == pg_constants::RM_XLOG_ID
474 24 : && (decoded.xl_info == pg_constants::XLOG_FPI
475 0 : || decoded.xl_info == pg_constants::XLOG_FPI_FOR_HINT)
476 : // compression of WAL is not yet supported: fall back to storing the original WAL record
477 24 : && !postgres_ffi::bkpimage_is_compressed(blk.bimg_info, modification.tline.pg_version)?
478 : // do not materialize null pages because they will most likely soon be replaced with real data
479 24 : && blk.bimg_len != 0
480 : {
481 : // Extract page image from FPI record
482 24 : let img_len = blk.bimg_len as usize;
483 24 : let img_offs = blk.bimg_offset as usize;
484 24 : let mut image = BytesMut::with_capacity(BLCKSZ as usize);
485 24 : image.extend_from_slice(&decoded.record[img_offs..img_offs + img_len]);
486 24 :
487 24 : if blk.hole_length != 0 {
488 0 : let tail = image.split_off(blk.hole_offset as usize);
489 0 : image.resize(image.len() + blk.hole_length as usize, 0u8);
490 0 : image.unsplit(tail);
491 24 : }
492 : //
493 : // Match the logic of XLogReadBufferForRedoExtended:
494 : // The page may be uninitialized. If so, we can't set the LSN because
495 : // that would corrupt the page.
496 : //
497 24 : if !page_is_new(&image) {
498 18 : page_set_lsn(&mut image, lsn)
499 6 : }
500 24 : assert_eq!(image.len(), BLCKSZ as usize);
501 24 : self.put_rel_page_image(modification, rel, blk.blkno, image.freeze(), ctx)
502 0 : .await?;
503 : } else {
504 145618 : let rec = NeonWalRecord::Postgres {
505 145618 : will_init: blk.will_init || blk.apply_image,
506 145618 : rec: decoded.record.clone(),
507 145618 : };
508 145618 : self.put_rel_wal_record(modification, rel, blk.blkno, rec, ctx)
509 84 : .await?;
510 : }
511 145642 : Ok(())
512 145642 : }
513 :
514 145474 : async fn ingest_heapam_record(
515 145474 : &mut self,
516 145474 : buf: &mut Bytes,
517 145474 : modification: &mut DatadirModification<'_>,
518 145474 : decoded: &DecodedWALRecord,
519 145474 : ctx: &RequestContext,
520 145474 : ) -> anyhow::Result<()> {
521 145474 : // Handle VM bit updates that are implicitly part of heap records.
522 145474 :
523 145474 : // First, look at the record to determine which VM bits need
524 145474 : // to be cleared. If either of these variables is set, we
525 145474 : // need to clear the corresponding bits in the visibility map.
526 145474 : let mut new_heap_blkno: Option<u32> = None;
527 145474 : let mut old_heap_blkno: Option<u32> = None;
528 145474 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
529 145474 :
530 145474 : match modification.tline.pg_version {
531 : 14 => {
532 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
533 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
534 0 :
535 0 : if info == pg_constants::XLOG_HEAP_INSERT {
536 0 : let xlrec = v14::XlHeapInsert::decode(buf);
537 0 : assert_eq!(0, buf.remaining());
538 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
539 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
540 0 : }
541 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
542 0 : let xlrec = v14::XlHeapDelete::decode(buf);
543 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
544 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
545 0 : }
546 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
547 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
548 : {
549 0 : let xlrec = v14::XlHeapUpdate::decode(buf);
550 0 : // the size of tuple data is inferred from the size of the record.
551 0 : // we can't validate the remaining number of bytes without parsing
552 0 : // the tuple data.
553 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
554 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
555 0 : }
556 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
557 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
558 0 : // non-HOT update where the new tuple goes to a different page than
559 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
560 0 : // set.
561 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
562 0 : }
563 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
564 0 : let xlrec = v14::XlHeapLock::decode(buf);
565 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
566 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
567 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
568 0 : }
569 0 : }
570 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
571 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
572 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
573 0 : let xlrec = v14::XlHeapMultiInsert::decode(buf);
574 :
575 0 : let offset_array_len =
576 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
577 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
578 0 : 0
579 : } else {
580 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
581 : };
582 0 : assert_eq!(offset_array_len, buf.remaining());
583 :
584 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
585 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
586 0 : }
587 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
588 0 : let xlrec = v14::XlHeapLockUpdated::decode(buf);
589 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
590 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
591 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
592 0 : }
593 0 : }
594 : } else {
595 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
596 : }
597 : }
598 : 15 => {
599 145474 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
600 145286 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
601 145286 :
602 145286 : if info == pg_constants::XLOG_HEAP_INSERT {
603 145276 : let xlrec = v15::XlHeapInsert::decode(buf);
604 145276 : assert_eq!(0, buf.remaining());
605 145276 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
606 4 : new_heap_blkno = Some(decoded.blocks[0].blkno);
607 145272 : }
608 10 : } else if info == pg_constants::XLOG_HEAP_DELETE {
609 0 : let xlrec = v15::XlHeapDelete::decode(buf);
610 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
611 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
612 0 : }
613 10 : } else if info == pg_constants::XLOG_HEAP_UPDATE
614 2 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
615 : {
616 8 : let xlrec = v15::XlHeapUpdate::decode(buf);
617 8 : // the size of tuple data is inferred from the size of the record.
618 8 : // we can't validate the remaining number of bytes without parsing
619 8 : // the tuple data.
620 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
621 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
622 8 : }
623 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
624 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
625 0 : // non-HOT update where the new tuple goes to a different page than
626 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
627 0 : // set.
628 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
629 8 : }
630 2 : } else if info == pg_constants::XLOG_HEAP_LOCK {
631 0 : let xlrec = v15::XlHeapLock::decode(buf);
632 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
633 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
634 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
635 0 : }
636 2 : }
637 188 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
638 188 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
639 188 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
640 42 : let xlrec = v15::XlHeapMultiInsert::decode(buf);
641 :
642 42 : let offset_array_len =
643 42 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
644 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
645 2 : 0
646 : } else {
647 40 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
648 : };
649 42 : assert_eq!(offset_array_len, buf.remaining());
650 :
651 42 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
652 8 : new_heap_blkno = Some(decoded.blocks[0].blkno);
653 34 : }
654 146 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
655 0 : let xlrec = v15::XlHeapLockUpdated::decode(buf);
656 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
657 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
658 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
659 0 : }
660 146 : }
661 : } else {
662 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
663 : }
664 : }
665 : 16 => {
666 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
667 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
668 0 :
669 0 : if info == pg_constants::XLOG_HEAP_INSERT {
670 0 : let xlrec = v16::XlHeapInsert::decode(buf);
671 0 : assert_eq!(0, buf.remaining());
672 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
673 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
674 0 : }
675 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
676 0 : let xlrec = v16::XlHeapDelete::decode(buf);
677 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
678 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
679 0 : }
680 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
681 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
682 : {
683 0 : let xlrec = v16::XlHeapUpdate::decode(buf);
684 0 : // the size of tuple data is inferred from the size of the record.
685 0 : // we can't validate the remaining number of bytes without parsing
686 0 : // the tuple data.
687 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
688 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
689 0 : }
690 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
691 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
692 0 : // non-HOT update where the new tuple goes to a different page than
693 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
694 0 : // set.
695 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
696 0 : }
697 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
698 0 : let xlrec = v16::XlHeapLock::decode(buf);
699 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
700 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
701 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
702 0 : }
703 0 : }
704 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
705 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
706 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
707 0 : let xlrec = v16::XlHeapMultiInsert::decode(buf);
708 :
709 0 : let offset_array_len =
710 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
711 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
712 0 : 0
713 : } else {
714 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
715 : };
716 0 : assert_eq!(offset_array_len, buf.remaining());
717 :
718 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
719 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
720 0 : }
721 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
722 0 : let xlrec = v16::XlHeapLockUpdated::decode(buf);
723 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
724 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
725 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
726 0 : }
727 0 : }
728 : } else {
729 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
730 : }
731 : }
732 0 : _ => {}
733 : }
734 :
735 : // Clear the VM bits if required.
736 145474 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
737 12 : let vm_rel = RelTag {
738 12 : forknum: VISIBILITYMAP_FORKNUM,
739 12 : spcnode: decoded.blocks[0].rnode_spcnode,
740 12 : dbnode: decoded.blocks[0].rnode_dbnode,
741 12 : relnode: decoded.blocks[0].rnode_relnode,
742 12 : };
743 12 :
744 12 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
745 12 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
746 :
747 : // Sometimes, Postgres seems to create heap WAL records with the
748 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
749 : // not set. In fact, it's possible that the VM page does not exist at all.
750 : // In that case, we don't want to store a record to clear the VM bit;
751 : // replaying it would fail to find the previous image of the page, because
752 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
753 : // record if it doesn't.
754 12 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
755 12 : if let Some(blknum) = new_vm_blk {
756 12 : if blknum >= vm_size {
757 0 : new_vm_blk = None;
758 12 : }
759 0 : }
760 12 : if let Some(blknum) = old_vm_blk {
761 0 : if blknum >= vm_size {
762 0 : old_vm_blk = None;
763 0 : }
764 12 : }
765 :
766 12 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
767 12 : if new_vm_blk == old_vm_blk {
768 : // An UPDATE record that needs to clear the bits for both old and the
769 : // new page, both of which reside on the same VM page.
770 0 : self.put_rel_wal_record(
771 0 : modification,
772 0 : vm_rel,
773 0 : new_vm_blk.unwrap(),
774 0 : NeonWalRecord::ClearVisibilityMapFlags {
775 0 : new_heap_blkno,
776 0 : old_heap_blkno,
777 0 : flags,
778 0 : },
779 0 : ctx,
780 0 : )
781 0 : .await?;
782 : } else {
783 : // Clear VM bits for one heap page, or for two pages that reside on
784 : // different VM pages.
785 12 : if let Some(new_vm_blk) = new_vm_blk {
786 12 : self.put_rel_wal_record(
787 12 : modification,
788 12 : vm_rel,
789 12 : new_vm_blk,
790 12 : NeonWalRecord::ClearVisibilityMapFlags {
791 12 : new_heap_blkno,
792 12 : old_heap_blkno: None,
793 12 : flags,
794 12 : },
795 12 : ctx,
796 12 : )
797 0 : .await?;
798 0 : }
799 12 : if let Some(old_vm_blk) = old_vm_blk {
800 0 : self.put_rel_wal_record(
801 0 : modification,
802 0 : vm_rel,
803 0 : old_vm_blk,
804 0 : NeonWalRecord::ClearVisibilityMapFlags {
805 0 : new_heap_blkno: None,
806 0 : old_heap_blkno,
807 0 : flags,
808 0 : },
809 0 : ctx,
810 0 : )
811 0 : .await?;
812 12 : }
813 : }
814 0 : }
815 145462 : }
816 :
817 145474 : Ok(())
818 145474 : }
819 :
820 0 : async fn ingest_neonrmgr_record(
821 0 : &mut self,
822 0 : buf: &mut Bytes,
823 0 : modification: &mut DatadirModification<'_>,
824 0 : decoded: &DecodedWALRecord,
825 0 : ctx: &RequestContext,
826 0 : ) -> anyhow::Result<()> {
827 0 : // Handle VM bit updates that are implicitly part of heap records.
828 0 :
829 0 : // First, look at the record to determine which VM bits need
830 0 : // to be cleared. If either of these variables is set, we
831 0 : // need to clear the corresponding bits in the visibility map.
832 0 : let mut new_heap_blkno: Option<u32> = None;
833 0 : let mut old_heap_blkno: Option<u32> = None;
834 0 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
835 0 : let pg_version = modification.tline.pg_version;
836 0 :
837 0 : assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
838 :
839 0 : match pg_version {
840 : 16 => {
841 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
842 0 :
843 0 : match info {
844 : pg_constants::XLOG_NEON_HEAP_INSERT => {
845 0 : let xlrec = v16::rm_neon::XlNeonHeapInsert::decode(buf);
846 0 : assert_eq!(0, buf.remaining());
847 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
848 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
849 0 : }
850 : }
851 : pg_constants::XLOG_NEON_HEAP_DELETE => {
852 0 : let xlrec = v16::rm_neon::XlNeonHeapDelete::decode(buf);
853 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
854 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
855 0 : }
856 : }
857 : pg_constants::XLOG_NEON_HEAP_UPDATE
858 : | pg_constants::XLOG_NEON_HEAP_HOT_UPDATE => {
859 0 : let xlrec = v16::rm_neon::XlNeonHeapUpdate::decode(buf);
860 0 : // the size of tuple data is inferred from the size of the record.
861 0 : // we can't validate the remaining number of bytes without parsing
862 0 : // the tuple data.
863 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
864 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
865 0 : }
866 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
867 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
868 0 : // non-HOT update where the new tuple goes to a different page than
869 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
870 0 : // set.
871 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
872 0 : }
873 : }
874 : pg_constants::XLOG_NEON_HEAP_MULTI_INSERT => {
875 0 : let xlrec = v16::rm_neon::XlNeonHeapMultiInsert::decode(buf);
876 :
877 0 : let offset_array_len =
878 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
879 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
880 0 : 0
881 : } else {
882 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
883 : };
884 0 : assert_eq!(offset_array_len, buf.remaining());
885 :
886 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
887 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
888 0 : }
889 : }
890 : pg_constants::XLOG_NEON_HEAP_LOCK => {
891 0 : let xlrec = v16::rm_neon::XlNeonHeapLock::decode(buf);
892 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
893 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
894 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
895 0 : }
896 : }
897 0 : info => bail!("Unknown WAL record type for Neon RMGR: {}", info),
898 : }
899 : }
900 0 : _ => bail!(
901 0 : "Neon RMGR has no known compatibility with PostgreSQL version {}",
902 0 : pg_version
903 0 : ),
904 : }
905 :
906 : // Clear the VM bits if required.
907 0 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
908 0 : let vm_rel = RelTag {
909 0 : forknum: VISIBILITYMAP_FORKNUM,
910 0 : spcnode: decoded.blocks[0].rnode_spcnode,
911 0 : dbnode: decoded.blocks[0].rnode_dbnode,
912 0 : relnode: decoded.blocks[0].rnode_relnode,
913 0 : };
914 0 :
915 0 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
916 0 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
917 :
918 : // Sometimes, Postgres seems to create heap WAL records with the
919 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
920 : // not set. In fact, it's possible that the VM page does not exist at all.
921 : // In that case, we don't want to store a record to clear the VM bit;
922 : // replaying it would fail to find the previous image of the page, because
923 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
924 : // record if it doesn't.
925 0 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
926 0 : if let Some(blknum) = new_vm_blk {
927 0 : if blknum >= vm_size {
928 0 : new_vm_blk = None;
929 0 : }
930 0 : }
931 0 : if let Some(blknum) = old_vm_blk {
932 0 : if blknum >= vm_size {
933 0 : old_vm_blk = None;
934 0 : }
935 0 : }
936 :
937 0 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
938 0 : if new_vm_blk == old_vm_blk {
939 : // An UPDATE record that needs to clear the bits for both old and the
940 : // new page, both of which reside on the same VM page.
941 0 : self.put_rel_wal_record(
942 0 : modification,
943 0 : vm_rel,
944 0 : new_vm_blk.unwrap(),
945 0 : NeonWalRecord::ClearVisibilityMapFlags {
946 0 : new_heap_blkno,
947 0 : old_heap_blkno,
948 0 : flags,
949 0 : },
950 0 : ctx,
951 0 : )
952 0 : .await?;
953 : } else {
954 : // Clear VM bits for one heap page, or for two pages that reside on
955 : // different VM pages.
956 0 : if let Some(new_vm_blk) = new_vm_blk {
957 0 : self.put_rel_wal_record(
958 0 : modification,
959 0 : vm_rel,
960 0 : new_vm_blk,
961 0 : NeonWalRecord::ClearVisibilityMapFlags {
962 0 : new_heap_blkno,
963 0 : old_heap_blkno: None,
964 0 : flags,
965 0 : },
966 0 : ctx,
967 0 : )
968 0 : .await?;
969 0 : }
970 0 : if let Some(old_vm_blk) = old_vm_blk {
971 0 : self.put_rel_wal_record(
972 0 : modification,
973 0 : vm_rel,
974 0 : old_vm_blk,
975 0 : NeonWalRecord::ClearVisibilityMapFlags {
976 0 : new_heap_blkno: None,
977 0 : old_heap_blkno,
978 0 : flags,
979 0 : },
980 0 : ctx,
981 0 : )
982 0 : .await?;
983 0 : }
984 : }
985 0 : }
986 0 : }
987 :
988 0 : Ok(())
989 0 : }
990 :
991 : /// Subroutine of ingest_record(), to handle an XLOG_DBASE_CREATE record.
992 0 : async fn ingest_xlog_dbase_create(
993 0 : &mut self,
994 0 : modification: &mut DatadirModification<'_>,
995 0 : rec: &XlCreateDatabase,
996 0 : ctx: &RequestContext,
997 0 : ) -> anyhow::Result<()> {
998 0 : let db_id = rec.db_id;
999 0 : let tablespace_id = rec.tablespace_id;
1000 0 : let src_db_id = rec.src_db_id;
1001 0 : let src_tablespace_id = rec.src_tablespace_id;
1002 :
1003 0 : let rels = modification
1004 0 : .tline
1005 0 : .list_rels(
1006 0 : src_tablespace_id,
1007 0 : src_db_id,
1008 0 : Version::Modified(modification),
1009 0 : ctx,
1010 0 : )
1011 0 : .await?;
1012 :
1013 0 : debug!("ingest_xlog_dbase_create: {} rels", rels.len());
1014 :
1015 : // Copy relfilemap
1016 0 : let filemap = modification
1017 0 : .tline
1018 0 : .get_relmap_file(
1019 0 : src_tablespace_id,
1020 0 : src_db_id,
1021 0 : Version::Modified(modification),
1022 0 : ctx,
1023 0 : )
1024 0 : .await?;
1025 0 : modification
1026 0 : .put_relmap_file(tablespace_id, db_id, filemap, ctx)
1027 0 : .await?;
1028 :
1029 0 : let mut num_rels_copied = 0;
1030 0 : let mut num_blocks_copied = 0;
1031 0 : for src_rel in rels {
1032 0 : assert_eq!(src_rel.spcnode, src_tablespace_id);
1033 0 : assert_eq!(src_rel.dbnode, src_db_id);
1034 :
1035 0 : let nblocks = modification
1036 0 : .tline
1037 0 : .get_rel_size(src_rel, Version::Modified(modification), true, ctx)
1038 0 : .await?;
1039 0 : let dst_rel = RelTag {
1040 0 : spcnode: tablespace_id,
1041 0 : dbnode: db_id,
1042 0 : relnode: src_rel.relnode,
1043 0 : forknum: src_rel.forknum,
1044 0 : };
1045 0 :
1046 0 : modification.put_rel_creation(dst_rel, nblocks, ctx).await?;
1047 :
1048 : // Copy content
1049 0 : debug!("copying rel {} to {}, {} blocks", src_rel, dst_rel, nblocks);
1050 0 : for blknum in 0..nblocks {
1051 : // Sharding:
1052 : // - src and dst are always on the same shard, because they differ only by dbNode, and
1053 : // dbNode is not included in the hash inputs for sharding.
1054 : // - This WAL command is replayed on all shards, but each shard only copies the blocks
1055 : // that belong to it.
1056 0 : let src_key = rel_block_to_key(src_rel, blknum);
1057 0 : if !self.shard.is_key_local(&src_key) {
1058 0 : debug!(
1059 0 : "Skipping non-local key {} during XLOG_DBASE_CREATE",
1060 0 : src_key
1061 0 : );
1062 0 : continue;
1063 0 : }
1064 0 : debug!(
1065 0 : "copying block {} from {} ({}) to {}",
1066 0 : blknum, src_rel, src_key, dst_rel
1067 0 : );
1068 :
1069 0 : let content = modification
1070 0 : .tline
1071 0 : .get_rel_page_at_lsn(
1072 0 : src_rel,
1073 0 : blknum,
1074 0 : Version::Modified(modification),
1075 0 : true,
1076 0 : ctx,
1077 0 : )
1078 0 : .await?;
1079 0 : modification.put_rel_page_image(dst_rel, blknum, content)?;
1080 0 : num_blocks_copied += 1;
1081 : }
1082 :
1083 0 : num_rels_copied += 1;
1084 : }
1085 :
1086 0 : info!(
1087 0 : "Created database {}/{}, copied {} blocks in {} rels",
1088 0 : tablespace_id, db_id, num_blocks_copied, num_rels_copied
1089 0 : );
1090 0 : Ok(())
1091 0 : }
1092 :
1093 16 : async fn ingest_xlog_smgr_create(
1094 16 : &mut self,
1095 16 : modification: &mut DatadirModification<'_>,
1096 16 : rec: &XlSmgrCreate,
1097 16 : ctx: &RequestContext,
1098 16 : ) -> anyhow::Result<()> {
1099 16 : let rel = RelTag {
1100 16 : spcnode: rec.rnode.spcnode,
1101 16 : dbnode: rec.rnode.dbnode,
1102 16 : relnode: rec.rnode.relnode,
1103 16 : forknum: rec.forknum,
1104 16 : };
1105 16 : self.put_rel_creation(modification, rel, ctx).await?;
1106 16 : Ok(())
1107 16 : }
1108 :
1109 : /// Subroutine of ingest_record(), to handle an XLOG_SMGR_TRUNCATE record.
1110 : ///
1111 : /// This is the same logic as in PostgreSQL's smgr_redo() function.
1112 0 : async fn ingest_xlog_smgr_truncate(
1113 0 : &mut self,
1114 0 : modification: &mut DatadirModification<'_>,
1115 0 : rec: &XlSmgrTruncate,
1116 0 : ctx: &RequestContext,
1117 0 : ) -> anyhow::Result<()> {
1118 0 : let spcnode = rec.rnode.spcnode;
1119 0 : let dbnode = rec.rnode.dbnode;
1120 0 : let relnode = rec.rnode.relnode;
1121 0 :
1122 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
1123 0 : let rel = RelTag {
1124 0 : spcnode,
1125 0 : dbnode,
1126 0 : relnode,
1127 0 : forknum: MAIN_FORKNUM,
1128 0 : };
1129 0 : self.put_rel_truncation(modification, rel, rec.blkno, ctx)
1130 0 : .await?;
1131 0 : }
1132 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_FSM) != 0 {
1133 0 : let rel = RelTag {
1134 0 : spcnode,
1135 0 : dbnode,
1136 0 : relnode,
1137 0 : forknum: FSM_FORKNUM,
1138 0 : };
1139 0 :
1140 0 : let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE;
1141 0 : let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
1142 0 : if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 {
1143 : // The tail of the last remaining FSM page has to be zeroed.
1144 : // We are not precise here; instead of digging into the FSM bitmap format, we just clear the whole page.
1145 0 : modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?;
1146 0 : fsm_physical_page_no += 1;
1147 0 : }
1148 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1149 0 : if nblocks > fsm_physical_page_no {
1150 : // check if something to do: FSM is larger than truncate position
1151 0 : self.put_rel_truncation(modification, rel, fsm_physical_page_no, ctx)
1152 0 : .await?;
1153 0 : }
1154 0 : }
1155 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
1156 0 : let rel = RelTag {
1157 0 : spcnode,
1158 0 : dbnode,
1159 0 : relnode,
1160 0 : forknum: VISIBILITYMAP_FORKNUM,
1161 0 : };
1162 0 :
1163 0 : let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE;
1164 0 : if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 {
1165 : // The tail of the last remaining VM page has to be zeroed.
1166 : // We are not precise here; instead of digging into the VM bitmap format, we just clear the whole page.
1167 0 : modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?;
1168 0 : vm_page_no += 1;
1169 0 : }
1170 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1171 0 : if nblocks > vm_page_no {
1172 : // check if something to do: VM is larger than truncate position
1173 0 : self.put_rel_truncation(modification, rel, vm_page_no, ctx)
1174 0 : .await?;
1175 0 : }
1176 0 : }
1177 0 : Ok(())
1178 0 : }
1179 :
1180 : /// Subroutine of ingest_record(), to handle XLOG_XACT_* records.
1181 : ///
1182 8 : async fn ingest_xact_record(
1183 8 : &mut self,
1184 8 : modification: &mut DatadirModification<'_>,
1185 8 : parsed: &XlXactParsedRecord,
1186 8 : is_commit: bool,
1187 8 : ctx: &RequestContext,
1188 8 : ) -> anyhow::Result<()> {
1189 8 : // Record update of CLOG pages
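        // Worked example: with CLOG_XACTS_PER_PAGE = 32768 (BLCKSZ * 4, two
        // status bits per xact) and SLRU_PAGES_PER_SEGMENT = 32, xid 1_000_000
        // maps to pageno 30, segno 0, rpageno 30.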
1190 8 : let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
1191 8 : let mut segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1192 8 : let mut rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1193 8 : let mut page_xids: Vec<TransactionId> = vec![parsed.xid];
1194 :
1195 8 : for subxact in &parsed.subxacts {
1196 0 : let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
1197 0 : if subxact_pageno != pageno {
1198 : // This subxact goes to different page. Write the record
1199 : // for all the XIDs on the previous page, and continue
1200 : // accumulating XIDs on this new page.
1201 0 : modification.put_slru_wal_record(
1202 0 : SlruKind::Clog,
1203 0 : segno,
1204 0 : rpageno,
1205 0 : if is_commit {
1206 0 : NeonWalRecord::ClogSetCommitted {
1207 0 : xids: page_xids,
1208 0 : timestamp: parsed.xact_time,
1209 0 : }
1210 : } else {
1211 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1212 : },
1213 0 : )?;
1214 0 : page_xids = Vec::new();
1215 0 : }
1216 0 : pageno = subxact_pageno;
1217 0 : segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1218 0 : rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1219 0 : page_xids.push(*subxact);
1220 : }
1221 8 : modification.put_slru_wal_record(
1222 8 : SlruKind::Clog,
1223 8 : segno,
1224 8 : rpageno,
1225 8 : if is_commit {
1226 8 : NeonWalRecord::ClogSetCommitted {
1227 8 : xids: page_xids,
1228 8 : timestamp: parsed.xact_time,
1229 8 : }
1230 : } else {
1231 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1232 : },
1233 0 : )?;
1234 :
1235 8 : for xnode in &parsed.xnodes {
1236 0 : for forknum in MAIN_FORKNUM..=INIT_FORKNUM {
1237 0 : let rel = RelTag {
1238 0 : forknum,
1239 0 : spcnode: xnode.spcnode,
1240 0 : dbnode: xnode.dbnode,
1241 0 : relnode: xnode.relnode,
1242 0 : };
1243 0 : if modification
1244 0 : .tline
1245 0 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1246 0 : .await?
1247 : {
1248 0 : self.put_rel_drop(modification, rel, ctx).await?;
1249 0 : }
1250 : }
1251 : }
1252 8 : Ok(())
1253 8 : }
1254 :
1255 0 : async fn ingest_clog_truncate_record(
1256 0 : &mut self,
1257 0 : modification: &mut DatadirModification<'_>,
1258 0 : xlrec: &XlClogTruncate,
1259 0 : ctx: &RequestContext,
1260 0 : ) -> anyhow::Result<()> {
1261 0 : info!(
1262 0 : "RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {}",
1263 0 : xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db
1264 0 : );
1265 :
1266 : // Here we treat oldestXid and oldestXidDB
1267 : // differently from postgres redo routines.
1268 : // In postgres checkpoint.oldestXid lags behind xlrec.oldest_xid
1269 : // until a checkpoint happens and updates the value.
1270 : // Here we can use the most recent value.
1271 : // It's just an optimization, though, and could be removed.
1272 : // TODO: figure out if there will be any issues with replicas.
1273 0 : self.checkpoint.oldestXid = xlrec.oldest_xid;
1274 0 : self.checkpoint.oldestXidDB = xlrec.oldest_xid_db;
1275 0 : self.checkpoint_modified = true;
1276 0 :
1277 0 : // TODO: handle AdvanceOldestClogXid(), or write a comment explaining why we don't need it
1278 0 :
1279 0 : let latest_page_number =
1280 0 : self.checkpoint.nextXid.value as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
1281 0 :
1282 0 : // Now delete all segments containing pages between xlrec.pageno
1283 0 : // and latest_page_number.
1284 0 :
1285 0 : // First, make an important safety check:
1286 0 : // the current endpoint page must not be eligible for removal.
1287 0 : // See SimpleLruTruncate() in slru.c
1288 0 : if clogpage_precedes(latest_page_number, xlrec.pageno) {
1289 0 : info!("could not truncate directory pg_xact apparent wraparound");
1290 0 : return Ok(());
1291 0 : }
1292 :
1293 : // Iterate over the SLRU CLOG segments and drop those that we're ready to truncate
1294 : //
1295 : // We cannot pass 'lsn' to the Timeline.list_nonrels(), or it
1296 : // will block waiting for the last valid LSN to advance up to
1297 : // it. So we use the previous record's LSN in the get calls
1298 : // instead.
1299 0 : for segno in modification
1300 0 : .tline
1301 0 : .list_slru_segments(SlruKind::Clog, Version::Modified(modification), ctx)
1302 0 : .await?
1303 : {
1304 0 : let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
1305 0 : if slru_may_delete_clogsegment(segpage, xlrec.pageno) {
1306 0 : modification
1307 0 : .drop_slru_segment(SlruKind::Clog, segno, ctx)
1308 0 : .await?;
1309 0 : trace!("Drop CLOG segment {:>04X}", segno);
1310 0 : }
1311 : }
1312 :
1313 0 : Ok(())
1314 0 : }
1315 :
1316 0 : fn ingest_multixact_create_record(
1317 0 : &mut self,
1318 0 : modification: &mut DatadirModification,
1319 0 : xlrec: &XlMultiXactCreate,
1320 0 : ) -> Result<()> {
1321 0 : // Create WAL record for updating the multixact-offsets page
1322 0 : let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
1323 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1324 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1325 0 :
1326 0 : modification.put_slru_wal_record(
1327 0 : SlruKind::MultiXactOffsets,
1328 0 : segno,
1329 0 : rpageno,
1330 0 : NeonWalRecord::MultixactOffsetCreate {
1331 0 : mid: xlrec.mid,
1332 0 : moff: xlrec.moff,
1333 0 : },
1334 0 : )?;
1335 :
1336 : // Create WAL records for the update of each affected multixact-members page
1337 0 : let mut members = xlrec.members.iter();
1338 0 : let mut offset = xlrec.moff;
1339 : loop {
1340 0 : let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1341 0 :
1342 0 : // How many members fit on this page?
1343 0 : let page_remain = pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
1344 0 : - offset % pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1345 0 :
1346 0 : let mut this_page_members: Vec<MultiXactMember> = Vec::new();
1347 0 : for _ in 0..page_remain {
1348 0 : if let Some(m) = members.next() {
1349 0 : this_page_members.push(m.clone());
1350 0 : } else {
1351 0 : break;
1352 : }
1353 : }
1354 0 : if this_page_members.is_empty() {
1355 : // all done
1356 0 : break;
1357 0 : }
1358 0 : let n_this_page = this_page_members.len();
1359 0 :
1360 0 : modification.put_slru_wal_record(
1361 0 : SlruKind::MultiXactMembers,
1362 0 : pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
1363 0 : pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
1364 0 : NeonWalRecord::MultixactMembersCreate {
1365 0 : moff: offset,
1366 0 : members: this_page_members,
1367 0 : },
1368 0 : )?;
1369 :
1370 : // Note: The multixact members can wrap around, even within one WAL record.
1371 0 : offset = offset.wrapping_add(n_this_page as u32);
1372 : }
1373 0 : if xlrec.mid >= self.checkpoint.nextMulti {
1374 0 : self.checkpoint.nextMulti = xlrec.mid + 1;
1375 0 : self.checkpoint_modified = true;
1376 0 : }
1377 0 : if xlrec.moff + xlrec.nmembers > self.checkpoint.nextMultiOffset {
1378 0 : self.checkpoint.nextMultiOffset = xlrec.moff + xlrec.nmembers;
1379 0 : self.checkpoint_modified = true;
1380 0 : }
1381 0 : let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| {
1382 0 : if let Some(max_xid) = acc {
1383 0 : if mbr.xid.wrapping_sub(max_xid) as i32 > 0 {
1384 0 : Some(mbr.xid)
1385 : } else {
1386 0 : acc
1387 : }
1388 : } else {
1389 0 : Some(mbr.xid)
1390 : }
1391 0 : });
1392 :
1393 0 : if let Some(max_xid) = max_mbr_xid {
1394 0 : if self.checkpoint.update_next_xid(max_xid) {
1395 0 : self.checkpoint_modified = true;
1396 0 : }
1397 0 : }
1398 0 : Ok(())
1399 0 : }
1400 :
1401 0 : async fn ingest_multixact_truncate_record(
1402 0 : &mut self,
1403 0 : modification: &mut DatadirModification<'_>,
1404 0 : xlrec: &XlMultiXactTruncate,
1405 0 : ctx: &RequestContext,
1406 0 : ) -> Result<()> {
1407 0 : self.checkpoint.oldestMulti = xlrec.end_trunc_off;
1408 0 : self.checkpoint.oldestMultiDB = xlrec.oldest_multi_db;
1409 0 : self.checkpoint_modified = true;
1410 0 :
1411 0 : // PerformMembersTruncation
1412 0 : let maxsegment: i32 = mx_offset_to_member_segment(pg_constants::MAX_MULTIXACT_OFFSET);
1413 0 : let startsegment: i32 = mx_offset_to_member_segment(xlrec.start_trunc_memb);
1414 0 : let endsegment: i32 = mx_offset_to_member_segment(xlrec.end_trunc_memb);
1415 0 : let mut segment: i32 = startsegment;
1416 :
1417 : // Delete all the segments except the last one. The last segment can still
1418 : // contain, possibly partially, valid data.
1419 0 : while segment != endsegment {
1420 0 : modification
1421 0 : .drop_slru_segment(SlruKind::MultiXactMembers, segment as u32, ctx)
1422 0 : .await?;
1423 :
1424 : /* move to next segment, handling wraparound correctly */
1425 0 : if segment == maxsegment {
1426 0 : segment = 0;
1427 0 : } else {
1428 0 : segment += 1;
1429 0 : }
1430 : }
1431 :
1432 : // Truncate offsets
1433 : // FIXME: this does not handle wraparound correctly
1434 :
1435 0 : Ok(())
1436 0 : }
1437 :
1438 0 : async fn ingest_relmap_page(
1439 0 : &mut self,
1440 0 : modification: &mut DatadirModification<'_>,
1441 0 : xlrec: &XlRelmapUpdate,
1442 0 : decoded: &DecodedWALRecord,
1443 0 : ctx: &RequestContext,
1444 0 : ) -> Result<()> {
1445 0 : let mut buf = decoded.record.clone();
1446 0 : buf.advance(decoded.main_data_offset);
1447 0 : // skip xl_relmap_update
1448 0 : buf.advance(12);
1449 0 :
1450 0 : modification
1451 0 : .put_relmap_file(
1452 0 : xlrec.tsid,
1453 0 : xlrec.dbid,
1454 0 : Bytes::copy_from_slice(&buf[..]),
1455 0 : ctx,
1456 0 : )
1457 0 : .await
1458 0 : }
1459 :
1460 18 : async fn put_rel_creation(
1461 18 : &mut self,
1462 18 : modification: &mut DatadirModification<'_>,
1463 18 : rel: RelTag,
1464 18 : ctx: &RequestContext,
1465 18 : ) -> Result<()> {
1466 18 : modification.put_rel_creation(rel, 0, ctx).await?;
1467 18 : Ok(())
1468 18 : }
1469 :
1470 272426 : async fn put_rel_page_image(
1471 272426 : &mut self,
1472 272426 : modification: &mut DatadirModification<'_>,
1473 272426 : rel: RelTag,
1474 272426 : blknum: BlockNumber,
1475 272426 : img: Bytes,
1476 272426 : ctx: &RequestContext,
1477 272426 : ) -> Result<(), PageReconstructError> {
1478 272426 : self.handle_rel_extend(modification, rel, blknum, ctx)
1479 3079 : .await?;
1480 272426 : modification.put_rel_page_image(rel, blknum, img)?;
1481 272426 : Ok(())
1482 272426 : }
1483 :
1484 145630 : async fn put_rel_wal_record(
1485 145630 : &mut self,
1486 145630 : modification: &mut DatadirModification<'_>,
1487 145630 : rel: RelTag,
1488 145630 : blknum: BlockNumber,
1489 145630 : rec: NeonWalRecord,
1490 145630 : ctx: &RequestContext,
1491 145630 : ) -> Result<()> {
1492 145630 : self.handle_rel_extend(modification, rel, blknum, ctx)
1493 84 : .await?;
1494 145630 : modification.put_rel_wal_record(rel, blknum, rec)?;
1495 145630 : Ok(())
1496 145630 : }
1497 :
1498 6012 : async fn put_rel_truncation(
1499 6012 : &mut self,
1500 6012 : modification: &mut DatadirModification<'_>,
1501 6012 : rel: RelTag,
1502 6012 : nblocks: BlockNumber,
1503 6012 : ctx: &RequestContext,
1504 6012 : ) -> anyhow::Result<()> {
1505 6012 : modification.put_rel_truncation(rel, nblocks, ctx).await?;
1506 6012 : Ok(())
1507 6012 : }
1508 :
1509 2 : async fn put_rel_drop(
1510 2 : &mut self,
1511 2 : modification: &mut DatadirModification<'_>,
1512 2 : rel: RelTag,
1513 2 : ctx: &RequestContext,
1514 2 : ) -> Result<()> {
1515 2 : modification.put_rel_drop(rel, ctx).await?;
1516 2 : Ok(())
1517 2 : }
1518 :
1519 418056 : async fn handle_rel_extend(
1520 418056 : &mut self,
1521 418056 : modification: &mut DatadirModification<'_>,
1522 418056 : rel: RelTag,
1523 418056 : blknum: BlockNumber,
1524 418056 : ctx: &RequestContext,
1525 418056 : ) -> Result<(), PageReconstructError> {
1526 418056 : let new_nblocks = blknum + 1;
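 : // (A write to block `blknum` implies the relation now has at least
 : // blknum + 1 blocks.)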
1527 : // Check if the relation exists. We implicitly create relations on first
1528 : // record.
1529 : // TODO: would be nice to be more explicit about it
1530 :
1531 : // Get the current size, creating the relation first if it doesn't exist.
1532 : //
1533 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size
1534 : // would check the cache too. Eagerly checking the cache results in less work
1535 : // overall and about 10% better ingest performance. It costs extra work on a
1536 : // cache miss, but misses are rare.
1537 418056 : let old_nblocks = if let Some(nblocks) = modification
1538 418056 : .tline
1539 418056 : .get_cached_rel_size(&rel, modification.get_lsn())
1540 : {
1541 418046 : nblocks
1542 10 : } else if !modification
1543 10 : .tline
1544 10 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1545 0 : .await?
1546 : {
1547 : // Create it with 0 size initially; the logic below will extend it.
1548 10 : modification
1549 10 : .put_rel_creation(rel, 0, ctx)
1550 0 : .await
1551 10 : .context("Relation Error")?;
1552 10 : 0
1553 : } else {
1554 0 : modification
1555 0 : .tline
1556 0 : .get_rel_size(rel, Version::Modified(modification), true, ctx)
1557 0 : .await?
1558 : };
1559 :
1560 418056 : if new_nblocks > old_nblocks {
1561 : //info!("extending {} {} to {}", rel, old_nblocks, new_nblocks);
1562 274788 : modification.put_rel_extend(rel, new_nblocks, ctx).await?;
1563 :
1564 274788 : let mut key = rel_block_to_key(rel, blknum);
1565 : // fill the gap with zeros
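 : // Gaps arise when a WAL record touches a block beyond the current end of
 : // the relation; the blocks in between must exist, so they are materialized
 : // as all-zero pages, matching what Postgres itself leaves on disk for them.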
1566 274788 : for gap_blknum in old_nblocks..blknum {
1567 2998 : key.field6 = gap_blknum;
1568 2998 :
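 : // In a sharded tenant each shard stores only the pages it owns; the other
 : // shards ingest this same record and fill in their own gap pages.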
1569 2998 : if self.shard.get_shard_number(&key) != self.shard.number {
1570 0 : continue;
1571 2998 : }
1572 2998 :
1573 2998 : modification.put_rel_page_image(rel, gap_blknum, ZERO_PAGE.clone())?;
1574 : }
1575 143268 : }
1576 418056 : Ok(())
1577 418056 : }
1578 :
1579 0 : async fn put_slru_page_image(
1580 0 : &mut self,
1581 0 : modification: &mut DatadirModification<'_>,
1582 0 : kind: SlruKind,
1583 0 : segno: u32,
1584 0 : blknum: BlockNumber,
1585 0 : img: Bytes,
1586 0 : ctx: &RequestContext,
1587 0 : ) -> Result<()> {
1588 0 : self.handle_slru_extend(modification, kind, segno, blknum, ctx)
1589 0 : .await?;
1590 0 : modification.put_slru_page_image(kind, segno, blknum, img)?;
1591 0 : Ok(())
1592 0 : }
1593 :
1594 0 : async fn handle_slru_extend(
1595 0 : &mut self,
1596 0 : modification: &mut DatadirModification<'_>,
1597 0 : kind: SlruKind,
1598 0 : segno: u32,
1599 0 : blknum: BlockNumber,
1600 0 : ctx: &RequestContext,
1601 0 : ) -> anyhow::Result<()> {
1602 0 : // We don't use a cache for this like we do for relations. SLRUs are explicitly
1603 0 : // extended with ZEROPAGE records, not with commit records, so extension happens
1604 0 : // a lot less frequently.
1605 0 :
1606 0 : let new_nblocks = blknum + 1;
1607 : // Check if the SLRU segment exists. We implicitly create segments on the
1608 : // first record.
1609 : // TODO: would be nice to be more explicit about it
1610 0 : let old_nblocks = if !modification
1611 0 : .tline
1612 0 : .get_slru_segment_exists(kind, segno, Version::Modified(modification), ctx)
1613 0 : .await?
1614 : {
1615 : // Create it with 0 size initially; the logic below will extend it.
1616 0 : modification
1617 0 : .put_slru_segment_creation(kind, segno, 0, ctx)
1618 0 : .await?;
1619 0 : 0
1620 : } else {
1621 0 : modification
1622 0 : .tline
1623 0 : .get_slru_segment_size(kind, segno, Version::Modified(modification), ctx)
1624 0 : .await?
1625 : };
1626 :
1627 0 : if new_nblocks > old_nblocks {
1628 0 : trace!(
1629 0 : "extending SLRU {:?} seg {} from {} to {} blocks",
1630 0 : kind,
1631 0 : segno,
1632 0 : old_nblocks,
1633 0 : new_nblocks
1634 0 : );
1635 0 : modification.put_slru_extend(kind, segno, new_nblocks)?;
1636 :
1637 : // fill the gap with zeros
1638 0 : for gap_blknum in old_nblocks..blknum {
1639 0 : modification.put_slru_page_image(kind, segno, gap_blknum, ZERO_PAGE.clone())?;
1640 : }
1641 0 : }
1642 0 : Ok(())
1643 0 : }
1644 : }
1645 :
1646 12 : async fn get_relsize(
1647 12 : modification: &DatadirModification<'_>,
1648 12 : rel: RelTag,
1649 12 : ctx: &RequestContext,
1650 12 : ) -> anyhow::Result<BlockNumber> {
1651 12 : let nblocks = if !modification
1652 12 : .tline
1653 12 : .get_rel_exists(rel, Version::Modified(modification), true, ctx)
1654 0 : .await?
1655 : {
1656 0 : 0
1657 : } else {
1658 12 : modification
1659 12 : .tline
1660 12 : .get_rel_size(rel, Version::Modified(modification), true, ctx)
1661 0 : .await?
1662 : };
1663 12 : Ok(nblocks)
1664 12 : }
1665 :
1666 : #[allow(clippy::bool_assert_comparison)]
1667 : #[cfg(test)]
1668 : mod tests {
1669 : use super::*;
1670 : use crate::tenant::harness::*;
1671 : use crate::tenant::remote_timeline_client::{remote_initdb_archive_path, INITDB_PATH};
1672 : use postgres_ffi::RELSEG_SIZE;
1673 :
1674 : use crate::DEFAULT_PG_VERSION;
1675 :
1676 : /// Arbitrary relation tag, for testing.
1677 : const TESTREL_A: RelTag = RelTag {
1678 : spcnode: 0,
1679 : dbnode: 111,
1680 : relnode: 1000,
1681 : forknum: 0,
1682 : };
1683 :
1684 12 : fn assert_current_logical_size(_timeline: &Timeline, _lsn: Lsn) {
1685 12 : // TODO
1686 12 : }
1687 :
1688 : static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
1689 :
1690 8 : async fn init_walingest_test(tline: &Timeline, ctx: &RequestContext) -> Result<WalIngest> {
1691 8 : let mut m = tline.begin_modification(Lsn(0x10));
1692 8 : m.put_checkpoint(ZERO_CHECKPOINT.clone())?;
1693 16 : m.put_relmap_file(0, 111, Bytes::from(""), ctx).await?; // dummy relmapper file
1694 8 : m.commit(ctx).await?;
1695 8 : let walingest = WalIngest::new(tline, Lsn(0x10), ctx).await?;
1696 :
1697 8 : Ok(walingest)
1698 8 : }
1699 :
1700 : #[tokio::test]
1701 2 : async fn test_relsize() -> Result<()> {
1702 2 : let (tenant, ctx) = TenantHarness::create("test_relsize")?.load().await;
1703 2 : let tline = tenant
1704 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1705 6 : .await?;
1706 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1707 2 :
1708 2 : let mut m = tline.begin_modification(Lsn(0x20));
1709 2 : walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
1710 2 : walingest
1711 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1712 2 : .await?;
1713 2 : m.commit(&ctx).await?;
1714 2 : let mut m = tline.begin_modification(Lsn(0x30));
1715 2 : walingest
1716 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 3"), &ctx)
1717 2 : .await?;
1718 2 : m.commit(&ctx).await?;
1719 2 : let mut m = tline.begin_modification(Lsn(0x40));
1720 2 : walingest
1721 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1 at 4"), &ctx)
1722 2 : .await?;
1723 2 : m.commit(&ctx).await?;
1724 2 : let mut m = tline.begin_modification(Lsn(0x50));
1725 2 : walingest
1726 2 : .put_rel_page_image(&mut m, TESTREL_A, 2, test_img("foo blk 2 at 5"), &ctx)
1727 2 : .await?;
1728 2 : m.commit(&ctx).await?;
1729 2 :
1730 2 : assert_current_logical_size(&tline, Lsn(0x50));
1731 2 :
1732 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
1733 2 : assert_eq!(
1734 2 : tline
1735 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
1736 2 : .await?,
1737 2 : false
1738 2 : );
1739 2 : assert!(tline
1740 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
1741 2 : .await
1742 2 : .is_err());
1743 2 : assert_eq!(
1744 2 : tline
1745 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1746 2 : .await?,
1747 2 : true
1748 2 : );
1749 2 : assert_eq!(
1750 2 : tline
1751 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1752 2 : .await?,
1753 2 : 1
1754 2 : );
1755 2 : assert_eq!(
1756 2 : tline
1757 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
1758 2 : .await?,
1759 2 : 3
1760 2 : );
1761 2 :
1762 2 : // Check page contents at each LSN
1763 2 : assert_eq!(
1764 2 : tline
1765 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x20)), false, &ctx)
1766 2 : .await?,
1767 2 : test_img("foo blk 0 at 2")
1768 2 : );
1769 2 :
1770 2 : assert_eq!(
1771 2 : tline
1772 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x30)), false, &ctx)
1773 2 : .await?,
1774 2 : test_img("foo blk 0 at 3")
1775 2 : );
1776 2 :
1777 2 : assert_eq!(
1778 2 : tline
1779 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x40)), false, &ctx)
1780 2 : .await?,
1781 2 : test_img("foo blk 0 at 3")
1782 2 : );
1783 2 : assert_eq!(
1784 2 : tline
1785 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x40)), false, &ctx)
1786 2 : .await?,
1787 2 : test_img("foo blk 1 at 4")
1788 2 : );
1789 2 :
1790 2 : assert_eq!(
1791 2 : tline
1792 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x50)), false, &ctx)
1793 2 : .await?,
1794 2 : test_img("foo blk 0 at 3")
1795 2 : );
1796 2 : assert_eq!(
1797 2 : tline
1798 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x50)), false, &ctx)
1799 2 : .await?,
1800 2 : test_img("foo blk 1 at 4")
1801 2 : );
1802 2 : assert_eq!(
1803 2 : tline
1804 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), false, &ctx)
1805 2 : .await?,
1806 2 : test_img("foo blk 2 at 5")
1807 2 : );
1808 2 :
1809 2 : // Truncate last block
1810 2 : let mut m = tline.begin_modification(Lsn(0x60));
1811 2 : walingest
1812 2 : .put_rel_truncation(&mut m, TESTREL_A, 2, &ctx)
1813 2 : .await?;
1814 2 : m.commit(&ctx).await?;
1815 2 : assert_current_logical_size(&tline, Lsn(0x60));
1816 2 :
1817 2 : // Check reported size and contents after truncation
1818 2 : assert_eq!(
1819 2 : tline
1820 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), false, &ctx)
1821 2 : .await?,
1822 2 : 2
1823 2 : );
1824 2 : assert_eq!(
1825 2 : tline
1826 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x60)), false, &ctx)
1827 2 : .await?,
1828 2 : test_img("foo blk 0 at 3")
1829 2 : );
1830 2 : assert_eq!(
1831 2 : tline
1832 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x60)), false, &ctx)
1833 2 : .await?,
1834 2 : test_img("foo blk 1 at 4")
1835 2 : );
1836 2 :
1837 2 : // should still see the truncated block with older LSN
1838 2 : assert_eq!(
1839 2 : tline
1840 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
1841 2 : .await?,
1842 2 : 3
1843 2 : );
1844 2 : assert_eq!(
1845 2 : tline
1846 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), false, &ctx)
1847 2 : .await?,
1848 2 : test_img("foo blk 2 at 5")
1849 2 : );
1850 2 :
1851 2 : // Truncate to zero length
1852 2 : let mut m = tline.begin_modification(Lsn(0x68));
1853 2 : walingest
1854 2 : .put_rel_truncation(&mut m, TESTREL_A, 0, &ctx)
1855 2 : .await?;
1856 2 : m.commit(&ctx).await?;
1857 2 : assert_eq!(
1858 2 : tline
1859 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x68)), false, &ctx)
1860 2 : .await?,
1861 2 : 0
1862 2 : );
1863 2 :
1864 2 : // Extend from 0 to 2 blocks, leaving a gap
1865 2 : let mut m = tline.begin_modification(Lsn(0x70));
1866 2 : walingest
1867 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1"), &ctx)
1868 2 : .await?;
1869 2 : m.commit(&ctx).await?;
1870 2 : assert_eq!(
1871 2 : tline
1872 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x70)), false, &ctx)
1873 2 : .await?,
1874 2 : 2
1875 2 : );
1876 2 : assert_eq!(
1877 2 : tline
1878 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x70)), false, &ctx)
1879 2 : .await?,
1880 2 : ZERO_PAGE
1881 2 : );
1882 2 : assert_eq!(
1883 2 : tline
1884 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x70)), false, &ctx)
1885 2 : .await?,
1886 2 : test_img("foo blk 1")
1887 2 : );
1888 2 :
1889 2 : // Extend a lot more, leaving a big gap that spans multiple segments
1890 2 : let mut m = tline.begin_modification(Lsn(0x80));
1891 2 : walingest
1892 2 : .put_rel_page_image(&mut m, TESTREL_A, 1500, test_img("foo blk 1500"), &ctx)
1893 2 : .await?;
1894 1548 : m.commit(&ctx).await?;
1895 2 : assert_eq!(
1896 2 : tline
1897 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
1898 2 : .await?,
1899 2 : 1501
1900 2 : );
1901 2998 : for blk in 2..1500 {
1902 2996 : assert_eq!(
1903 2996 : tline
1904 2996 : .get_rel_page_at_lsn(TESTREL_A, blk, Version::Lsn(Lsn(0x80)), false, &ctx)
1905 3035 : .await?,
1906 2996 : ZERO_PAGE
1907 2 : );
1908 2 : }
1909 2 : assert_eq!(
1910 2 : tline
1911 2 : .get_rel_page_at_lsn(TESTREL_A, 1500, Version::Lsn(Lsn(0x80)), false, &ctx)
1912 2 : .await?,
1913 2 : test_img("foo blk 1500")
1914 2 : );
1915 2 :
1916 2 : Ok(())
1917 2 : }
1918 :
1919 : // Test what happens if we dropped a relation
1920 : // and then created it again within the same layer.
1921 : #[tokio::test]
1922 2 : async fn test_drop_extend() -> Result<()> {
1923 2 : let (tenant, ctx) = TenantHarness::create("test_drop_extend")?.load().await;
1924 2 : let tline = tenant
1925 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1926 6 : .await?;
1927 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1928 2 :
1929 2 : let mut m = tline.begin_modification(Lsn(0x20));
1930 2 : walingest
1931 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1932 2 : .await?;
1933 2 : m.commit(&ctx).await?;
1934 2 :
1935 2 : // Check that rel exists and size is correct
1936 2 : assert_eq!(
1937 2 : tline
1938 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1939 2 : .await?,
1940 2 : true
1941 2 : );
1942 2 : assert_eq!(
1943 2 : tline
1944 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
1945 2 : .await?,
1946 2 : 1
1947 2 : );
1948 2 :
1949 2 : // Drop rel
1950 2 : let mut m = tline.begin_modification(Lsn(0x30));
1951 2 : walingest.put_rel_drop(&mut m, TESTREL_A, &ctx).await?;
1952 2 : m.commit(&ctx).await?;
1953 2 :
1954 2 : // Check that rel is not visible anymore
1955 2 : assert_eq!(
1956 2 : tline
1957 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x30)), false, &ctx)
1958 2 : .await?,
1959 2 : false
1960 2 : );
1961 2 :
1962 2 : // FIXME: should fail
1963 2 : //assert!(tline.get_rel_size(TESTREL_A, Lsn(0x30), false)?.is_none());
1964 2 :
1965 2 : // Re-create it
1966 2 : let mut m = tline.begin_modification(Lsn(0x40));
1967 2 : walingest
1968 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 4"), &ctx)
1969 2 : .await?;
1970 2 : m.commit(&ctx).await?;
1971 2 :
1972 2 : // Check that rel exists and size is correct
1973 2 : assert_eq!(
1974 2 : tline
1975 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x40)), false, &ctx)
1976 2 : .await?,
1977 2 : true
1978 2 : );
1979 2 : assert_eq!(
1980 2 : tline
1981 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x40)), false, &ctx)
1982 2 : .await?,
1983 2 : 1
1984 2 : );
1985 2 :
1986 2 : Ok(())
1987 2 : }
1988 :
1989 : // Test what happens if we truncated a relation
1990 : // so that one of its segments was dropped
1991 : // and then extended it again within the same layer.
1992 : #[tokio::test]
1993 2 : async fn test_truncate_extend() -> Result<()> {
1994 2 : let (tenant, ctx) = TenantHarness::create("test_truncate_extend")?.load().await;
1995 2 : let tline = tenant
1996 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1997 6 : .await?;
1998 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1999 2 :
2000 2 : // Create a 20 MB relation (the size is arbitrary)
2001 2 : let relsize = 20 * 1024 * 1024 / 8192;
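 : // (20 MiB / 8 KiB per block = 2560 blocks)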
2002 2 : let mut m = tline.begin_modification(Lsn(0x20));
2003 5120 : for blkno in 0..relsize {
2004 5120 : let data = format!("foo blk {} at {}", blkno, Lsn(0x20));
2005 5120 : walingest
2006 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2007 2 : .await?;
2008 2 : }
2009 44 : m.commit(&ctx).await?;
2010 2 :
2011 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
2012 2 : assert_eq!(
2013 2 : tline
2014 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
2015 2 : .await?,
2016 2 : false
2017 2 : );
2018 2 : assert!(tline
2019 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), false, &ctx)
2020 2 : .await
2021 2 : .is_err());
2022 2 :
2023 2 : assert_eq!(
2024 2 : tline
2025 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
2026 2 : .await?,
2027 2 : true
2028 2 : );
2029 2 : assert_eq!(
2030 2 : tline
2031 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), false, &ctx)
2032 2 : .await?,
2033 2 : relsize
2034 2 : );
2035 2 :
2036 2 : // Check relation content
2037 5120 : for blkno in 0..relsize {
2038 5120 : let lsn = Lsn(0x20);
2039 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2040 5120 : assert_eq!(
2041 5120 : tline
2042 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(lsn), false, &ctx)
2043 201 : .await?,
2044 5120 : test_img(&data)
2045 2 : );
2046 2 : }
2047 2 :
2048 2 : // Truncate the relation so that its second segment is dropped,
2049 2 : // leaving only one page.
2050 2 : let mut m = tline.begin_modification(Lsn(0x60));
2051 2 : walingest
2052 2 : .put_rel_truncation(&mut m, TESTREL_A, 1, &ctx)
2053 2 : .await?;
2054 2 : m.commit(&ctx).await?;
2055 2 :
2056 2 : // Check reported size and contents after truncation
2057 2 : assert_eq!(
2058 2 : tline
2059 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), false, &ctx)
2060 2 : .await?,
2061 2 : 1
2062 2 : );
2063 2 :
2064 4 : for blkno in 0..1 {
2065 2 : let lsn = Lsn(0x20);
2066 2 : let data = format!("foo blk {} at {}", blkno, lsn);
2067 2 : assert_eq!(
2068 2 : tline
2069 2 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x60)), false, &ctx)
2070 2 : .await?,
2071 2 : test_img(&data)
2072 2 : );
2073 2 : }
2074 2 :
2075 2 : // should still see all blocks with older LSN
2076 2 : assert_eq!(
2077 2 : tline
2078 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), false, &ctx)
2079 2 : .await?,
2080 2 : relsize
2081 2 : );
2082 5120 : for blkno in 0..relsize {
2083 5120 : let lsn = Lsn(0x20);
2084 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2085 5120 : assert_eq!(
2086 5120 : tline
2087 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x50)), false, &ctx)
2088 400 : .await?,
2089 5120 : test_img(&data)
2090 2 : );
2091 2 : }
2092 2 :
2093 2 : // Extend the relation again, adding enough blocks to re-create the
2094 2 : // second segment.
2095 2 : let lsn = Lsn(0x80);
2096 2 : let mut m = tline.begin_modification(lsn);
2097 5120 : for blkno in 0..relsize {
2098 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2099 5120 : walingest
2100 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2101 2 : .await?;
2102 2 : }
2103 46 : m.commit(&ctx).await?;
2104 2 :
2105 2 : assert_eq!(
2106 2 : tline
2107 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
2108 2 : .await?,
2109 2 : true
2110 2 : );
2111 2 : assert_eq!(
2112 2 : tline
2113 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), false, &ctx)
2114 2 : .await?,
2115 2 : relsize
2116 2 : );
2117 2 : // Check relation content
2118 5120 : for blkno in 0..relsize {
2119 5120 : let lsn = Lsn(0x80);
2120 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2121 5120 : assert_eq!(
2122 5120 : tline
2123 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x80)), false, &ctx)
2124 200 : .await?,
2125 5120 : test_img(&data)
2126 2 : );
2127 2 : }
2128 2 :
2129 2 : Ok(())
2130 2 : }
2131 :
2132 : /// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
2133 : /// split into multiple 1 GB segments in Postgres.
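 : /// (With the default 8 KiB BLCKSZ, RELSEG_SIZE is 131072 blocks, i.e. 1 GiB
 : /// per segment, so writing RELSEG_SIZE + 1 blocks below spans two segments.)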
2134 : #[tokio::test]
2135 2 : async fn test_large_rel() -> Result<()> {
2136 2 : let (tenant, ctx) = TenantHarness::create("test_large_rel")?.load().await;
2137 2 : let tline = tenant
2138 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2139 6 : .await?;
2140 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2141 2 :
2142 2 : let mut lsn = 0x10;
2143 262146 : for blknum in 0..RELSEG_SIZE + 1 {
2144 262146 : lsn += 0x10;
2145 262146 : let mut m = tline.begin_modification(Lsn(lsn));
2146 262146 : let img = test_img(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
2147 262146 : walingest
2148 262146 : .put_rel_page_image(&mut m, TESTREL_A, blknum as BlockNumber, img, &ctx)
2149 3079 : .await?;
2150 262146 : m.commit(&ctx).await?;
2151 2 : }
2152 2 :
2153 2 : assert_current_logical_size(&tline, Lsn(lsn));
2154 2 :
2155 2 : assert_eq!(
2156 2 : tline
2157 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2158 2 : .await?,
2159 2 : RELSEG_SIZE + 1
2160 2 : );
2161 2 :
2162 2 : // Truncate one block
2163 2 : lsn += 0x10;
2164 2 : let mut m = tline.begin_modification(Lsn(lsn));
2165 2 : walingest
2166 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE, &ctx)
2167 2 : .await?;
2168 2 : m.commit(&ctx).await?;
2169 2 : assert_eq!(
2170 2 : tline
2171 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2172 2 : .await?,
2173 2 : RELSEG_SIZE
2174 2 : );
2175 2 : assert_current_logical_size(&tline, Lsn(lsn));
2176 2 :
2177 2 : // Truncate another block
2178 2 : lsn += 0x10;
2179 2 : let mut m = tline.begin_modification(Lsn(lsn));
2180 2 : walingest
2181 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE - 1, &ctx)
2182 2 : .await?;
2183 2 : m.commit(&ctx).await?;
2184 2 : assert_eq!(
2185 2 : tline
2186 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2187 2 : .await?,
2188 2 : RELSEG_SIZE - 1
2189 2 : );
2190 2 : assert_current_logical_size(&tline, Lsn(lsn));
2191 2 :
2192 2 : // Truncate to 3000, and then truncate all the way down to 0, one block at a time
2193 2 : // This tests the behavior at segment boundaries
2194 2 : let mut size: i32 = 3000;
2195 6004 : while size >= 0 {
2196 6002 : lsn += 0x10;
2197 6002 : let mut m = tline.begin_modification(Lsn(lsn));
2198 6002 : walingest
2199 6002 : .put_rel_truncation(&mut m, TESTREL_A, size as BlockNumber, &ctx)
2200 139 : .await?;
2201 6002 : m.commit(&ctx).await?;
2202 6002 : assert_eq!(
2203 6002 : tline
2204 6002 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), false, &ctx)
2205 2 : .await?,
2206 6002 : size as BlockNumber
2207 2 : );
2208 2 :
2209 6002 : size -= 1;
2210 2 : }
2211 2 : assert_current_logical_size(&tline, Lsn(lsn));
2212 2 :
2213 2 : Ok(())
2214 2 : }
2215 :
2216 : /// Replay a wal segment file taken directly from safekeepers.
2217 : ///
2218 : /// This test is useful for benchmarking since it allows us to profile only
2219 : /// the walingest code in a single-threaded executor, and iterate more quickly
2220 : /// without waiting for unrelated steps.
2221 : #[tokio::test]
2222 2 : async fn test_ingest_real_wal() {
2223 2 : use crate::tenant::harness::*;
2224 2 : use postgres_ffi::waldecoder::WalStreamDecoder;
2225 2 : use postgres_ffi::WAL_SEGMENT_SIZE;
2226 2 :
2227 2 : // Define test data path and constants.
2228 2 : //
2229 2 : // Steps to reconstruct the data, if needed:
2230 2 : // 1. Run the pgbench python test
2231 2 : // 2. Take the first wal segment file from safekeeper
2232 2 : // 3. Compress it using `zstd --long input_file`
2233 2 : // 4. Copy initdb.tar.zst from local_fs_remote_storage
2234 2 : // 5. Grep sk logs for "restart decoder" to get startpoint
2235 2 : // 6. Run just the decoder from this test to get the endpoint.
2236 2 : // It's the last LSN the decoder will output.
2237 2 : let pg_version = 15; // The test data was generated by pg15
2238 2 : let path = "test_data/sk_wal_segment_from_pgbench";
2239 2 : let wal_segment_path = format!("{path}/000000010000000000000001.zst");
2240 2 : let source_initdb_path = format!("{path}/{INITDB_PATH}");
2241 2 : let startpoint = Lsn::from_hex("14AEC08").unwrap();
2242 2 : let _endpoint = Lsn::from_hex("1FFFF98").unwrap();
2243 2 :
2244 2 : let harness = TenantHarness::create("test_ingest_real_wal").unwrap();
2245 2 : let (tenant, ctx) = harness.load().await;
2246 2 :
2247 2 : let remote_initdb_path =
2248 2 : remote_initdb_archive_path(&tenant.tenant_shard_id().tenant_id, &TIMELINE_ID);
2249 2 : let initdb_path = harness.remote_fs_dir.join(remote_initdb_path.get_path());
2250 2 :
2251 2 : std::fs::create_dir_all(initdb_path.parent().unwrap())
2252 2 : .expect("creating test dir should work");
2253 2 : std::fs::copy(source_initdb_path, initdb_path).expect("copying the initdb.tar.zst works");
2254 2 :
2255 2 : // Bootstrap a real timeline. We can't use create_test_timeline because
2256 2 : // it doesn't create a real checkpoint, and WalIngest::new tries to parse
2257 2 : // the garbage data.
2258 2 : let tline = tenant
2259 2 : .bootstrap_timeline_test(TIMELINE_ID, pg_version, Some(TIMELINE_ID), &ctx)
2260 22724 : .await
2261 2 : .unwrap();
2262 2 :
2263 2 : // We fully read and decompress this into memory before decoding
2264 2 : // to get a more accurate perf profile of the decoder.
2265 2 : let bytes = {
2266 2 : use async_compression::tokio::bufread::ZstdDecoder;
2267 2 : let file = tokio::fs::File::open(wal_segment_path).await.unwrap();
2268 2 : let reader = tokio::io::BufReader::new(file);
2269 2 : let decoder = ZstdDecoder::new(reader);
2270 2 : let mut reader = tokio::io::BufReader::new(decoder);
2271 2 : let mut buffer = Vec::new();
2272 224 : tokio::io::copy_buf(&mut reader, &mut buffer).await.unwrap();
2273 2 : buffer
2274 2 : };
2275 2 :
2276 2 : // TODO start a profiler too
2277 2 : let started_at = std::time::Instant::now();
2278 2 :
2279 2 : // Initialize walingest
2280 2 : let xlogoff: usize = startpoint.segment_offset(WAL_SEGMENT_SIZE);
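 : // (xlogoff is startpoint's offset within its WAL segment; with the default
 : // 16 MiB segments that is 0x14AEC08 % 0x100_0000 = 0x4AEC08. Bytes before
 : // it in the segment file are skipped below.)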
2281 2 : let mut decoder = WalStreamDecoder::new(startpoint, pg_version);
2282 2 : let mut walingest = WalIngest::new(tline.as_ref(), startpoint, &ctx)
2283 5 : .await
2284 2 : .unwrap();
2285 2 : let mut modification = tline.begin_modification(startpoint);
2286 2 : let mut decoded = DecodedWALRecord::default();
2287 2 : println!("decoding {} bytes", bytes.len() - xlogoff);
2288 2 :
2289 2 : // Decode and ingest wal. We process the wal in chunks because
2290 2 : // that's what happens when we get bytes from safekeepers.
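 : // The 50-byte chunk size is arbitrary; it deliberately forces the decoder
 : // to buffer partial records across feed_bytes() calls.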
2291 474686 : for chunk in bytes[xlogoff..].chunks(50) {
2292 474686 : decoder.feed_bytes(chunk);
2293 620536 : while let Some((lsn, recdata)) = decoder.poll_decode().unwrap() {
2294 145850 : walingest
2295 145850 : .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
2296 89 : .await
2297 145850 : .unwrap();
2298 2 : }
2299 474686 : modification.commit(&ctx).await.unwrap();
2300 2 : }
2301 2 :
2302 2 : let duration = started_at.elapsed();
2303 2 : println!("done in {:?}", duration);
2304 2 : }
2305 : }