Line data Source code
1 : //!
2 : //! Parse PostgreSQL WAL records and store them in a neon Timeline.
3 : //!
4 : //! The pipeline for ingesting WAL looks like this:
5 : //!
6 : //! WAL receiver -> WalIngest -> Repository
7 : //!
8 : //! The WAL receiver receives a stream of WAL from the WAL safekeepers,
9 : //! and decodes it to individual WAL records. It feeds the WAL records
10 : //! to WalIngest, which parses them and stores them in the Repository.
11 : //!
12 : //! The neon Repository can store page versions in two formats: as
13 : //! page images, or as WAL records. WalIngest::ingest_record() extracts
14 : //! page images out of some WAL records, but stores most of them as WAL
15 : //! records. If a WAL record modifies multiple pages, WalIngest
16 : //! will call Repository::put_wal_record or put_page_image functions
17 : //! separately for each modified page.
18 : //!
19 : //! To reconstruct a page using a WAL record, the Repository calls the
20 : //! code in walredo.rs. walredo.rs passes most WAL records to the WAL
21 : //! redo Postgres process, but it can handle some records directly with
22 : //! bespoke Rust code.
23 :
24 : use pageserver_api::shard::ShardIdentity;
25 : use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes;
26 : use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment;
27 : use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn};
28 :
29 : use anyhow::{bail, Context, Result};
30 : use bytes::{Buf, Bytes, BytesMut};
31 : use tracing::*;
32 : use utils::failpoint_support;
33 :
34 : use crate::context::RequestContext;
35 : use crate::metrics::WAL_INGEST;
36 : use crate::pgdatadir_mapping::{DatadirModification, Version};
37 : use crate::tenant::PageReconstructError;
38 : use crate::tenant::Timeline;
39 : use crate::walrecord::*;
40 : use crate::ZERO_PAGE;
41 : use pageserver_api::key::rel_block_to_key;
42 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
43 : use postgres_ffi::pg_constants;
44 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
45 : use postgres_ffi::v14::nonrelfile_utils::mx_offset_to_member_segment;
46 : use postgres_ffi::v14::xlog_utils::*;
47 : use postgres_ffi::v14::CheckPoint;
48 : use postgres_ffi::TransactionId;
49 : use postgres_ffi::BLCKSZ;
50 : use utils::lsn::Lsn;
51 :
52 : pub struct WalIngest {
53 : shard: ShardIdentity,
54 : checkpoint: CheckPoint,
55 : checkpoint_modified: bool,
56 : }
57 :
58 : impl WalIngest {
59 12 : pub async fn new(
60 12 : timeline: &Timeline,
61 12 : startpoint: Lsn,
62 12 : ctx: &RequestContext,
63 12 : ) -> anyhow::Result<WalIngest> {
64 : // Fetch the latest checkpoint into memory, so that we can compare with it
65 : // quickly in `ingest_record` and update it when it changes.
66 12 : let checkpoint_bytes = timeline.get_checkpoint(startpoint, ctx).await?;
67 12 : let checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
68 12 : trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
69 :
70 12 : Ok(WalIngest {
71 12 : shard: *timeline.get_shard_identity(),
72 12 : checkpoint,
73 12 : checkpoint_modified: false,
74 12 : })
75 12 : }
76 :
77 : ///
78 : /// Decode a PostgreSQL WAL record and store it in the repository, in the given timeline.
79 : ///
80 : /// This function updates the `lsn` field of the `DatadirModification`.
81 : ///
82 : /// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
83 : /// relations/pages that the record affects.
84 : ///
85 : /// This function returns `true` if the record was ingested, and `false` if it was filtered out
86 : ///
87 145852 : pub async fn ingest_record(
88 145852 : &mut self,
89 145852 : recdata: Bytes,
90 145852 : lsn: Lsn,
91 145852 : modification: &mut DatadirModification<'_>,
92 145852 : decoded: &mut DecodedWALRecord,
93 145852 : ctx: &RequestContext,
94 145852 : ) -> anyhow::Result<bool> {
95 145852 : WAL_INGEST.records_received.inc();
96 145852 : let pg_version = modification.tline.pg_version;
97 145852 : let prev_len = modification.len();
98 145852 :
99 145852 : modification.set_lsn(lsn)?;
100 145852 : decode_wal_record(recdata, decoded, pg_version)?;
101 :
102 145852 : let mut buf = decoded.record.clone();
103 145852 : buf.advance(decoded.main_data_offset);
104 145852 :
105 145852 : assert!(!self.checkpoint_modified);
106 145852 : if decoded.xl_xid != pg_constants::INVALID_TRANSACTION_ID
107 145834 : && self.checkpoint.update_next_xid(decoded.xl_xid)
108 2 : {
109 2 : self.checkpoint_modified = true;
110 145850 : }
111 :
112 145852 : failpoint_support::sleep_millis_async!("wal-ingest-record-sleep");
113 :
114 145852 : match decoded.xl_rmid {
115 : pg_constants::RM_HEAP_ID | pg_constants::RM_HEAP2_ID => {
116 : // Heap AM records need some special handling, because they modify VM pages
117 : // without registering them with the standard mechanism.
118 145474 : self.ingest_heapam_record(&mut buf, modification, decoded, ctx)
119 0 : .await?;
120 : }
121 : pg_constants::RM_NEON_ID => {
122 0 : self.ingest_neonrmgr_record(&mut buf, modification, decoded, ctx)
123 0 : .await?;
124 : }
125 : // Handle other special record types
126 : pg_constants::RM_SMGR_ID => {
127 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
128 16 :
129 16 : if info == pg_constants::XLOG_SMGR_CREATE {
130 16 : let create = XlSmgrCreate::decode(&mut buf);
131 16 : self.ingest_xlog_smgr_create(modification, &create, ctx)
132 4 : .await?;
133 0 : } else if info == pg_constants::XLOG_SMGR_TRUNCATE {
134 0 : let truncate = XlSmgrTruncate::decode(&mut buf);
135 0 : self.ingest_xlog_smgr_truncate(modification, &truncate, ctx)
136 0 : .await?;
137 0 : }
138 : }
139 : pg_constants::RM_DBASE_ID => {
140 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
141 0 : debug!(%info, %pg_version, "handle RM_DBASE_ID");
142 :
143 0 : if pg_version == 14 {
144 0 : if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
145 0 : let createdb = XlCreateDatabase::decode(&mut buf);
146 0 : debug!("XLOG_DBASE_CREATE v14");
147 :
148 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
149 0 : .await?;
150 0 : } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
151 0 : let dropdb = XlDropDatabase::decode(&mut buf);
152 0 : for tablespace_id in dropdb.tablespace_ids {
153 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
154 0 : modification
155 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
156 0 : .await?;
157 : }
158 0 : }
159 0 : } else if pg_version == 15 {
160 0 : if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
161 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
162 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
163 : // The XLOG record was renamed between v14 and v15,
164 : // but the record format is the same.
165 : // So we can reuse XlCreateDatabase here.
166 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
167 0 : let createdb = XlCreateDatabase::decode(&mut buf);
168 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
169 0 : .await?;
170 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
171 0 : let dropdb = XlDropDatabase::decode(&mut buf);
172 0 : for tablespace_id in dropdb.tablespace_ids {
173 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
174 0 : modification
175 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
176 0 : .await?;
177 : }
178 0 : }
179 0 : } else if pg_version == 16 {
180 0 : if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
181 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
182 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
183 : // The XLOG record was renamed between v14 and v15,
184 : // but the record format is the same.
185 : // So we can reuse XlCreateDatabase here.
186 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
187 0 : let createdb = XlCreateDatabase::decode(&mut buf);
188 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
189 0 : .await?;
190 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
191 0 : let dropdb = XlDropDatabase::decode(&mut buf);
192 0 : for tablespace_id in dropdb.tablespace_ids {
193 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
194 0 : modification
195 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
196 0 : .await?;
197 : }
198 0 : }
199 0 : }
200 : }
201 : pg_constants::RM_TBLSPC_ID => {
202 0 : trace!("XLOG_TBLSPC_CREATE/DROP is not handled yet");
203 : }
204 : pg_constants::RM_CLOG_ID => {
205 0 : let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
206 0 :
207 0 : if info == pg_constants::CLOG_ZEROPAGE {
208 0 : let pageno = buf.get_u32_le();
209 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
210 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
211 0 : self.put_slru_page_image(
212 0 : modification,
213 0 : SlruKind::Clog,
214 0 : segno,
215 0 : rpageno,
216 0 : ZERO_PAGE.clone(),
217 0 : ctx,
218 0 : )
219 0 : .await?;
220 : } else {
221 0 : assert!(info == pg_constants::CLOG_TRUNCATE);
222 0 : let xlrec = XlClogTruncate::decode(&mut buf);
223 0 : self.ingest_clog_truncate_record(modification, &xlrec, ctx)
224 0 : .await?;
225 : }
226 : }
227 : pg_constants::RM_XACT_ID => {
228 24 : let info = decoded.xl_info & pg_constants::XLOG_XACT_OPMASK;
229 24 :
230 24 : if info == pg_constants::XLOG_XACT_COMMIT || info == pg_constants::XLOG_XACT_ABORT {
231 8 : let parsed_xact =
232 8 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
233 8 : self.ingest_xact_record(
234 8 : modification,
235 8 : &parsed_xact,
236 8 : info == pg_constants::XLOG_XACT_COMMIT,
237 8 : decoded.origin_id,
238 8 : ctx,
239 8 : )
240 0 : .await?;
241 16 : } else if info == pg_constants::XLOG_XACT_COMMIT_PREPARED
242 16 : || info == pg_constants::XLOG_XACT_ABORT_PREPARED
243 : {
244 0 : let parsed_xact =
245 0 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
246 0 : self.ingest_xact_record(
247 0 : modification,
248 0 : &parsed_xact,
249 0 : info == pg_constants::XLOG_XACT_COMMIT_PREPARED,
250 0 : decoded.origin_id,
251 0 : ctx,
252 0 : )
253 0 : .await?;
254 : // Remove the twophase file. See RemoveTwoPhaseFile() in the postgres code.
255 0 : trace!(
256 0 : "Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
257 : decoded.xl_xid,
258 : parsed_xact.xid,
259 : lsn,
260 : );
261 0 : modification
262 0 : .drop_twophase_file(parsed_xact.xid, ctx)
263 0 : .await?;
264 16 : } else if info == pg_constants::XLOG_XACT_PREPARE {
265 0 : modification
266 0 : .put_twophase_file(decoded.xl_xid, Bytes::copy_from_slice(&buf[..]), ctx)
267 0 : .await?;
268 16 : }
269 : }
270 : pg_constants::RM_MULTIXACT_ID => {
271 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
272 0 :
273 0 : if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE {
274 0 : let pageno = buf.get_u32_le();
275 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
276 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
277 0 : self.put_slru_page_image(
278 0 : modification,
279 0 : SlruKind::MultiXactOffsets,
280 0 : segno,
281 0 : rpageno,
282 0 : ZERO_PAGE.clone(),
283 0 : ctx,
284 0 : )
285 0 : .await?;
286 0 : } else if info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE {
287 0 : let pageno = buf.get_u32_le();
288 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
289 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
290 0 : self.put_slru_page_image(
291 0 : modification,
292 0 : SlruKind::MultiXactMembers,
293 0 : segno,
294 0 : rpageno,
295 0 : ZERO_PAGE.clone(),
296 0 : ctx,
297 0 : )
298 0 : .await?;
299 0 : } else if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
300 0 : let xlrec = XlMultiXactCreate::decode(&mut buf);
301 0 : self.ingest_multixact_create_record(modification, &xlrec)?;
302 0 : } else if info == pg_constants::XLOG_MULTIXACT_TRUNCATE_ID {
303 0 : let xlrec = XlMultiXactTruncate::decode(&mut buf);
304 0 : self.ingest_multixact_truncate_record(modification, &xlrec, ctx)
305 0 : .await?;
306 0 : }
307 : }
308 : pg_constants::RM_RELMAP_ID => {
309 0 : let xlrec = XlRelmapUpdate::decode(&mut buf);
310 0 : self.ingest_relmap_page(modification, &xlrec, decoded, ctx)
311 0 : .await?;
312 : }
313 : pg_constants::RM_XLOG_ID => {
314 30 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
315 30 :
316 30 : if info == pg_constants::XLOG_NEXTOID {
317 2 : let next_oid = buf.get_u32_le();
318 2 : if self.checkpoint.nextOid != next_oid {
319 2 : self.checkpoint.nextOid = next_oid;
320 2 : self.checkpoint_modified = true;
321 2 : }
322 28 : } else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
323 28 : || info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
324 : {
325 2 : let mut checkpoint_bytes = [0u8; SIZEOF_CHECKPOINT];
326 2 : buf.copy_to_slice(&mut checkpoint_bytes);
327 2 : let xlog_checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
328 2 : trace!(
329 0 : "xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
330 : xlog_checkpoint.oldestXid,
331 : self.checkpoint.oldestXid
332 : );
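// The wrapping subtraction interpreted as a signed 32-bit value implements
// the usual circular XID comparison (cf. TransactionIdPrecedes in PostgreSQL):
// it is negative exactly when our cached oldestXid logically precedes the one
// in the checkpoint record, and only then do we advance it.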
333 2 : if (self
334 2 : .checkpoint
335 2 : .oldestXid
336 2 : .wrapping_sub(xlog_checkpoint.oldestXid) as i32)
337 2 : < 0
338 0 : {
339 0 : self.checkpoint.oldestXid = xlog_checkpoint.oldestXid;
340 2 : }
341 2 : trace!(
342 0 : "xlog_checkpoint.oldestActiveXid={}, checkpoint.oldestActiveXid={}",
343 : xlog_checkpoint.oldestActiveXid,
344 : self.checkpoint.oldestActiveXid
345 : );
346 2 : self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid;
347 2 :
348 2 : // Write a new checkpoint key-value pair on every checkpoint record, even
349 2 : // if nothing really changed. Not strictly required, but it seems nice to
350 2 : // have some trace of the checkpoint records in the layer files at the same
351 2 : // LSNs.
352 2 : self.checkpoint_modified = true;
353 26 : }
354 : }
355 : pg_constants::RM_LOGICALMSG_ID => {
356 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
357 0 :
358 0 : if info == pg_constants::XLOG_LOGICAL_MESSAGE {
359 0 : let xlrec = crate::walrecord::XlLogicalMessage::decode(&mut buf);
360 0 : let prefix = std::str::from_utf8(&buf[0..xlrec.prefix_size - 1])?;
361 0 : let message = &buf[xlrec.prefix_size..xlrec.prefix_size + xlrec.message_size];
362 0 : if prefix == "neon-test" {
363 : // This is a convenient way to make the WAL ingestion pause at a
364 : // particular point in the WAL. For more fine-grained control,
365 : // we could peek into the message and only pause if it contains
366 : // a particular string, for example, but this is enough for now.
367 0 : failpoint_support::sleep_millis_async!("wal-ingest-logical-message-sleep");
368 0 : } else if let Some(path) = prefix.strip_prefix("neon-file:") {
369 0 : modification.put_file(path, message, ctx).await?;
370 0 : }
371 0 : }
372 : }
373 : pg_constants::RM_STANDBY_ID => {
374 16 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
375 16 : if info == pg_constants::XLOG_RUNNING_XACTS {
376 0 : let xlrec = crate::walrecord::XlRunningXacts::decode(&mut buf);
377 0 : self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid;
378 16 : }
379 : }
380 : pg_constants::RM_REPLORIGIN_ID => {
381 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
382 0 : if info == pg_constants::XLOG_REPLORIGIN_SET {
383 0 : let xlrec = crate::walrecord::XlReploriginSet::decode(&mut buf);
384 0 : modification
385 0 : .set_replorigin(xlrec.node_id, xlrec.remote_lsn)
386 0 : .await?
387 0 : } else if info == pg_constants::XLOG_REPLORIGIN_DROP {
388 0 : let xlrec = crate::walrecord::XlReploriginDrop::decode(&mut buf);
389 0 : modification.drop_replorigin(xlrec.node_id).await?
390 0 : }
391 : }
392 292 : _x => {
393 292 : // TODO: should probably log & fail here instead of blindly
394 292 : // doing something without understanding the protocol
395 292 : }
396 : }
397 :
398 : // Iterate through all the blocks that the record modifies, and
399 : // "put" a separate copy of the record for each block.
400 145852 : for blk in decoded.blocks.iter() {
401 145642 : let rel = RelTag {
402 145642 : spcnode: blk.rnode_spcnode,
403 145642 : dbnode: blk.rnode_dbnode,
404 145642 : relnode: blk.rnode_relnode,
405 145642 : forknum: blk.forknum,
406 145642 : };
407 145642 :
408 145642 : let key = rel_block_to_key(rel, blk.blkno);
409 145642 : let key_is_local = self.shard.is_key_local(&key);
410 145642 :
411 145642 : tracing::debug!(
412 : lsn=%lsn,
413 : key=%key,
414 0 : "ingest: shard decision {} (checkpoint={})",
415 0 : if !key_is_local { "drop" } else { "keep" },
416 : self.checkpoint_modified
417 : );
418 :
419 145642 : if !key_is_local {
420 0 : if self.shard.is_shard_zero() {
421 : // Shard 0 tracks relation sizes. Although we will not store this block, we will observe
422 : // its blkno in case it implicitly extends a relation.
423 0 : self.observe_decoded_block(modification, blk, ctx).await?;
424 0 : }
425 :
426 0 : continue;
427 145642 : }
428 145642 : self.ingest_decoded_block(modification, lsn, decoded, blk, ctx)
429 62 : .await?;
430 : }
431 :
432 : // If checkpoint data was updated, store the new version in the repository
433 145852 : if self.checkpoint_modified {
434 6 : let new_checkpoint_bytes = self.checkpoint.encode()?;
435 :
436 6 : modification.put_checkpoint(new_checkpoint_bytes)?;
437 6 : self.checkpoint_modified = false;
438 145846 : }
439 :
440 : // Note that at this point this record is only cached in the modification
441 : // until commit() is called to flush the data into the repository and update
442 : // the latest LSN.
443 :
444 145852 : Ok(modification.len() > prev_len)
445 145852 : }
446 :
447 : /// Do not store this block, but observe it for the purposes of updating our relation size state.
448 0 : async fn observe_decoded_block(
449 0 : &mut self,
450 0 : modification: &mut DatadirModification<'_>,
451 0 : blk: &DecodedBkpBlock,
452 0 : ctx: &RequestContext,
453 0 : ) -> Result<(), PageReconstructError> {
454 0 : let rel = RelTag {
455 0 : spcnode: blk.rnode_spcnode,
456 0 : dbnode: blk.rnode_dbnode,
457 0 : relnode: blk.rnode_relnode,
458 0 : forknum: blk.forknum,
459 0 : };
460 0 : self.handle_rel_extend(modification, rel, blk.blkno, ctx)
461 0 : .await
462 0 : }
463 :
464 145642 : async fn ingest_decoded_block(
465 145642 : &mut self,
466 145642 : modification: &mut DatadirModification<'_>,
467 145642 : lsn: Lsn,
468 145642 : decoded: &DecodedWALRecord,
469 145642 : blk: &DecodedBkpBlock,
470 145642 : ctx: &RequestContext,
471 145642 : ) -> Result<(), PageReconstructError> {
472 145642 : let rel = RelTag {
473 145642 : spcnode: blk.rnode_spcnode,
474 145642 : dbnode: blk.rnode_dbnode,
475 145642 : relnode: blk.rnode_relnode,
476 145642 : forknum: blk.forknum,
477 145642 : };
478 145642 :
479 145642 : //
480 145642 : // Instead of storing a full-page-image WAL record,
481 145642 : // it is better to store the extracted image: we can skip wal-redo
482 145642 : // in this case. Also, some FPI records may contain multiple (up to 32) pages,
483 145642 : // so they would have to be copied multiple times.
484 145642 : //
485 145642 : if blk.apply_image
486 60 : && blk.has_image
487 60 : && decoded.xl_rmid == pg_constants::RM_XLOG_ID
488 24 : && (decoded.xl_info == pg_constants::XLOG_FPI
489 0 : || decoded.xl_info == pg_constants::XLOG_FPI_FOR_HINT)
490 : // compression of WAL is not yet supported: fall back to storing the original WAL record
491 24 : && !postgres_ffi::bkpimage_is_compressed(blk.bimg_info, modification.tline.pg_version)?
492 : // do not materialize null pages because they will most likely soon be replaced with real data
493 24 : && blk.bimg_len != 0
494 : {
495 : // Extract page image from FPI record
496 24 : let img_len = blk.bimg_len as usize;
497 24 : let img_offs = blk.bimg_offset as usize;
498 24 : let mut image = BytesMut::with_capacity(BLCKSZ as usize);
499 24 : image.extend_from_slice(&decoded.record[img_offs..img_offs + img_len]);
500 24 :
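// Full-page images are stored in the WAL with the unused "hole" between
// pd_lower and pd_upper elided. If this image has a hole, splice a run of
// zero bytes back in at hole_offset so the reconstructed page is a full
// BLCKSZ bytes again.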
501 24 : if blk.hole_length != 0 {
502 0 : let tail = image.split_off(blk.hole_offset as usize);
503 0 : image.resize(image.len() + blk.hole_length as usize, 0u8);
504 0 : image.unsplit(tail);
505 24 : }
506 : //
507 : // Match the logic of XLogReadBufferForRedoExtended:
508 : // The page may be uninitialized. If so, we can't set the LSN because
509 : // that would corrupt the page.
510 : //
511 24 : if !page_is_new(&image) {
512 18 : page_set_lsn(&mut image, lsn)
513 6 : }
514 24 : assert_eq!(image.len(), BLCKSZ as usize);
515 24 : self.put_rel_page_image(modification, rel, blk.blkno, image.freeze(), ctx)
516 0 : .await?;
517 : } else {
518 145618 : let rec = NeonWalRecord::Postgres {
519 145618 : will_init: blk.will_init || blk.apply_image,
520 145618 : rec: decoded.record.clone(),
521 145618 : };
522 145618 : self.put_rel_wal_record(modification, rel, blk.blkno, rec, ctx)
523 62 : .await?;
524 : }
525 145642 : Ok(())
526 145642 : }
527 :
528 145474 : async fn ingest_heapam_record(
529 145474 : &mut self,
530 145474 : buf: &mut Bytes,
531 145474 : modification: &mut DatadirModification<'_>,
532 145474 : decoded: &DecodedWALRecord,
533 145474 : ctx: &RequestContext,
534 145474 : ) -> anyhow::Result<()> {
535 145474 : // Handle VM bit updates that are implicitly part of heap records.
536 145474 :
537 145474 : // First, look at the record to determine which VM bits need
538 145474 : // to be cleared. If either of these variables is set, we
539 145474 : // need to clear the corresponding bits in the visibility map.
540 145474 : let mut new_heap_blkno: Option<u32> = None;
541 145474 : let mut old_heap_blkno: Option<u32> = None;
542 145474 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
543 145474 :
544 145474 : match modification.tline.pg_version {
545 : 14 => {
546 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
547 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
548 0 :
549 0 : if info == pg_constants::XLOG_HEAP_INSERT {
550 0 : let xlrec = v14::XlHeapInsert::decode(buf);
551 0 : assert_eq!(0, buf.remaining());
552 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
553 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
554 0 : }
555 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
556 0 : let xlrec = v14::XlHeapDelete::decode(buf);
557 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
558 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
559 0 : }
560 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
561 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
562 : {
563 0 : let xlrec = v14::XlHeapUpdate::decode(buf);
564 0 : // the size of tuple data is inferred from the size of the record.
565 0 : // we can't validate the remaining number of bytes without parsing
566 0 : // the tuple data.
567 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
568 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
569 0 : }
570 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
571 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
572 0 : // non-HOT update where the new tuple goes to a different page than
573 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
574 0 : // set.
575 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
576 0 : }
577 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
578 0 : let xlrec = v14::XlHeapLock::decode(buf);
579 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
580 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
581 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
582 0 : }
583 0 : }
584 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
585 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
586 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
587 0 : let xlrec = v14::XlHeapMultiInsert::decode(buf);
588 :
589 0 : let offset_array_len =
590 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
591 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
592 0 : 0
593 : } else {
594 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
595 : };
596 0 : assert_eq!(offset_array_len, buf.remaining());
597 :
598 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
599 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
600 0 : }
601 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
602 0 : let xlrec = v14::XlHeapLockUpdated::decode(buf);
603 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
604 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
605 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
606 0 : }
607 0 : }
608 : } else {
609 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
610 : }
611 : }
612 : 15 => {
613 145474 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
614 145286 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
615 145286 :
616 145286 : if info == pg_constants::XLOG_HEAP_INSERT {
617 145276 : let xlrec = v15::XlHeapInsert::decode(buf);
618 145276 : assert_eq!(0, buf.remaining());
619 145276 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
620 4 : new_heap_blkno = Some(decoded.blocks[0].blkno);
621 145272 : }
622 10 : } else if info == pg_constants::XLOG_HEAP_DELETE {
623 0 : let xlrec = v15::XlHeapDelete::decode(buf);
624 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
625 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
626 0 : }
627 10 : } else if info == pg_constants::XLOG_HEAP_UPDATE
628 2 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
629 : {
630 8 : let xlrec = v15::XlHeapUpdate::decode(buf);
631 8 : // the size of tuple data is inferred from the size of the record.
632 8 : // we can't validate the remaining number of bytes without parsing
633 8 : // the tuple data.
634 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
635 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
636 8 : }
637 8 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
638 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
639 0 : // non-HOT update where the new tuple goes to a different page than
640 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
641 0 : // set.
642 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
643 8 : }
644 2 : } else if info == pg_constants::XLOG_HEAP_LOCK {
645 0 : let xlrec = v15::XlHeapLock::decode(buf);
646 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
647 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
648 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
649 0 : }
650 2 : }
651 188 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
652 188 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
653 188 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
654 42 : let xlrec = v15::XlHeapMultiInsert::decode(buf);
655 :
656 42 : let offset_array_len =
657 42 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
658 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
659 2 : 0
660 : } else {
661 40 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
662 : };
663 42 : assert_eq!(offset_array_len, buf.remaining());
664 :
665 42 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
666 8 : new_heap_blkno = Some(decoded.blocks[0].blkno);
667 34 : }
668 146 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
669 0 : let xlrec = v15::XlHeapLockUpdated::decode(buf);
670 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
671 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
672 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
673 0 : }
674 146 : }
675 : } else {
676 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
677 : }
678 : }
679 : 16 => {
680 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
681 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
682 0 :
683 0 : if info == pg_constants::XLOG_HEAP_INSERT {
684 0 : let xlrec = v16::XlHeapInsert::decode(buf);
685 0 : assert_eq!(0, buf.remaining());
686 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
687 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
688 0 : }
689 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
690 0 : let xlrec = v16::XlHeapDelete::decode(buf);
691 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
692 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
693 0 : }
694 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
695 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
696 : {
697 0 : let xlrec = v16::XlHeapUpdate::decode(buf);
698 0 : // the size of tuple data is inferred from the size of the record.
699 0 : // we can't validate the remaining number of bytes without parsing
700 0 : // the tuple data.
701 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
702 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
703 0 : }
704 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
705 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
706 0 : // non-HOT update where the new tuple goes to a different page than
707 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
708 0 : // set.
709 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
710 0 : }
711 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
712 0 : let xlrec = v16::XlHeapLock::decode(buf);
713 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
714 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
715 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
716 0 : }
717 0 : }
718 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
719 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
720 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
721 0 : let xlrec = v16::XlHeapMultiInsert::decode(buf);
722 :
723 0 : let offset_array_len =
724 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
725 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
726 0 : 0
727 : } else {
728 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
729 : };
730 0 : assert_eq!(offset_array_len, buf.remaining());
731 :
732 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
733 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
734 0 : }
735 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
736 0 : let xlrec = v16::XlHeapLockUpdated::decode(buf);
737 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
738 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
739 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
740 0 : }
741 0 : }
742 : } else {
743 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
744 : }
745 : }
746 0 : _ => {}
747 : }
748 :
749 : // Clear the VM bits if required.
750 145474 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
751 12 : let vm_rel = RelTag {
752 12 : forknum: VISIBILITYMAP_FORKNUM,
753 12 : spcnode: decoded.blocks[0].rnode_spcnode,
754 12 : dbnode: decoded.blocks[0].rnode_dbnode,
755 12 : relnode: decoded.blocks[0].rnode_relnode,
756 12 : };
757 12 :
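// Each visibility map page covers many heap pages, so map the affected heap
// block numbers to their VM block numbers; the old and new heap blocks may
// well fall on the same VM page.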
758 12 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
759 12 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
760 :
761 : // Sometimes, Postgres seems to create heap WAL records with the
762 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
763 : // not set. In fact, it's possible that the VM page does not exist at all.
764 : // In that case, we don't want to store a record to clear the VM bit;
765 : // replaying it would fail to find the previous image of the page, because
766 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
767 : // record if it doesn't.
768 12 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
769 12 : if let Some(blknum) = new_vm_blk {
770 12 : if blknum >= vm_size {
771 0 : new_vm_blk = None;
772 12 : }
773 0 : }
774 12 : if let Some(blknum) = old_vm_blk {
775 0 : if blknum >= vm_size {
776 0 : old_vm_blk = None;
777 0 : }
778 12 : }
779 :
780 12 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
781 12 : if new_vm_blk == old_vm_blk {
782 : // An UPDATE record that needs to clear the bits for both old and the
783 : // new page, both of which reside on the same VM page.
784 0 : self.put_rel_wal_record(
785 0 : modification,
786 0 : vm_rel,
787 0 : new_vm_blk.unwrap(),
788 0 : NeonWalRecord::ClearVisibilityMapFlags {
789 0 : new_heap_blkno,
790 0 : old_heap_blkno,
791 0 : flags,
792 0 : },
793 0 : ctx,
794 0 : )
795 0 : .await?;
796 : } else {
797 : // Clear VM bits for one heap page, or for two pages that reside on
798 : // different VM pages.
799 12 : if let Some(new_vm_blk) = new_vm_blk {
800 12 : self.put_rel_wal_record(
801 12 : modification,
802 12 : vm_rel,
803 12 : new_vm_blk,
804 12 : NeonWalRecord::ClearVisibilityMapFlags {
805 12 : new_heap_blkno,
806 12 : old_heap_blkno: None,
807 12 : flags,
808 12 : },
809 12 : ctx,
810 12 : )
811 0 : .await?;
812 0 : }
813 12 : if let Some(old_vm_blk) = old_vm_blk {
814 0 : self.put_rel_wal_record(
815 0 : modification,
816 0 : vm_rel,
817 0 : old_vm_blk,
818 0 : NeonWalRecord::ClearVisibilityMapFlags {
819 0 : new_heap_blkno: None,
820 0 : old_heap_blkno,
821 0 : flags,
822 0 : },
823 0 : ctx,
824 0 : )
825 0 : .await?;
826 12 : }
827 : }
828 0 : }
829 145462 : }
830 :
831 145474 : Ok(())
832 145474 : }
833 :
834 0 : async fn ingest_neonrmgr_record(
835 0 : &mut self,
836 0 : buf: &mut Bytes,
837 0 : modification: &mut DatadirModification<'_>,
838 0 : decoded: &DecodedWALRecord,
839 0 : ctx: &RequestContext,
840 0 : ) -> anyhow::Result<()> {
841 0 : // Handle VM bit updates that are implicitly part of heap records.
842 0 :
843 0 : // First, look at the record to determine which VM bits need
844 0 : // to be cleared. If either of these variables is set, we
845 0 : // need to clear the corresponding bits in the visibility map.
846 0 : let mut new_heap_blkno: Option<u32> = None;
847 0 : let mut old_heap_blkno: Option<u32> = None;
848 0 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
849 0 : let pg_version = modification.tline.pg_version;
850 0 :
851 0 : assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
852 :
853 0 : match pg_version {
854 : 16 => {
855 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
856 0 :
857 0 : match info {
858 : pg_constants::XLOG_NEON_HEAP_INSERT => {
859 0 : let xlrec = v16::rm_neon::XlNeonHeapInsert::decode(buf);
860 0 : assert_eq!(0, buf.remaining());
861 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
862 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
863 0 : }
864 : }
865 : pg_constants::XLOG_NEON_HEAP_DELETE => {
866 0 : let xlrec = v16::rm_neon::XlNeonHeapDelete::decode(buf);
867 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
868 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
869 0 : }
870 : }
871 : pg_constants::XLOG_NEON_HEAP_UPDATE
872 : | pg_constants::XLOG_NEON_HEAP_HOT_UPDATE => {
873 0 : let xlrec = v16::rm_neon::XlNeonHeapUpdate::decode(buf);
874 0 : // the size of tuple data is inferred from the size of the record.
875 0 : // we can't validate the remaining number of bytes without parsing
876 0 : // the tuple data.
877 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
878 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
879 0 : }
880 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
881 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
882 0 : // non-HOT update where the new tuple goes to a different page than
883 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
884 0 : // set.
885 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
886 0 : }
887 : }
888 : pg_constants::XLOG_NEON_HEAP_MULTI_INSERT => {
889 0 : let xlrec = v16::rm_neon::XlNeonHeapMultiInsert::decode(buf);
890 :
891 0 : let offset_array_len =
892 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
893 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
894 0 : 0
895 : } else {
896 0 : std::mem::size_of::<u16>() * xlrec.ntuples as usize
897 : };
898 0 : assert_eq!(offset_array_len, buf.remaining());
899 :
900 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
901 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
902 0 : }
903 : }
904 : pg_constants::XLOG_NEON_HEAP_LOCK => {
905 0 : let xlrec = v16::rm_neon::XlNeonHeapLock::decode(buf);
906 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
907 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
908 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
909 0 : }
910 : }
911 0 : info => bail!("Unknown WAL record type for Neon RMGR: {}", info),
912 : }
913 : }
914 0 : _ => bail!(
915 0 : "Neon RMGR has no known compatibility with PostgreSQL version {}",
916 0 : pg_version
917 0 : ),
918 : }
919 :
920 : // Clear the VM bits if required.
921 0 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
922 0 : let vm_rel = RelTag {
923 0 : forknum: VISIBILITYMAP_FORKNUM,
924 0 : spcnode: decoded.blocks[0].rnode_spcnode,
925 0 : dbnode: decoded.blocks[0].rnode_dbnode,
926 0 : relnode: decoded.blocks[0].rnode_relnode,
927 0 : };
928 0 :
929 0 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
930 0 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
931 :
932 : // Sometimes, Postgres seems to create heap WAL records with the
933 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
934 : // not set. In fact, it's possible that the VM page does not exist at all.
935 : // In that case, we don't want to store a record to clear the VM bit;
936 : // replaying it would fail to find the previous image of the page, because
937 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
938 : // record if it doesn't.
939 0 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
940 0 : if let Some(blknum) = new_vm_blk {
941 0 : if blknum >= vm_size {
942 0 : new_vm_blk = None;
943 0 : }
944 0 : }
945 0 : if let Some(blknum) = old_vm_blk {
946 0 : if blknum >= vm_size {
947 0 : old_vm_blk = None;
948 0 : }
949 0 : }
950 :
951 0 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
952 0 : if new_vm_blk == old_vm_blk {
953 : // An UPDATE record that needs to clear the bits for both old and the
954 : // new page, both of which reside on the same VM page.
955 0 : self.put_rel_wal_record(
956 0 : modification,
957 0 : vm_rel,
958 0 : new_vm_blk.unwrap(),
959 0 : NeonWalRecord::ClearVisibilityMapFlags {
960 0 : new_heap_blkno,
961 0 : old_heap_blkno,
962 0 : flags,
963 0 : },
964 0 : ctx,
965 0 : )
966 0 : .await?;
967 : } else {
968 : // Clear VM bits for one heap page, or for two pages that reside on
969 : // different VM pages.
970 0 : if let Some(new_vm_blk) = new_vm_blk {
971 0 : self.put_rel_wal_record(
972 0 : modification,
973 0 : vm_rel,
974 0 : new_vm_blk,
975 0 : NeonWalRecord::ClearVisibilityMapFlags {
976 0 : new_heap_blkno,
977 0 : old_heap_blkno: None,
978 0 : flags,
979 0 : },
980 0 : ctx,
981 0 : )
982 0 : .await?;
983 0 : }
984 0 : if let Some(old_vm_blk) = old_vm_blk {
985 0 : self.put_rel_wal_record(
986 0 : modification,
987 0 : vm_rel,
988 0 : old_vm_blk,
989 0 : NeonWalRecord::ClearVisibilityMapFlags {
990 0 : new_heap_blkno: None,
991 0 : old_heap_blkno,
992 0 : flags,
993 0 : },
994 0 : ctx,
995 0 : )
996 0 : .await?;
997 0 : }
998 : }
999 0 : }
1000 0 : }
1001 :
1002 0 : Ok(())
1003 0 : }
1004 :
1005 : /// Subroutine of ingest_record(), to handle an XLOG_DBASE_CREATE record.
1006 0 : async fn ingest_xlog_dbase_create(
1007 0 : &mut self,
1008 0 : modification: &mut DatadirModification<'_>,
1009 0 : rec: &XlCreateDatabase,
1010 0 : ctx: &RequestContext,
1011 0 : ) -> anyhow::Result<()> {
1012 0 : let db_id = rec.db_id;
1013 0 : let tablespace_id = rec.tablespace_id;
1014 0 : let src_db_id = rec.src_db_id;
1015 0 : let src_tablespace_id = rec.src_tablespace_id;
1016 :
1017 0 : let rels = modification
1018 0 : .tline
1019 0 : .list_rels(
1020 0 : src_tablespace_id,
1021 0 : src_db_id,
1022 0 : Version::Modified(modification),
1023 0 : ctx,
1024 0 : )
1025 0 : .await?;
1026 :
1027 0 : debug!("ingest_xlog_dbase_create: {} rels", rels.len());
1028 :
1029 : // Copy relfilemap
1030 0 : let filemap = modification
1031 0 : .tline
1032 0 : .get_relmap_file(
1033 0 : src_tablespace_id,
1034 0 : src_db_id,
1035 0 : Version::Modified(modification),
1036 0 : ctx,
1037 0 : )
1038 0 : .await?;
1039 0 : modification
1040 0 : .put_relmap_file(tablespace_id, db_id, filemap, ctx)
1041 0 : .await?;
1042 :
1043 0 : let mut num_rels_copied = 0;
1044 0 : let mut num_blocks_copied = 0;
1045 0 : for src_rel in rels {
1046 0 : assert_eq!(src_rel.spcnode, src_tablespace_id);
1047 0 : assert_eq!(src_rel.dbnode, src_db_id);
1048 :
1049 0 : let nblocks = modification
1050 0 : .tline
1051 0 : .get_rel_size(src_rel, Version::Modified(modification), ctx)
1052 0 : .await?;
1053 0 : let dst_rel = RelTag {
1054 0 : spcnode: tablespace_id,
1055 0 : dbnode: db_id,
1056 0 : relnode: src_rel.relnode,
1057 0 : forknum: src_rel.forknum,
1058 0 : };
1059 0 :
1060 0 : modification.put_rel_creation(dst_rel, nblocks, ctx).await?;
1061 :
1062 : // Copy content
1063 0 : debug!("copying rel {} to {}, {} blocks", src_rel, dst_rel, nblocks);
1064 0 : for blknum in 0..nblocks {
1065 : // Sharding:
1066 : // - src and dst are always on the same shard, because they differ only by dbNode, and
1067 : // dbNode is not included in the hash inputs for sharding.
1068 : // - This WAL command is replayed on all shards, but each shard only copies the blocks
1069 : // that belong to it.
1070 0 : let src_key = rel_block_to_key(src_rel, blknum);
1071 0 : if !self.shard.is_key_local(&src_key) {
1072 0 : debug!(
1073 0 : "Skipping non-local key {} during XLOG_DBASE_CREATE",
1074 : src_key
1075 : );
1076 0 : continue;
1077 0 : }
1078 0 : debug!(
1079 0 : "copying block {} from {} ({}) to {}",
1080 : blknum, src_rel, src_key, dst_rel
1081 : );
1082 :
1083 0 : let content = modification
1084 0 : .tline
1085 0 : .get_rel_page_at_lsn(src_rel, blknum, Version::Modified(modification), ctx)
1086 0 : .await?;
1087 0 : modification.put_rel_page_image(dst_rel, blknum, content)?;
1088 0 : num_blocks_copied += 1;
1089 : }
1090 :
1091 0 : num_rels_copied += 1;
1092 : }
1093 :
1094 0 : info!(
1095 0 : "Created database {}/{}, copied {} blocks in {} rels",
1096 : tablespace_id, db_id, num_blocks_copied, num_rels_copied
1097 : );
1098 0 : Ok(())
1099 0 : }
1100 :
1101 16 : async fn ingest_xlog_smgr_create(
1102 16 : &mut self,
1103 16 : modification: &mut DatadirModification<'_>,
1104 16 : rec: &XlSmgrCreate,
1105 16 : ctx: &RequestContext,
1106 16 : ) -> anyhow::Result<()> {
1107 16 : let rel = RelTag {
1108 16 : spcnode: rec.rnode.spcnode,
1109 16 : dbnode: rec.rnode.dbnode,
1110 16 : relnode: rec.rnode.relnode,
1111 16 : forknum: rec.forknum,
1112 16 : };
1113 16 : self.put_rel_creation(modification, rel, ctx).await?;
1114 16 : Ok(())
1115 16 : }
1116 :
1117 : /// Subroutine of ingest_record(), to handle an XLOG_SMGR_TRUNCATE record.
1118 : ///
1119 : /// This is the same logic as in PostgreSQL's smgr_redo() function.
1120 0 : async fn ingest_xlog_smgr_truncate(
1121 0 : &mut self,
1122 0 : modification: &mut DatadirModification<'_>,
1123 0 : rec: &XlSmgrTruncate,
1124 0 : ctx: &RequestContext,
1125 0 : ) -> anyhow::Result<()> {
1126 0 : let spcnode = rec.rnode.spcnode;
1127 0 : let dbnode = rec.rnode.dbnode;
1128 0 : let relnode = rec.rnode.relnode;
1129 0 :
1130 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
1131 0 : let rel = RelTag {
1132 0 : spcnode,
1133 0 : dbnode,
1134 0 : relnode,
1135 0 : forknum: MAIN_FORKNUM,
1136 0 : };
1137 0 : self.put_rel_truncation(modification, rel, rec.blkno, ctx)
1138 0 : .await?;
1139 0 : }
1140 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_FSM) != 0 {
1141 0 : let rel = RelTag {
1142 0 : spcnode,
1143 0 : dbnode,
1144 0 : relnode,
1145 0 : forknum: FSM_FORKNUM,
1146 0 : };
1147 0 :
1148 0 : let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE;
1149 0 : let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
1150 0 : if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 {
1151 : // The tail of the last remaining FSM page has to be zeroed.
1152 : // We are not precise here: instead of digging into the FSM bitmap format, we just clear the whole page.
1153 0 : modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?;
1154 0 : fsm_physical_page_no += 1;
1155 0 : }
1156 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1157 0 : if nblocks > fsm_physical_page_no {
1158 : // check if there is something to do: the FSM is larger than the truncate position
1159 0 : self.put_rel_truncation(modification, rel, fsm_physical_page_no, ctx)
1160 0 : .await?;
1161 0 : }
1162 0 : }
1163 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
1164 0 : let rel = RelTag {
1165 0 : spcnode,
1166 0 : dbnode,
1167 0 : relnode,
1168 0 : forknum: VISIBILITYMAP_FORKNUM,
1169 0 : };
1170 0 :
1171 0 : let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE;
1172 0 : if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 {
1173 : // The tail of the last remaining VM page has to be zeroed.
1174 : // We are not precise here: instead of digging into the VM bitmap format, we just clear the whole page.
1175 0 : modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?;
1176 0 : vm_page_no += 1;
1177 0 : }
1178 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1179 0 : if nblocks > vm_page_no {
1180 : // check if something to do: VM is larger than truncate position
1181 0 : self.put_rel_truncation(modification, rel, vm_page_no, ctx)
1182 0 : .await?;
1183 0 : }
1184 0 : }
1185 0 : Ok(())
1186 0 : }
1187 :
1188 : /// Subroutine of ingest_record(), to handle XLOG_XACT_* records.
1189 : ///
1190 8 : async fn ingest_xact_record(
1191 8 : &mut self,
1192 8 : modification: &mut DatadirModification<'_>,
1193 8 : parsed: &XlXactParsedRecord,
1194 8 : is_commit: bool,
1195 8 : origin_id: u16,
1196 8 : ctx: &RequestContext,
1197 8 : ) -> anyhow::Result<()> {
1198 8 : // Record update of CLOG pages
1199 8 : let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
1200 8 : let mut segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1201 8 : let mut rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1202 8 : let mut page_xids: Vec<TransactionId> = vec![parsed.xid];
1203 :
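// Commit/abort status lives in the CLOG SLRU (two bits per XID). The loop
// below walks the subtransaction XIDs in order, batching XIDs that fall on
// the same CLOG page and emitting one ClogSetCommitted/ClogSetAborted record
// per page; the put after the loop covers the final page.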
1204 8 : for subxact in &parsed.subxacts {
1205 0 : let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
1206 0 : if subxact_pageno != pageno {
1207 : // This subxact goes to different page. Write the record
1208 : // for all the XIDs on the previous page, and continue
1209 : // accumulating XIDs on this new page.
1210 0 : modification.put_slru_wal_record(
1211 0 : SlruKind::Clog,
1212 0 : segno,
1213 0 : rpageno,
1214 0 : if is_commit {
1215 0 : NeonWalRecord::ClogSetCommitted {
1216 0 : xids: page_xids,
1217 0 : timestamp: parsed.xact_time,
1218 0 : }
1219 : } else {
1220 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1221 : },
1222 0 : )?;
1223 0 : page_xids = Vec::new();
1224 0 : }
1225 0 : pageno = subxact_pageno;
1226 0 : segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1227 0 : rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1228 0 : page_xids.push(*subxact);
1229 : }
1230 8 : modification.put_slru_wal_record(
1231 8 : SlruKind::Clog,
1232 8 : segno,
1233 8 : rpageno,
1234 8 : if is_commit {
1235 8 : NeonWalRecord::ClogSetCommitted {
1236 8 : xids: page_xids,
1237 8 : timestamp: parsed.xact_time,
1238 8 : }
1239 : } else {
1240 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1241 : },
1242 0 : )?;
1243 :
1244 8 : for xnode in &parsed.xnodes {
1245 0 : for forknum in MAIN_FORKNUM..=INIT_FORKNUM {
1246 0 : let rel = RelTag {
1247 0 : forknum,
1248 0 : spcnode: xnode.spcnode,
1249 0 : dbnode: xnode.dbnode,
1250 0 : relnode: xnode.relnode,
1251 0 : };
1252 0 : if modification
1253 0 : .tline
1254 0 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1255 0 : .await?
1256 : {
1257 0 : self.put_rel_drop(modification, rel, ctx).await?;
1258 0 : }
1259 : }
1260 : }
1261 8 : if origin_id != 0 {
1262 0 : modification
1263 0 : .set_replorigin(origin_id, parsed.origin_lsn)
1264 0 : .await?;
1265 8 : }
1266 8 : Ok(())
1267 8 : }
1268 :
1269 0 : async fn ingest_clog_truncate_record(
1270 0 : &mut self,
1271 0 : modification: &mut DatadirModification<'_>,
1272 0 : xlrec: &XlClogTruncate,
1273 0 : ctx: &RequestContext,
1274 0 : ) -> anyhow::Result<()> {
1275 0 : info!(
1276 0 : "RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {}",
1277 : xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db
1278 : );
1279 :
1280 : // Here we treat oldestXid and oldestXidDB
1281 : // differently from the postgres redo routines.
1282 : // In postgres, checkpoint.oldestXid lags behind xlrec.oldest_xid
1283 : // until a checkpoint happens and updates the value.
1284 : // Here we can use the most recent value.
1285 : // It's just an optimization, though, and could be removed.
1286 : // TODO: Figure out if there will be any issues with replicas.
1287 0 : self.checkpoint.oldestXid = xlrec.oldest_xid;
1288 0 : self.checkpoint.oldestXidDB = xlrec.oldest_xid_db;
1289 0 : self.checkpoint_modified = true;
1290 0 :
1291 0 : // TODO Treat AdvanceOldestClogXid() or write a comment why we don't need it
1292 0 :
1293 0 : let latest_page_number =
1294 0 : self.checkpoint.nextXid.value as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
1295 0 :
1296 0 : // Now delete all segments containing pages between xlrec.pageno
1297 0 : // and latest_page_number.
1298 0 :
1299 0 : // First, make an important safety check:
1300 0 : // the current endpoint page must not be eligible for removal.
1301 0 : // See SimpleLruTruncate() in slru.c
1302 0 : if clogpage_precedes(latest_page_number, xlrec.pageno) {
1303 0 : info!("could not truncate directory pg_xact apparent wraparound");
1304 0 : return Ok(());
1305 0 : }
1306 :
1307 : // Iterate via SLRU CLOG segments and drop segments that we're ready to truncate
1308 : //
1309 : // We cannot pass 'lsn' to the Timeline.list_nonrels(), or it
1310 : // will block waiting for the last valid LSN to advance up to
1311 : // it. So we use the previous record's LSN in the get calls
1312 : // instead.
1313 0 : for segno in modification
1314 0 : .tline
1315 0 : .list_slru_segments(SlruKind::Clog, Version::Modified(modification), ctx)
1316 0 : .await?
1317 : {
1318 0 : let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
1319 0 : if slru_may_delete_clogsegment(segpage, xlrec.pageno) {
1320 0 : modification
1321 0 : .drop_slru_segment(SlruKind::Clog, segno, ctx)
1322 0 : .await?;
1323 0 : trace!("Drop CLOG segment {:>04X}", segno);
1324 0 : }
1325 : }
1326 :
1327 0 : Ok(())
1328 0 : }
1329 :
1330 0 : fn ingest_multixact_create_record(
1331 0 : &mut self,
1332 0 : modification: &mut DatadirModification,
1333 0 : xlrec: &XlMultiXactCreate,
1334 0 : ) -> Result<()> {
1335 0 : // Create WAL record for updating the multixact-offsets page
1336 0 : let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
1337 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1338 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1339 0 :
1340 0 : modification.put_slru_wal_record(
1341 0 : SlruKind::MultiXactOffsets,
1342 0 : segno,
1343 0 : rpageno,
1344 0 : NeonWalRecord::MultixactOffsetCreate {
1345 0 : mid: xlrec.mid,
1346 0 : moff: xlrec.moff,
1347 0 : },
1348 0 : )?;
1349 :
1350 : // Create WAL records for the update of each affected multixact-members page
1351 0 : let mut members = xlrec.members.iter();
1352 0 : let mut offset = xlrec.moff;
1353 : loop {
1354 0 : let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1355 0 :
1356 0 : // How many members fit on this page?
1357 0 : let page_remain = pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
1358 0 : - offset % pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1359 0 :
1360 0 : let mut this_page_members: Vec<MultiXactMember> = Vec::new();
1361 0 : for _ in 0..page_remain {
1362 0 : if let Some(m) = members.next() {
1363 0 : this_page_members.push(m.clone());
1364 0 : } else {
1365 0 : break;
1366 : }
1367 : }
1368 0 : if this_page_members.is_empty() {
1369 : // all done
1370 0 : break;
1371 0 : }
1372 0 : let n_this_page = this_page_members.len();
1373 0 :
1374 0 : modification.put_slru_wal_record(
1375 0 : SlruKind::MultiXactMembers,
1376 0 : pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
1377 0 : pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
1378 0 : NeonWalRecord::MultixactMembersCreate {
1379 0 : moff: offset,
1380 0 : members: this_page_members,
1381 0 : },
1382 0 : )?;
1383 :
1384 : // Note: The multixact members can wrap around, even within one WAL record.
1385 0 : offset = offset.wrapping_add(n_this_page as u32);
1386 : }
1387 0 : if xlrec.mid >= self.checkpoint.nextMulti {
1388 0 : self.checkpoint.nextMulti = xlrec.mid + 1;
1389 0 : self.checkpoint_modified = true;
1390 0 : }
1391 0 : if xlrec.moff + xlrec.nmembers > self.checkpoint.nextMultiOffset {
1392 0 : self.checkpoint.nextMultiOffset = xlrec.moff + xlrec.nmembers;
1393 0 : self.checkpoint_modified = true;
1394 0 : }
1395 0 : let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| {
1396 0 : if let Some(max_xid) = acc {
1397 0 : if mbr.xid.wrapping_sub(max_xid) as i32 > 0 {
1398 0 : Some(mbr.xid)
1399 : } else {
1400 0 : acc
1401 : }
1402 : } else {
1403 0 : Some(mbr.xid)
1404 : }
1405 0 : });
1406 :
1407 0 : if let Some(max_xid) = max_mbr_xid {
1408 0 : if self.checkpoint.update_next_xid(max_xid) {
1409 0 : self.checkpoint_modified = true;
1410 0 : }
1411 0 : }
1412 0 : Ok(())
1413 0 : }
1414 :
1415 0 : async fn ingest_multixact_truncate_record(
1416 0 : &mut self,
1417 0 : modification: &mut DatadirModification<'_>,
1418 0 : xlrec: &XlMultiXactTruncate,
1419 0 : ctx: &RequestContext,
1420 0 : ) -> Result<()> {
1421 0 : self.checkpoint.oldestMulti = xlrec.end_trunc_off;
1422 0 : self.checkpoint.oldestMultiDB = xlrec.oldest_multi_db;
1423 0 : self.checkpoint_modified = true;
1424 0 :
1425 0 : // PerformMembersTruncation
1426 0 : let maxsegment: i32 = mx_offset_to_member_segment(pg_constants::MAX_MULTIXACT_OFFSET);
1427 0 : let startsegment: i32 = mx_offset_to_member_segment(xlrec.start_trunc_memb);
1428 0 : let endsegment: i32 = mx_offset_to_member_segment(xlrec.end_trunc_memb);
1429 0 : let mut segment: i32 = startsegment;
1430 :
1431 : // Delete all the segments except the last one. The last segment can still
1432 : // contain, possibly partially, valid data.
1433 0 : while segment != endsegment {
1434 0 : modification
1435 0 : .drop_slru_segment(SlruKind::MultiXactMembers, segment as u32, ctx)
1436 0 : .await?;
1437 :
1438 : /* move to next segment, handling wraparound correctly */
1439 0 : if segment == maxsegment {
1440 0 : segment = 0;
1441 0 : } else {
1442 0 : segment += 1;
1443 0 : }
1444 : }
1445 :
1446 : // Truncate offsets
1447 : // FIXME: this did not handle wraparound correctly
1448 :
1449 0 : Ok(())
1450 0 : }
1451 :
1452 0 : async fn ingest_relmap_page(
1453 0 : &mut self,
1454 0 : modification: &mut DatadirModification<'_>,
1455 0 : xlrec: &XlRelmapUpdate,
1456 0 : decoded: &DecodedWALRecord,
1457 0 : ctx: &RequestContext,
1458 0 : ) -> Result<()> {
1459 0 : let mut buf = decoded.record.clone();
1460 0 : buf.advance(decoded.main_data_offset);
1461 0 : // skip xl_relmap_update
1462 0 : buf.advance(12);
1463 0 :
1464 0 : modification
1465 0 : .put_relmap_file(
1466 0 : xlrec.tsid,
1467 0 : xlrec.dbid,
1468 0 : Bytes::copy_from_slice(&buf[..]),
1469 0 : ctx,
1470 0 : )
1471 0 : .await
1472 0 : }
1473 :
1474 18 : async fn put_rel_creation(
1475 18 : &mut self,
1476 18 : modification: &mut DatadirModification<'_>,
1477 18 : rel: RelTag,
1478 18 : ctx: &RequestContext,
1479 18 : ) -> Result<()> {
1480 18 : modification.put_rel_creation(rel, 0, ctx).await?;
1481 18 : Ok(())
1482 18 : }
1483 :
1484 272426 : async fn put_rel_page_image(
1485 272426 : &mut self,
1486 272426 : modification: &mut DatadirModification<'_>,
1487 272426 : rel: RelTag,
1488 272426 : blknum: BlockNumber,
1489 272426 : img: Bytes,
1490 272426 : ctx: &RequestContext,
1491 272426 : ) -> Result<(), PageReconstructError> {
1492 272426 : self.handle_rel_extend(modification, rel, blknum, ctx)
1493 7219 : .await?;
1494 272426 : modification.put_rel_page_image(rel, blknum, img)?;
1495 272426 : Ok(())
1496 272426 : }
1497 :
1498 145630 : async fn put_rel_wal_record(
1499 145630 : &mut self,
1500 145630 : modification: &mut DatadirModification<'_>,
1501 145630 : rel: RelTag,
1502 145630 : blknum: BlockNumber,
1503 145630 : rec: NeonWalRecord,
1504 145630 : ctx: &RequestContext,
1505 145630 : ) -> Result<()> {
1506 145630 : self.handle_rel_extend(modification, rel, blknum, ctx)
1507 62 : .await?;
1508 145630 : modification.put_rel_wal_record(rel, blknum, rec)?;
1509 145630 : Ok(())
1510 145630 : }
1511 :
1512 6012 : async fn put_rel_truncation(
1513 6012 : &mut self,
1514 6012 : modification: &mut DatadirModification<'_>,
1515 6012 : rel: RelTag,
1516 6012 : nblocks: BlockNumber,
1517 6012 : ctx: &RequestContext,
1518 6012 : ) -> anyhow::Result<()> {
1519 6012 : modification.put_rel_truncation(rel, nblocks, ctx).await?;
1520 6012 : Ok(())
1521 6012 : }
1522 :
1523 2 : async fn put_rel_drop(
1524 2 : &mut self,
1525 2 : modification: &mut DatadirModification<'_>,
1526 2 : rel: RelTag,
1527 2 : ctx: &RequestContext,
1528 2 : ) -> Result<()> {
1529 2 : modification.put_rel_drop(rel, ctx).await?;
1530 2 : Ok(())
1531 2 : }
1532 :
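     : /// Ensure that relation `rel` is at least `blknum + 1` blocks long,
     : /// creating it (with size 0) if it does not exist yet and zero-filling
     : /// any gap between its old size and `blknum`.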
1533 418056 : async fn handle_rel_extend(
1534 418056 : &mut self,
1535 418056 : modification: &mut DatadirModification<'_>,
1536 418056 : rel: RelTag,
1537 418056 : blknum: BlockNumber,
1538 418056 : ctx: &RequestContext,
1539 418056 : ) -> Result<(), PageReconstructError> {
1540 418056 : let new_nblocks = blknum + 1;
1541 : // Check if the relation exists. We implicitly create relations on first
1542 : // record.
1543 : // TODO: it would be nice to be more explicit about it
1544 :
1545 : // Get current size and put rel creation if rel doesn't exist
1546 : //
1547 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size would
1548 : // check the cache too. This is because eagerly checking the cache results in
1549 : // less work overall and 10% better performance. It's more work on cache miss
1550 : // but cache miss is rare.
1551 418056 : let old_nblocks = if let Some(nblocks) = modification
1552 418056 : .tline
1553 418056 : .get_cached_rel_size(&rel, modification.get_lsn())
1554 : {
1555 418046 : nblocks
1556 10 : } else if !modification
1557 10 : .tline
1558 10 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1559 0 : .await?
1560 : {
1561 : // create it with 0 size initially, the logic below will extend it
1562 10 : modification
1563 10 : .put_rel_creation(rel, 0, ctx)
1564 0 : .await
1565 10 : .context("Relation Error")?;
1566 10 : 0
1567 : } else {
1568 0 : modification
1569 0 : .tline
1570 0 : .get_rel_size(rel, Version::Modified(modification), ctx)
1571 0 : .await?
1572 : };
1573 :
1574 418056 : if new_nblocks > old_nblocks {
1575 : //info!("extending {} {} to {}", rel, old_nblocks, new_nblocks);
1576 274788 : modification.put_rel_extend(rel, new_nblocks, ctx).await?;
1577 :
1578 274788 : let mut key = rel_block_to_key(rel, blknum);
1579 : // fill the gap with zeros
1580 274788 : for gap_blknum in old_nblocks..blknum {
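     : // For example, if the relation had 3 blocks and this record touches
     : // block 5, blocks 3 and 4 are written as ZERO_PAGE here, and block 5
     : // itself is written by the caller.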
1581 2998 : key.field6 = gap_blknum;
1582 2998 :
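     : // On a sharded tenant, only materialize the gap pages that this shard
     : // owns; the shards owning the skipped blocks ingest the same WAL and
     : // fill them in on their side.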
1583 2998 : if self.shard.get_shard_number(&key) != self.shard.number {
1584 0 : continue;
1585 2998 : }
1586 2998 :
1587 2998 : modification.put_rel_page_image(rel, gap_blknum, ZERO_PAGE.clone())?;
1588 : }
1589 143268 : }
1590 418056 : Ok(())
1591 418056 : }
1592 :
1593 0 : async fn put_slru_page_image(
1594 0 : &mut self,
1595 0 : modification: &mut DatadirModification<'_>,
1596 0 : kind: SlruKind,
1597 0 : segno: u32,
1598 0 : blknum: BlockNumber,
1599 0 : img: Bytes,
1600 0 : ctx: &RequestContext,
1601 0 : ) -> Result<()> {
1602 0 : self.handle_slru_extend(modification, kind, segno, blknum, ctx)
1603 0 : .await?;
1604 0 : modification.put_slru_page_image(kind, segno, blknum, img)?;
1605 0 : Ok(())
1606 0 : }
1607 :
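     : /// Ensure that SLRU segment `segno` of `kind` is at least `blknum + 1`
     : /// blocks long, creating it if needed and zero-filling any gap.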
1608 0 : async fn handle_slru_extend(
1609 0 : &mut self,
1610 0 : modification: &mut DatadirModification<'_>,
1611 0 : kind: SlruKind,
1612 0 : segno: u32,
1613 0 : blknum: BlockNumber,
1614 0 : ctx: &RequestContext,
1615 0 : ) -> anyhow::Result<()> {
1616 0 : // We don't use a cache for this like we do for relations. SLRUs are explicitly
1617 0 : // extended with ZEROPAGE records, not with commit records, so it happens
1618 0 : // a lot less frequently.
1619 0 :
1620 0 : let new_nblocks = blknum + 1;
1621 : // Check if the segment exists. We implicitly create SLRU segments on the
1622 : // first record that touches them.
1623 : // TODO: it would be nice to be more explicit about it
1624 0 : let old_nblocks = if !modification
1625 0 : .tline
1626 0 : .get_slru_segment_exists(kind, segno, Version::Modified(modification), ctx)
1627 0 : .await?
1628 : {
1629 : // create it with 0 size initially, the logic below will extend it
1630 0 : modification
1631 0 : .put_slru_segment_creation(kind, segno, 0, ctx)
1632 0 : .await?;
1633 0 : 0
1634 : } else {
1635 0 : modification
1636 0 : .tline
1637 0 : .get_slru_segment_size(kind, segno, Version::Modified(modification), ctx)
1638 0 : .await?
1639 : };
1640 :
1641 0 : if new_nblocks > old_nblocks {
1642 0 : trace!(
1643 0 : "extending SLRU {:?} seg {} from {} to {} blocks",
1644 : kind,
1645 : segno,
1646 : old_nblocks,
1647 : new_nblocks
1648 : );
1649 0 : modification.put_slru_extend(kind, segno, new_nblocks)?;
1650 :
1651 : // fill the gap with zeros
1652 0 : for gap_blknum in old_nblocks..blknum {
1653 0 : modification.put_slru_page_image(kind, segno, gap_blknum, ZERO_PAGE.clone())?;
1654 : }
1655 0 : }
1656 0 : Ok(())
1657 0 : }
1658 : }
1659 :
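     : /// Get the size of `rel` in blocks at the modification's LSN, or 0 if the
     : /// relation does not exist yet.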
1660 12 : async fn get_relsize(
1661 12 : modification: &DatadirModification<'_>,
1662 12 : rel: RelTag,
1663 12 : ctx: &RequestContext,
1664 12 : ) -> anyhow::Result<BlockNumber> {
1665 12 : let nblocks = if !modification
1666 12 : .tline
1667 12 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1668 0 : .await?
1669 : {
1670 0 : 0
1671 : } else {
1672 12 : modification
1673 12 : .tline
1674 12 : .get_rel_size(rel, Version::Modified(modification), ctx)
1675 0 : .await?
1676 : };
1677 12 : Ok(nblocks)
1678 12 : }
1679 :
1680 : #[allow(clippy::bool_assert_comparison)]
1681 : #[cfg(test)]
1682 : mod tests {
1683 : use super::*;
1684 : use crate::tenant::harness::*;
1685 : use crate::tenant::remote_timeline_client::{remote_initdb_archive_path, INITDB_PATH};
1686 : use postgres_ffi::RELSEG_SIZE;
1687 :
1688 : use crate::DEFAULT_PG_VERSION;
1689 :
1690 : /// Arbitrary relation tag, for testing.
1691 : const TESTREL_A: RelTag = RelTag {
1692 : spcnode: 0,
1693 : dbnode: 111,
1694 : relnode: 1000,
1695 : forknum: 0,
1696 : };
1697 :
1698 12 : fn assert_current_logical_size(_timeline: &Timeline, _lsn: Lsn) {
1699 12 : // TODO
1700 12 : }
1701 :
1702 : static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
1703 :
1704 8 : async fn init_walingest_test(tline: &Timeline, ctx: &RequestContext) -> Result<WalIngest> {
1705 8 : let mut m = tline.begin_modification(Lsn(0x10));
1706 8 : m.put_checkpoint(ZERO_CHECKPOINT.clone())?;
1707 16 : m.put_relmap_file(0, 111, Bytes::from(""), ctx).await?; // dummy relmapper file
1708 8 : m.commit(ctx).await?;
1709 8 : let walingest = WalIngest::new(tline, Lsn(0x10), ctx).await?;
1710 :
1711 8 : Ok(walingest)
1712 8 : }
1713 :
1714 : #[tokio::test]
1715 2 : async fn test_relsize() -> Result<()> {
1716 8 : let (tenant, ctx) = TenantHarness::create("test_relsize")?.load().await;
1717 2 : let tline = tenant
1718 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1719 6 : .await?;
1720 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1721 2 :
1722 2 : let mut m = tline.begin_modification(Lsn(0x20));
1723 2 : walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
1724 2 : walingest
1725 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1726 2 : .await?;
1727 2 : m.commit(&ctx).await?;
1728 2 : let mut m = tline.begin_modification(Lsn(0x30));
1729 2 : walingest
1730 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 3"), &ctx)
1731 2 : .await?;
1732 2 : m.commit(&ctx).await?;
1733 2 : let mut m = tline.begin_modification(Lsn(0x40));
1734 2 : walingest
1735 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1 at 4"), &ctx)
1736 2 : .await?;
1737 2 : m.commit(&ctx).await?;
1738 2 : let mut m = tline.begin_modification(Lsn(0x50));
1739 2 : walingest
1740 2 : .put_rel_page_image(&mut m, TESTREL_A, 2, test_img("foo blk 2 at 5"), &ctx)
1741 2 : .await?;
1742 2 : m.commit(&ctx).await?;
1743 2 :
1744 2 : assert_current_logical_size(&tline, Lsn(0x50));
1745 2 :
1746 2 : // The relation was created at LSN 2, not visible at LSN 1 yet.
1747 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
1748 2 : tline
1749 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
1750 2 : .await?,
1751 2 : false
1752 2 : );
1753 2 : assert!(tline
1754 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
1755 2 : .await
1756 2 : .is_err());
1757 2 : assert_eq!(
1758 2 : tline
1759 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1760 2 : .await?,
1761 2 : true
1762 2 : );
1763 2 : assert_eq!(
1764 2 : tline
1765 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1766 2 : .await?,
1767 2 : 1
1768 2 : );
1769 2 : assert_eq!(
1770 2 : tline
1771 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
1772 2 : .await?,
1773 2 : 3
1774 2 : );
1775 2 :
1776 2 : // Check page contents at each LSN
1777 2 : assert_eq!(
1778 2 : tline
1779 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x20)), &ctx)
1780 2 : .await?,
1781 2 : test_img("foo blk 0 at 2")
1782 2 : );
1783 2 :
1784 2 : assert_eq!(
1785 2 : tline
1786 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x30)), &ctx)
1787 2 : .await?,
1788 2 : test_img("foo blk 0 at 3")
1789 2 : );
1790 2 :
1791 2 : assert_eq!(
1792 2 : tline
1793 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x40)), &ctx)
1794 2 : .await?,
1795 2 : test_img("foo blk 0 at 3")
1796 2 : );
1797 2 : assert_eq!(
1798 2 : tline
1799 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x40)), &ctx)
1800 2 : .await?,
1801 2 : test_img("foo blk 1 at 4")
1802 2 : );
1803 2 :
1804 2 : assert_eq!(
1805 2 : tline
1806 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x50)), &ctx)
1807 2 : .await?,
1808 2 : test_img("foo blk 0 at 3")
1809 2 : );
1810 2 : assert_eq!(
1811 2 : tline
1812 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x50)), &ctx)
1813 2 : .await?,
1814 2 : test_img("foo blk 1 at 4")
1815 2 : );
1816 2 : assert_eq!(
1817 2 : tline
1818 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
1819 2 : .await?,
1820 2 : test_img("foo blk 2 at 5")
1821 2 : );
1822 2 :
1823 2 : // Truncate last block
1824 2 : let mut m = tline.begin_modification(Lsn(0x60));
1825 2 : walingest
1826 2 : .put_rel_truncation(&mut m, TESTREL_A, 2, &ctx)
1827 2 : .await?;
1828 2 : m.commit(&ctx).await?;
1829 2 : assert_current_logical_size(&tline, Lsn(0x60));
1830 2 :
1831 2 : // Check reported size and contents after truncation
1832 2 : assert_eq!(
1833 2 : tline
1834 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), &ctx)
1835 2 : .await?,
1836 2 : 2
1837 2 : );
1838 2 : assert_eq!(
1839 2 : tline
1840 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x60)), &ctx)
1841 2 : .await?,
1842 2 : test_img("foo blk 0 at 3")
1843 2 : );
1844 2 : assert_eq!(
1845 2 : tline
1846 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x60)), &ctx)
1847 2 : .await?,
1848 2 : test_img("foo blk 1 at 4")
1849 2 : );
1850 2 :
1851 2 : // should still see the truncated block with older LSN
1852 2 : assert_eq!(
1853 2 : tline
1854 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
1855 2 : .await?,
1856 2 : 3
1857 2 : );
1858 2 : assert_eq!(
1859 2 : tline
1860 2 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
1861 2 : .await?,
1862 2 : test_img("foo blk 2 at 5")
1863 2 : );
1864 2 :
1865 2 : // Truncate to zero length
1866 2 : let mut m = tline.begin_modification(Lsn(0x68));
1867 2 : walingest
1868 2 : .put_rel_truncation(&mut m, TESTREL_A, 0, &ctx)
1869 2 : .await?;
1870 2 : m.commit(&ctx).await?;
1871 2 : assert_eq!(
1872 2 : tline
1873 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x68)), &ctx)
1874 2 : .await?,
1875 2 : 0
1876 2 : );
1877 2 :
1878 2 : // Extend from 0 to 2 blocks, leaving a gap
1879 2 : let mut m = tline.begin_modification(Lsn(0x70));
1880 2 : walingest
1881 2 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1"), &ctx)
1882 2 : .await?;
1883 2 : m.commit(&ctx).await?;
1884 2 : assert_eq!(
1885 2 : tline
1886 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x70)), &ctx)
1887 2 : .await?,
1888 2 : 2
1889 2 : );
1890 2 : assert_eq!(
1891 2 : tline
1892 2 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x70)), &ctx)
1893 2 : .await?,
1894 2 : ZERO_PAGE
1895 2 : );
1896 2 : assert_eq!(
1897 2 : tline
1898 2 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x70)), &ctx)
1899 2 : .await?,
1900 2 : test_img("foo blk 1")
1901 2 : );
1902 2 :
1903 2 : // Extend a lot more, leaving a big gap that spans across segments
1904 2 : let mut m = tline.begin_modification(Lsn(0x80));
1905 2 : walingest
1906 2 : .put_rel_page_image(&mut m, TESTREL_A, 1500, test_img("foo blk 1500"), &ctx)
1907 2 : .await?;
1908 214 : m.commit(&ctx).await?;
1909 2 : assert_eq!(
1910 2 : tline
1911 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
1912 2 : .await?,
1913 2 : 1501
1914 2 : );
1915 2998 : for blk in 2..1500 {
1916 2996 : assert_eq!(
1917 2996 : tline
1918 2996 : .get_rel_page_at_lsn(TESTREL_A, blk, Version::Lsn(Lsn(0x80)), &ctx)
1919 3030 : .await?,
1920 2996 : ZERO_PAGE
1921 2 : );
1922 2 : }
1923 2 : assert_eq!(
1924 2 : tline
1925 2 : .get_rel_page_at_lsn(TESTREL_A, 1500, Version::Lsn(Lsn(0x80)), &ctx)
1926 2 : .await?,
1927 2 : test_img("foo blk 1500")
1928 2 : );
1929 2 :
1930 2 : Ok(())
1931 2 : }
1932 :
1933 : // Test what happens if we dropped a relation
1934 : // and then created it again within the same layer.
1935 : #[tokio::test]
1936 2 : async fn test_drop_extend() -> Result<()> {
1937 8 : let (tenant, ctx) = TenantHarness::create("test_drop_extend")?.load().await;
1938 2 : let tline = tenant
1939 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1940 6 : .await?;
1941 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1942 2 :
1943 2 : let mut m = tline.begin_modification(Lsn(0x20));
1944 2 : walingest
1945 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1946 2 : .await?;
1947 2 : m.commit(&ctx).await?;
1948 2 :
1949 2 : // Check that rel exists and size is correct
1950 2 : assert_eq!(
1951 2 : tline
1952 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1953 2 : .await?,
1954 2 : true
1955 2 : );
1956 2 : assert_eq!(
1957 2 : tline
1958 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1959 2 : .await?,
1960 2 : 1
1961 2 : );
1962 2 :
1963 2 : // Drop rel
1964 2 : let mut m = tline.begin_modification(Lsn(0x30));
1965 2 : walingest.put_rel_drop(&mut m, TESTREL_A, &ctx).await?;
1966 2 : m.commit(&ctx).await?;
1967 2 :
1968 2 : // Check that rel is not visible anymore
1969 2 : assert_eq!(
1970 2 : tline
1971 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x30)), &ctx)
1972 2 : .await?,
1973 2 : false
1974 2 : );
1975 2 :
1976 2 : // FIXME: should fail
1977 2 : //assert!(tline.get_rel_size(TESTREL_A, Lsn(0x30), false)?.is_none());
1978 2 :
1979 2 : // Re-create it
1980 2 : let mut m = tline.begin_modification(Lsn(0x40));
1981 2 : walingest
1982 2 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 4"), &ctx)
1983 2 : .await?;
1984 2 : m.commit(&ctx).await?;
1985 2 :
1986 2 : // Check that rel exists and size is correct
1987 2 : assert_eq!(
1988 2 : tline
1989 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x40)), &ctx)
1990 2 : .await?,
1991 2 : true
1992 2 : );
1993 2 : assert_eq!(
1994 2 : tline
1995 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x40)), &ctx)
1996 2 : .await?,
1997 2 : 1
1998 2 : );
1999 2 :
2000 2 : Ok(())
2001 2 : }
2002 :
2003 : // Test what happens if we truncated a relation
2004 : // so that one of its segments was dropped
2005 : // and then extended it again within the same layer.
2006 : #[tokio::test]
2007 2 : async fn test_truncate_extend() -> Result<()> {
2008 6 : let (tenant, ctx) = TenantHarness::create("test_truncate_extend")?.load().await;
2009 2 : let tline = tenant
2010 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2011 6 : .await?;
2012 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2013 2 :
2014 2 : // Create a 20 MB relation (the size is arbitrary)
2015 2 : let relsize = 20 * 1024 * 1024 / 8192;
2016 2 : let mut m = tline.begin_modification(Lsn(0x20));
2017 5120 : for blkno in 0..relsize {
2018 5120 : let data = format!("foo blk {} at {}", blkno, Lsn(0x20));
2019 5120 : walingest
2020 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2021 2 : .await?;
2022 2 : }
2023 41 : m.commit(&ctx).await?;
2024 2 :
2025 2 : // The relation was created at LSN 20, not visible at LSN 1 yet.
2026 2 : // The relation was created at LSN 0x20, so it is not visible at LSN 0x10 yet.
2027 2 : tline
2028 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
2029 2 : .await?,
2030 2 : false
2031 2 : );
2032 2 : assert!(tline
2033 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
2034 2 : .await
2035 2 : .is_err());
2036 2 :
2037 2 : assert_eq!(
2038 2 : tline
2039 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2040 2 : .await?,
2041 2 : true
2042 2 : );
2043 2 : assert_eq!(
2044 2 : tline
2045 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2046 2 : .await?,
2047 2 : relsize
2048 2 : );
2049 2 :
2050 2 : // Check relation content
2051 5120 : for blkno in 0..relsize {
2052 5120 : let lsn = Lsn(0x20);
2053 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2054 5120 : assert_eq!(
2055 5120 : tline
2056 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(lsn), &ctx)
2057 200 : .await?,
2058 5120 : test_img(&data)
2059 2 : );
2060 2 : }
2061 2 :
2062 2 : // Truncate relation so that second segment was dropped
2063 2 : // - only leave one page
2064 2 : let mut m = tline.begin_modification(Lsn(0x60));
2065 2 : walingest
2066 2 : .put_rel_truncation(&mut m, TESTREL_A, 1, &ctx)
2067 2 : .await?;
2068 2 : m.commit(&ctx).await?;
2069 2 :
2070 2 : // Check reported size and contents after truncation
2071 2 : assert_eq!(
2072 2 : tline
2073 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), &ctx)
2074 2 : .await?,
2075 2 : 1
2076 2 : );
2077 2 :
2078 4 : for blkno in 0..1 {
2079 2 : let lsn = Lsn(0x20);
2080 2 : let data = format!("foo blk {} at {}", blkno, lsn);
2081 2 : assert_eq!(
2082 2 : tline
2083 2 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x60)), &ctx)
2084 2 : .await?,
2085 2 : test_img(&data)
2086 2 : );
2087 2 : }
2088 2 :
2089 2 : // should still see all blocks with older LSN
2090 2 : assert_eq!(
2091 2 : tline
2092 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
2093 2 : .await?,
2094 2 : relsize
2095 2 : );
2096 5120 : for blkno in 0..relsize {
2097 5120 : let lsn = Lsn(0x20);
2098 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2099 5120 : assert_eq!(
2100 5120 : tline
2101 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x50)), &ctx)
2102 401 : .await?,
2103 5120 : test_img(&data)
2104 2 : );
2105 2 : }
2106 2 :
2107 2 : // Extend relation again.
2108 2 : // Add enough blocks to create second segment
2109 2 : let lsn = Lsn(0x80);
2110 2 : let mut m = tline.begin_modification(lsn);
2111 5120 : for blkno in 0..relsize {
2112 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2113 5120 : walingest
2114 5120 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2115 2 : .await?;
2116 2 : }
2117 42 : m.commit(&ctx).await?;
2118 2 :
2119 2 : assert_eq!(
2120 2 : tline
2121 2 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
2122 2 : .await?,
2123 2 : true
2124 2 : );
2125 2 : assert_eq!(
2126 2 : tline
2127 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
2128 2 : .await?,
2129 2 : relsize
2130 2 : );
2131 2 : // Check relation content
2132 5120 : for blkno in 0..relsize {
2133 5120 : let lsn = Lsn(0x80);
2134 5120 : let data = format!("foo blk {} at {}", blkno, lsn);
2135 5120 : assert_eq!(
2136 5120 : tline
2137 5120 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x80)), &ctx)
2138 200 : .await?,
2139 5120 : test_img(&data)
2140 2 : );
2141 2 : }
2142 2 :
2143 2 : Ok(())
2144 2 : }
2145 :
2146 : /// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
2147 : /// split into multiple 1 GB segments in Postgres.
2148 : #[tokio::test]
2149 2 : async fn test_large_rel() -> Result<()> {
2150 8 : let (tenant, ctx) = TenantHarness::create("test_large_rel")?.load().await;
2151 2 : let tline = tenant
2152 2 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2153 6 : .await?;
2154 5 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2155 2 :
2156 2 : let mut lsn = 0x10;
2157 262146 : for blknum in 0..RELSEG_SIZE + 1 {
2158 262146 : lsn += 0x10;
2159 262146 : let mut m = tline.begin_modification(Lsn(lsn));
2160 262146 : let img = test_img(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
2161 262146 : walingest
2162 262146 : .put_rel_page_image(&mut m, TESTREL_A, blknum as BlockNumber, img, &ctx)
2163 7219 : .await?;
2164 262146 : m.commit(&ctx).await?;
2165 2 : }
2166 2 :
2167 2 : assert_current_logical_size(&tline, Lsn(lsn));
2168 2 :
2169 2 : assert_eq!(
2170 2 : tline
2171 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2172 2 : .await?,
2173 2 : RELSEG_SIZE + 1
2174 2 : );
2175 2 :
2176 2 : // Truncate one block
2177 2 : lsn += 0x10;
2178 2 : let mut m = tline.begin_modification(Lsn(lsn));
2179 2 : walingest
2180 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE, &ctx)
2181 2 : .await?;
2182 2 : m.commit(&ctx).await?;
2183 2 : assert_eq!(
2184 2 : tline
2185 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2186 2 : .await?,
2187 2 : RELSEG_SIZE
2188 2 : );
2189 2 : assert_current_logical_size(&tline, Lsn(lsn));
2190 2 :
2191 2 : // Truncate another block
2192 2 : lsn += 0x10;
2193 2 : let mut m = tline.begin_modification(Lsn(lsn));
2194 2 : walingest
2195 2 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE - 1, &ctx)
2196 2 : .await?;
2197 2 : m.commit(&ctx).await?;
2198 2 : assert_eq!(
2199 2 : tline
2200 2 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2201 2 : .await?,
2202 2 : RELSEG_SIZE - 1
2203 2 : );
2204 2 : assert_current_logical_size(&tline, Lsn(lsn));
2205 2 :
2206 2 : // Truncate to 1500, and then truncate all the way down to 0, one block at a time
2207 2 : // Truncate to 3000 blocks, and then truncate all the way down to 0, one block at a time.
2208 2 : let mut size: i32 = 3000;
2209 6004 : while size >= 0 {
2210 6002 : lsn += 0x10;
2211 6002 : let mut m = tline.begin_modification(Lsn(lsn));
2212 6002 : walingest
2213 6002 : .put_rel_truncation(&mut m, TESTREL_A, size as BlockNumber, &ctx)
2214 140 : .await?;
2215 6002 : m.commit(&ctx).await?;
2216 6002 : assert_eq!(
2217 6002 : tline
2218 6002 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2219 2 : .await?,
2220 6002 : size as BlockNumber
2221 2 : );
2222 2 :
2223 6002 : size -= 1;
2224 2 : }
2225 2 : assert_current_logical_size(&tline, Lsn(lsn));
2226 2 :
2227 2 : Ok(())
2228 2 : }
2229 :
2230 : /// Replay a wal segment file taken directly from safekeepers.
2231 : ///
2232 : /// This test is useful for benchmarking since it allows us to profile only
2233 : /// the walingest code in a single-threaded executor, and iterate more quickly
2234 : /// without waiting for unrelated steps.
2235 : #[tokio::test]
2236 2 : async fn test_ingest_real_wal() {
2237 2 : use crate::tenant::harness::*;
2238 2 : use postgres_ffi::waldecoder::WalStreamDecoder;
2239 2 : use postgres_ffi::WAL_SEGMENT_SIZE;
2240 2 :
2241 2 : // Define test data path and constants.
2242 2 : //
2243 2 : // Steps to reconstruct the data, if needed:
2244 2 : // 1. Run the pgbench python test
2245 2 : // 2. Take the first wal segment file from safekeeper
2246 2 : // 3. Compress it using `zstd --long input_file`
2247 2 : // 4. Copy initdb.tar.zst from local_fs_remote_storage
2248 2 : // 5. Grep sk logs for "restart decoder" to get startpoint
2249 2 : // 6. Run just the decoder from this test to get the endpoint.
2250 2 : // It's the last LSN the decoder will output.
2251 2 : let pg_version = 15; // The test data was generated by pg15
2252 2 : let path = "test_data/sk_wal_segment_from_pgbench";
2253 2 : let wal_segment_path = format!("{path}/000000010000000000000001.zst");
2254 2 : let source_initdb_path = format!("{path}/{INITDB_PATH}");
2255 2 : let startpoint = Lsn::from_hex("14AEC08").unwrap();
2256 2 : let _endpoint = Lsn::from_hex("1FFFF98").unwrap();
2257 2 :
2258 2 : let harness = TenantHarness::create("test_ingest_real_wal").unwrap();
2259 8 : let (tenant, ctx) = harness.load().await;
2260 2 :
2261 2 : let remote_initdb_path =
2262 2 : remote_initdb_archive_path(&tenant.tenant_shard_id().tenant_id, &TIMELINE_ID);
2263 2 : let initdb_path = harness.remote_fs_dir.join(remote_initdb_path.get_path());
2264 2 :
2265 2 : std::fs::create_dir_all(initdb_path.parent().unwrap())
2266 2 : .expect("creating test dir should work");
2267 2 : std::fs::copy(source_initdb_path, initdb_path).expect("copying the initdb.tar.zst works");
2268 2 :
2269 2 : // Bootstrap a real timeline. We can't use create_test_timeline because
2270 2 : // it doesn't create a real checkpoint, and Walingest::new tries to parse
2271 2 : // it doesn't create a real checkpoint, and WalIngest::new tries to parse
2272 2 : let tline = tenant
2273 2 : .bootstrap_timeline_test(TIMELINE_ID, pg_version, Some(TIMELINE_ID), &ctx)
2274 20825 : .await
2275 2 : .unwrap();
2276 2 :
2277 2 : // We fully read and decompress this into memory before decoding
2278 2 : // to get a more accurate perf profile of the decoder.
2279 2 : let bytes = {
2280 2 : use async_compression::tokio::bufread::ZstdDecoder;
2281 2 : let file = tokio::fs::File::open(wal_segment_path).await.unwrap();
2282 2 : let reader = tokio::io::BufReader::new(file);
2283 2 : let decoder = ZstdDecoder::new(reader);
2284 2 : let mut reader = tokio::io::BufReader::new(decoder);
2285 2 : let mut buffer = Vec::new();
2286 223 : tokio::io::copy_buf(&mut reader, &mut buffer).await.unwrap();
2287 2 : buffer
2288 2 : };
2289 2 :
2290 2 : // TODO start a profiler too
2291 2 : let started_at = std::time::Instant::now();
2292 2 :
2293 2 : // Initialize walingest
2294 2 : let xlogoff: usize = startpoint.segment_offset(WAL_SEGMENT_SIZE);
2295 2 : let mut decoder = WalStreamDecoder::new(startpoint, pg_version);
2296 2 : let mut walingest = WalIngest::new(tline.as_ref(), startpoint, &ctx)
2297 5 : .await
2298 2 : .unwrap();
2299 2 : let mut modification = tline.begin_modification(startpoint);
2300 2 : let mut decoded = DecodedWALRecord::default();
2301 2 : println!("decoding {} bytes", bytes.len() - xlogoff);
2302 2 :
2303 2 : // Decode and ingest wal. We process the wal in chunks because
2304 2 : // that's what happens when we get bytes from safekeepers.
2305 474686 : for chunk in bytes[xlogoff..].chunks(50) {
2306 474686 : decoder.feed_bytes(chunk);
2307 620536 : while let Some((lsn, recdata)) = decoder.poll_decode().unwrap() {
2308 145850 : walingest
2309 145850 : .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
2310 66 : .await
2311 145850 : .unwrap();
2312 2 : }
2313 474686 : modification.commit(&ctx).await.unwrap();
2314 2 : }
2315 2 :
2316 2 : let duration = started_at.elapsed();
2317 2 : println!("done in {:?}", duration);
2318 2 : }
2319 : }