Line data Source code
1 : //!
2 : //! Parse PostgreSQL WAL records and store them in a neon Timeline.
3 : //!
4 : //! The pipeline for ingesting WAL looks like this:
5 : //!
6 : //! WAL receiver -> WalIngest -> Repository
7 : //!
8 : //! The WAL receiver receives a stream of WAL from the WAL safekeepers,
9 : //! and decodes it to individual WAL records. It feeds the WAL records
10 : //! to WalIngest, which parses them and stores them in the Repository.
11 : //!
12 : //! The neon Repository can store page versions in two formats: as
13 : //! page images, or a WAL records. WalIngest::ingest_record() extracts
14 : //! page images, or as WAL records. WalIngest::ingest_record() extracts
15 : //! page images out of some WAL records, but stores most of them as WAL
16 : //! will call Repository::put_wal_record or put_page_image functions
17 : //! separately for each modified page.
18 : //!
19 : //! To reconstruct a page using a WAL record, the Repository calls the
20 : //! code in walredo.rs. walredo.rs passes most WAL records to the WAL
21 : //! redo Postgres process, but some records it can handle directly with
22 : //! bespoke Rust code.
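 : //!
 : //! A minimal sketch of how these pieces fit together; the receiver
 : //! handle and its `next_record()` method are illustrative stand-ins,
 : //! not the real API:
 : //!
 : //! ```ignore
 : //! let mut walingest = WalIngest::new(&timeline, startpoint, &ctx).await?;
 : //! let mut decoded = DecodedWALRecord::default();
 : //! while let Some((lsn, recdata)) = receiver.next_record().await? {
 : //!     let mut modification = timeline.begin_modification(lsn);
 : //!     walingest
 : //!         .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
 : //!         .await?;
 : //!     modification.commit(&ctx).await?;
 : //! }
 : //! ```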
23 :
24 : use std::time::Duration;
25 : use std::time::SystemTime;
26 :
27 : use pageserver_api::shard::ShardIdentity;
28 : use postgres_ffi::v14::nonrelfile_utils::clogpage_precedes;
29 : use postgres_ffi::v14::nonrelfile_utils::slru_may_delete_clogsegment;
30 : use postgres_ffi::TimestampTz;
31 : use postgres_ffi::{fsm_logical_to_physical, page_is_new, page_set_lsn};
32 :
33 : use anyhow::{bail, Context, Result};
34 : use bytes::{Buf, Bytes, BytesMut};
35 : use tracing::*;
36 : use utils::failpoint_support;
37 : use utils::rate_limit::RateLimit;
38 :
39 : use crate::context::RequestContext;
40 : use crate::metrics::WAL_INGEST;
41 : use crate::pgdatadir_mapping::{DatadirModification, Version};
42 : use crate::span::debug_assert_current_span_has_tenant_and_timeline_id;
43 : use crate::tenant::PageReconstructError;
44 : use crate::tenant::Timeline;
45 : use crate::walrecord::*;
46 : use crate::ZERO_PAGE;
47 : use pageserver_api::key::rel_block_to_key;
48 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
49 : use postgres_ffi::pg_constants;
50 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, INIT_FORKNUM, MAIN_FORKNUM, VISIBILITYMAP_FORKNUM};
51 : use postgres_ffi::v14::nonrelfile_utils::mx_offset_to_member_segment;
52 : use postgres_ffi::v14::xlog_utils::*;
53 : use postgres_ffi::v14::CheckPoint;
54 : use postgres_ffi::TransactionId;
55 : use postgres_ffi::BLCKSZ;
56 : use utils::lsn::Lsn;
57 :
58 : pub struct WalIngest {
59 : shard: ShardIdentity,
60 : checkpoint: CheckPoint,
61 : checkpoint_modified: bool,
62 : warn_ingest_lag: WarnIngestLag,
63 : }
64 :
65 : struct WarnIngestLag {
66 : lag_msg_ratelimit: RateLimit,
67 : future_lsn_msg_ratelimit: RateLimit,
68 : timestamp_invalid_msg_ratelimit: RateLimit,
69 : }
70 :
71 : impl WalIngest {
72 36 : pub async fn new(
73 36 : timeline: &Timeline,
74 36 : startpoint: Lsn,
75 36 : ctx: &RequestContext,
76 36 : ) -> anyhow::Result<WalIngest> {
77 : // Fetch the latest checkpoint into memory, so that we can compare with it
78 : // quickly in `ingest_record` and update it when it changes.
79 36 : let checkpoint_bytes = timeline.get_checkpoint(startpoint, ctx).await?;
80 36 : let checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
81 36 : trace!("CheckPoint.nextXid = {}", checkpoint.nextXid.value);
82 :
83 36 : Ok(WalIngest {
84 36 : shard: *timeline.get_shard_identity(),
85 36 : checkpoint,
86 36 : checkpoint_modified: false,
87 36 : warn_ingest_lag: WarnIngestLag {
88 36 : lag_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
89 36 : future_lsn_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
90 36 : timestamp_invalid_msg_ratelimit: RateLimit::new(std::time::Duration::from_secs(10)),
91 36 : },
92 36 : })
93 36 : }
94 :
95 : ///
96 : /// Decode a PostgreSQL WAL record and store it in the repository, in the given timeline.
97 : ///
98 : /// This function updates the `lsn` field of `DatadirModification`.
99 : ///
100 : /// Helper function to parse a WAL record and call the Timeline's PUT functions for all the
101 : /// relations/pages that the record affects.
102 : ///
103 : /// This function returns `true` if the record was ingested, and `false` if it was filtered out.
104 : ///
105 437556 : pub async fn ingest_record(
106 437556 : &mut self,
107 437556 : recdata: Bytes,
108 437556 : lsn: Lsn,
109 437556 : modification: &mut DatadirModification<'_>,
110 437556 : decoded: &mut DecodedWALRecord,
111 437556 : ctx: &RequestContext,
112 437556 : ) -> anyhow::Result<bool> {
113 437556 : WAL_INGEST.records_received.inc();
114 437556 : let pg_version = modification.tline.pg_version;
115 437556 : let prev_len = modification.len();
116 437556 :
117 437556 : modification.set_lsn(lsn)?;
118 437556 : decode_wal_record(recdata, decoded, pg_version)?;
119 :
120 437556 : let mut buf = decoded.record.clone();
121 437556 : buf.advance(decoded.main_data_offset);
122 437556 :
123 437556 : assert!(!self.checkpoint_modified);
124 437556 : if decoded.xl_xid != pg_constants::INVALID_TRANSACTION_ID
125 437502 : && self.checkpoint.update_next_xid(decoded.xl_xid)
126 6 : {
127 6 : self.checkpoint_modified = true;
128 437550 : }
129 :
130 437556 : failpoint_support::sleep_millis_async!("wal-ingest-record-sleep");
131 :
132 437556 : match decoded.xl_rmid {
133 : pg_constants::RM_HEAP_ID | pg_constants::RM_HEAP2_ID => {
134 : // Heap AM records need some special handling, because they modify VM pages
135 : // without registering them with the standard mechanism.
136 436422 : self.ingest_heapam_record(&mut buf, modification, decoded, ctx)
137 0 : .await?;
138 : }
139 : pg_constants::RM_NEON_ID => {
140 0 : self.ingest_neonrmgr_record(&mut buf, modification, decoded, ctx)
141 0 : .await?;
142 : }
143 : // Handle other special record types
144 : pg_constants::RM_SMGR_ID => {
145 48 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
146 48 :
147 48 : if info == pg_constants::XLOG_SMGR_CREATE {
148 48 : let create = XlSmgrCreate::decode(&mut buf);
149 48 : self.ingest_xlog_smgr_create(modification, &create, ctx)
150 36 : .await?;
151 0 : } else if info == pg_constants::XLOG_SMGR_TRUNCATE {
152 0 : let truncate = XlSmgrTruncate::decode(&mut buf);
153 0 : self.ingest_xlog_smgr_truncate(modification, &truncate, ctx)
154 0 : .await?;
155 0 : }
156 : }
157 : pg_constants::RM_DBASE_ID => {
158 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
159 0 : debug!(%info, %pg_version, "handle RM_DBASE_ID");
160 :
161 0 : if pg_version == 14 {
162 0 : if info == postgres_ffi::v14::bindings::XLOG_DBASE_CREATE {
163 0 : let createdb = XlCreateDatabase::decode(&mut buf);
164 0 : debug!("XLOG_DBASE_CREATE v14");
165 :
166 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
167 0 : .await?;
168 0 : } else if info == postgres_ffi::v14::bindings::XLOG_DBASE_DROP {
169 0 : let dropdb = XlDropDatabase::decode(&mut buf);
170 0 : for tablespace_id in dropdb.tablespace_ids {
171 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
172 0 : modification
173 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
174 0 : .await?;
175 : }
176 0 : }
177 0 : } else if pg_version == 15 {
178 0 : if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_WAL_LOG {
179 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
180 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_CREATE_FILE_COPY {
181 : // The XLOG record was renamed between v14 and v15,
182 : // but the record format is the same.
183 : // So we can reuse XlCreateDatabase here.
184 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
185 0 : let createdb = XlCreateDatabase::decode(&mut buf);
186 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
187 0 : .await?;
188 0 : } else if info == postgres_ffi::v15::bindings::XLOG_DBASE_DROP {
189 0 : let dropdb = XlDropDatabase::decode(&mut buf);
190 0 : for tablespace_id in dropdb.tablespace_ids {
191 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
192 0 : modification
193 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
194 0 : .await?;
195 : }
196 0 : }
197 0 : } else if pg_version == 16 {
198 0 : if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_WAL_LOG {
199 0 : debug!("XLOG_DBASE_CREATE_WAL_LOG: noop");
200 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_CREATE_FILE_COPY {
201 : // The XLOG record was renamed between v14 and v15,
202 : // but the record format is the same.
203 : // So we can reuse XlCreateDatabase here.
204 0 : debug!("XLOG_DBASE_CREATE_FILE_COPY");
205 0 : let createdb = XlCreateDatabase::decode(&mut buf);
206 0 : self.ingest_xlog_dbase_create(modification, &createdb, ctx)
207 0 : .await?;
208 0 : } else if info == postgres_ffi::v16::bindings::XLOG_DBASE_DROP {
209 0 : let dropdb = XlDropDatabase::decode(&mut buf);
210 0 : for tablespace_id in dropdb.tablespace_ids {
211 0 : trace!("Drop db {}, {}", tablespace_id, dropdb.db_id);
212 0 : modification
213 0 : .drop_dbdir(tablespace_id, dropdb.db_id, ctx)
214 0 : .await?;
215 : }
216 0 : }
217 0 : }
218 : }
219 : pg_constants::RM_TBLSPC_ID => {
220 0 : trace!("XLOG_TBLSPC_CREATE/DROP is not handled yet");
221 : }
222 : pg_constants::RM_CLOG_ID => {
223 0 : let info = decoded.xl_info & !pg_constants::XLR_INFO_MASK;
224 0 :
225 0 : if info == pg_constants::CLOG_ZEROPAGE {
226 0 : let pageno = buf.get_u32_le();
227 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
228 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
229 0 : self.put_slru_page_image(
230 0 : modification,
231 0 : SlruKind::Clog,
232 0 : segno,
233 0 : rpageno,
234 0 : ZERO_PAGE.clone(),
235 0 : ctx,
236 0 : )
237 0 : .await?;
238 : } else {
239 0 : assert!(info == pg_constants::CLOG_TRUNCATE);
240 0 : let xlrec = XlClogTruncate::decode(&mut buf);
241 0 : self.ingest_clog_truncate_record(modification, &xlrec, ctx)
242 0 : .await?;
243 : }
244 : }
245 : pg_constants::RM_XACT_ID => {
246 72 : let info = decoded.xl_info & pg_constants::XLOG_XACT_OPMASK;
247 72 :
248 72 : if info == pg_constants::XLOG_XACT_COMMIT || info == pg_constants::XLOG_XACT_ABORT {
249 24 : let parsed_xact =
250 24 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
251 24 : self.ingest_xact_record(
252 24 : modification,
253 24 : &parsed_xact,
254 24 : info == pg_constants::XLOG_XACT_COMMIT,
255 24 : decoded.origin_id,
256 24 : ctx,
257 24 : )
258 0 : .await?;
259 48 : } else if info == pg_constants::XLOG_XACT_COMMIT_PREPARED
260 48 : || info == pg_constants::XLOG_XACT_ABORT_PREPARED
261 : {
262 0 : let parsed_xact =
263 0 : XlXactParsedRecord::decode(&mut buf, decoded.xl_xid, decoded.xl_info);
264 0 : self.ingest_xact_record(
265 0 : modification,
266 0 : &parsed_xact,
267 0 : info == pg_constants::XLOG_XACT_COMMIT_PREPARED,
268 0 : decoded.origin_id,
269 0 : ctx,
270 0 : )
271 0 : .await?;
272 : // Remove twophase file. see RemoveTwoPhaseFile() in postgres code
273 0 : trace!(
274 0 : "Drop twophaseFile for xid {} parsed_xact.xid {} here at {}",
275 : decoded.xl_xid,
276 : parsed_xact.xid,
277 : lsn,
278 : );
279 0 : modification
280 0 : .drop_twophase_file(parsed_xact.xid, ctx)
281 0 : .await?;
282 48 : } else if info == pg_constants::XLOG_XACT_PREPARE {
283 0 : modification
284 0 : .put_twophase_file(decoded.xl_xid, Bytes::copy_from_slice(&buf[..]), ctx)
285 0 : .await?;
286 48 : }
287 : }
288 : pg_constants::RM_MULTIXACT_ID => {
289 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
290 0 :
291 0 : if info == pg_constants::XLOG_MULTIXACT_ZERO_OFF_PAGE {
292 0 : let pageno = buf.get_u32_le();
293 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
294 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
295 0 : self.put_slru_page_image(
296 0 : modification,
297 0 : SlruKind::MultiXactOffsets,
298 0 : segno,
299 0 : rpageno,
300 0 : ZERO_PAGE.clone(),
301 0 : ctx,
302 0 : )
303 0 : .await?;
304 0 : } else if info == pg_constants::XLOG_MULTIXACT_ZERO_MEM_PAGE {
305 0 : let pageno = buf.get_u32_le();
306 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
307 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
308 0 : self.put_slru_page_image(
309 0 : modification,
310 0 : SlruKind::MultiXactMembers,
311 0 : segno,
312 0 : rpageno,
313 0 : ZERO_PAGE.clone(),
314 0 : ctx,
315 0 : )
316 0 : .await?;
317 0 : } else if info == pg_constants::XLOG_MULTIXACT_CREATE_ID {
318 0 : let xlrec = XlMultiXactCreate::decode(&mut buf);
319 0 : self.ingest_multixact_create_record(modification, &xlrec)?;
320 0 : } else if info == pg_constants::XLOG_MULTIXACT_TRUNCATE_ID {
321 0 : let xlrec = XlMultiXactTruncate::decode(&mut buf);
322 0 : self.ingest_multixact_truncate_record(modification, &xlrec, ctx)
323 0 : .await?;
324 0 : }
325 : }
326 : pg_constants::RM_RELMAP_ID => {
327 0 : let xlrec = XlRelmapUpdate::decode(&mut buf);
328 0 : self.ingest_relmap_page(modification, &xlrec, decoded, ctx)
329 0 : .await?;
330 : }
331 : pg_constants::RM_XLOG_ID => {
332 90 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
333 90 :
334 90 : if info == pg_constants::XLOG_NEXTOID {
335 6 : let next_oid = buf.get_u32_le();
336 6 : if self.checkpoint.nextOid != next_oid {
337 6 : self.checkpoint.nextOid = next_oid;
338 6 : self.checkpoint_modified = true;
339 6 : }
340 84 : } else if info == pg_constants::XLOG_CHECKPOINT_ONLINE
341 84 : || info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
342 : {
343 6 : let mut checkpoint_bytes = [0u8; SIZEOF_CHECKPOINT];
344 6 : buf.copy_to_slice(&mut checkpoint_bytes);
345 6 : let xlog_checkpoint = CheckPoint::decode(&checkpoint_bytes)?;
346 6 : trace!(
347 0 : "xlog_checkpoint.oldestXid={}, checkpoint.oldestXid={}",
348 : xlog_checkpoint.oldestXid,
349 : self.checkpoint.oldestXid
350 : );
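 : // Take the newer of the two oldestXid values. XIDs wrap around, so this
 : // uses the same modular comparison as PostgreSQL's TransactionIdPrecedes():
 : // the wrapping difference, reinterpreted as a signed 32-bit value, is
 : // negative iff our value logically precedes the one from the WAL record.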
351 6 : if (self
352 6 : .checkpoint
353 6 : .oldestXid
354 6 : .wrapping_sub(xlog_checkpoint.oldestXid) as i32)
355 6 : < 0
356 0 : {
357 0 : self.checkpoint.oldestXid = xlog_checkpoint.oldestXid;
358 6 : }
359 6 : trace!(
360 0 : "xlog_checkpoint.oldestActiveXid={}, checkpoint.oldestActiveXid={}",
361 : xlog_checkpoint.oldestActiveXid,
362 : self.checkpoint.oldestActiveXid
363 : );
364 :
365 : // A shutdown checkpoint has `oldestActiveXid == InvalidTransactionId`,
366 : // because at shutdown, all in-progress transactions will implicitly
367 : // end. Postgres startup code knows that, and allows hot standby to start
368 : // immediately from a shutdown checkpoint.
369 : //
370 : // In Neon, Postgres hot standby startup always behaves as if starting from
371 : // an online checkpoint. It needs a valid `oldestActiveXid` value, so
372 : // instead of overwriting self.checkpoint.oldestActiveXid with
373 : // InvalidTransactionId from the checkpoint WAL record, update it to a
374 : // proper value, knowing that there are no in-progress transactions at this
375 : // point, except for prepared transactions.
376 : //
377 : // See also the neon code changes in the InitWalRecovery() function.
378 6 : if xlog_checkpoint.oldestActiveXid == pg_constants::INVALID_TRANSACTION_ID
379 6 : && info == pg_constants::XLOG_CHECKPOINT_SHUTDOWN
380 : {
381 6 : let mut oldest_active_xid = self.checkpoint.nextXid.value as u32;
382 6 : for xid in modification.tline.list_twophase_files(lsn, ctx).await? {
383 0 : if (xid.wrapping_sub(oldest_active_xid) as i32) < 0 {
384 0 : oldest_active_xid = xid;
385 0 : }
386 : }
387 6 : self.checkpoint.oldestActiveXid = oldest_active_xid;
388 0 : } else {
389 0 : self.checkpoint.oldestActiveXid = xlog_checkpoint.oldestActiveXid;
390 0 : }
391 :
392 : // Write a new checkpoint key-value pair on every checkpoint record, even
393 : // if nothing really changed. Not strictly required, but it seems nice to
394 : // have some trace of the checkpoint records in the layer files at the same
395 : // LSNs.
396 6 : self.checkpoint_modified = true;
397 78 : }
398 : }
399 : pg_constants::RM_LOGICALMSG_ID => {
400 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
401 0 :
402 0 : if info == pg_constants::XLOG_LOGICAL_MESSAGE {
403 0 : let xlrec = crate::walrecord::XlLogicalMessage::decode(&mut buf);
404 0 : let prefix = std::str::from_utf8(&buf[0..xlrec.prefix_size - 1])?;
405 0 : let message = &buf[xlrec.prefix_size..xlrec.prefix_size + xlrec.message_size];
406 0 : if prefix == "neon-test" {
407 : // This is a convenient way to make the WAL ingestion pause at
408 : // a particular point in the WAL. For more fine-grained control,
409 : // we could peek into the message and only pause if it contains
410 : // a particular string, for example, but this is enough for now.
411 0 : failpoint_support::sleep_millis_async!("wal-ingest-logical-message-sleep");
412 0 : } else if let Some(path) = prefix.strip_prefix("neon-file:") {
413 0 : modification.put_file(path, message, ctx).await?;
414 0 : }
415 0 : }
416 : }
417 : pg_constants::RM_STANDBY_ID => {
418 48 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
419 48 : if info == pg_constants::XLOG_RUNNING_XACTS {
420 0 : let xlrec = crate::walrecord::XlRunningXacts::decode(&mut buf);
421 0 : self.checkpoint.oldestActiveXid = xlrec.oldest_running_xid;
422 0 : self.checkpoint_modified = true;
423 48 : }
424 : }
425 : pg_constants::RM_REPLORIGIN_ID => {
426 0 : let info = decoded.xl_info & pg_constants::XLR_RMGR_INFO_MASK;
427 0 : if info == pg_constants::XLOG_REPLORIGIN_SET {
428 0 : let xlrec = crate::walrecord::XlReploriginSet::decode(&mut buf);
429 0 : modification
430 0 : .set_replorigin(xlrec.node_id, xlrec.remote_lsn)
431 0 : .await?
432 0 : } else if info == pg_constants::XLOG_REPLORIGIN_DROP {
433 0 : let xlrec = crate::walrecord::XlReploriginDrop::decode(&mut buf);
434 0 : modification.drop_replorigin(xlrec.node_id).await?
435 0 : }
436 : }
437 876 : _x => {
438 876 : // TODO: should probably log & fail here instead of blindly
439 876 : // doing something without understanding the protocol
440 876 : }
441 : }
442 :
443 : // Iterate through all the blocks that the record modifies, and
444 : // "put" a separate copy of the record for each block.
445 437556 : for blk in decoded.blocks.iter() {
446 436926 : let rel = RelTag {
447 436926 : spcnode: blk.rnode_spcnode,
448 436926 : dbnode: blk.rnode_dbnode,
449 436926 : relnode: blk.rnode_relnode,
450 436926 : forknum: blk.forknum,
451 436926 : };
452 436926 :
453 436926 : let key = rel_block_to_key(rel, blk.blkno);
454 436926 : let key_is_local = self.shard.is_key_local(&key);
455 436926 :
456 436926 : tracing::debug!(
457 : lsn=%lsn,
458 : key=%key,
459 0 : "ingest: shard decision {} (checkpoint={})",
460 0 : if !key_is_local { "drop" } else { "keep" },
461 : self.checkpoint_modified
462 : );
463 :
464 436926 : if !key_is_local {
465 0 : if self.shard.is_shard_zero() {
466 : // Shard 0 tracks relation sizes. Although we will not store this block, we will observe
467 : // its blkno in case it implicitly extends a relation.
468 0 : self.observe_decoded_block(modification, blk, ctx).await?;
469 0 : }
470 :
471 0 : continue;
472 436926 : }
473 436926 : self.ingest_decoded_block(modification, lsn, decoded, blk, ctx)
474 863 : .await?;
475 : }
476 :
477 : // If checkpoint data was updated, store the new version in the repository
478 437556 : if self.checkpoint_modified {
479 18 : let new_checkpoint_bytes = self.checkpoint.encode()?;
480 :
481 18 : modification.put_checkpoint(new_checkpoint_bytes)?;
482 18 : self.checkpoint_modified = false;
483 437538 : }
484 :
485 : // Note that at this point this record is only cached in the modification
486 : // until commit() is called to flush the data into the repository and update
487 : // the latest LSN.
488 :
489 437556 : Ok(modification.len() > prev_len)
490 437556 : }
491 :
492 : /// Do not store this block, but observe it for the purposes of updating our relation size state.
493 0 : async fn observe_decoded_block(
494 0 : &mut self,
495 0 : modification: &mut DatadirModification<'_>,
496 0 : blk: &DecodedBkpBlock,
497 0 : ctx: &RequestContext,
498 0 : ) -> Result<(), PageReconstructError> {
499 0 : let rel = RelTag {
500 0 : spcnode: blk.rnode_spcnode,
501 0 : dbnode: blk.rnode_dbnode,
502 0 : relnode: blk.rnode_relnode,
503 0 : forknum: blk.forknum,
504 0 : };
505 0 : self.handle_rel_extend(modification, rel, blk.blkno, ctx)
506 0 : .await
507 0 : }
508 :
509 436926 : async fn ingest_decoded_block(
510 436926 : &mut self,
511 436926 : modification: &mut DatadirModification<'_>,
512 436926 : lsn: Lsn,
513 436926 : decoded: &DecodedWALRecord,
514 436926 : blk: &DecodedBkpBlock,
515 436926 : ctx: &RequestContext,
516 436926 : ) -> Result<(), PageReconstructError> {
517 436926 : let rel = RelTag {
518 436926 : spcnode: blk.rnode_spcnode,
519 436926 : dbnode: blk.rnode_dbnode,
520 436926 : relnode: blk.rnode_relnode,
521 436926 : forknum: blk.forknum,
522 436926 : };
523 436926 :
524 436926 : //
525 436926 : // Instead of storing a full-page-image WAL record,
526 436926 : // it is better to store the extracted image: we can skip wal-redo
527 436926 : // in this case. Also, some FPI records may contain multiple (up to 32) pages,
528 436926 : // so they would have to be copied multiple times.
529 436926 : //
530 436926 : if blk.apply_image
531 180 : && blk.has_image
532 180 : && decoded.xl_rmid == pg_constants::RM_XLOG_ID
533 72 : && (decoded.xl_info == pg_constants::XLOG_FPI
534 0 : || decoded.xl_info == pg_constants::XLOG_FPI_FOR_HINT)
535 : // compression of WAL is not yet supported: fall back to storing the original WAL record
536 72 : && !postgres_ffi::bkpimage_is_compressed(blk.bimg_info, modification.tline.pg_version)
537 : // do not materialize null pages because they will most likely soon be replaced with real data
538 72 : && blk.bimg_len != 0
539 : {
540 : // Extract page image from FPI record
541 72 : let img_len = blk.bimg_len as usize;
542 72 : let img_offs = blk.bimg_offset as usize;
543 72 : let mut image = BytesMut::with_capacity(BLCKSZ as usize);
544 72 : image.extend_from_slice(&decoded.record[img_offs..img_offs + img_len]);
545 72 :
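 : // If the FPI was stored with a "hole" (the unused space between
 : // pd_lower and pd_upper), splice the zeroed gap back in to reconstruct
 : // the full page. E.g. with hole_offset=100 and hole_length=50, the
 : // stored image is the first 100 bytes followed directly by the tail,
 : // and 50 zero bytes are re-inserted between them.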
546 72 : if blk.hole_length != 0 {
547 0 : let tail = image.split_off(blk.hole_offset as usize);
548 0 : image.resize(image.len() + blk.hole_length as usize, 0u8);
549 0 : image.unsplit(tail);
550 72 : }
551 : //
552 : // Match the logic of XLogReadBufferForRedoExtended:
553 : // The page may be uninitialized. If so, we can't set the LSN because
554 : // that would corrupt the page.
555 : //
556 72 : if !page_is_new(&image) {
557 54 : page_set_lsn(&mut image, lsn)
558 18 : }
559 72 : assert_eq!(image.len(), BLCKSZ as usize);
560 72 : self.put_rel_page_image(modification, rel, blk.blkno, image.freeze(), ctx)
561 15 : .await?;
562 : } else {
563 436854 : let rec = NeonWalRecord::Postgres {
564 436854 : will_init: blk.will_init || blk.apply_image,
565 436854 : rec: decoded.record.clone(),
566 436854 : };
567 436854 : self.put_rel_wal_record(modification, rel, blk.blkno, rec, ctx)
568 848 : .await?;
569 : }
570 436926 : Ok(())
571 436926 : }
572 :
573 436422 : async fn ingest_heapam_record(
574 436422 : &mut self,
575 436422 : buf: &mut Bytes,
576 436422 : modification: &mut DatadirModification<'_>,
577 436422 : decoded: &DecodedWALRecord,
578 436422 : ctx: &RequestContext,
579 436422 : ) -> anyhow::Result<()> {
580 436422 : // Handle VM bit updates that are implicitly part of heap records.
581 436422 :
582 436422 : // First, look at the record to determine which VM bits need
583 436422 : // to be cleared. If either of these variables is set, we
584 436422 : // need to clear the corresponding bits in the visibility map.
585 436422 : let mut new_heap_blkno: Option<u32> = None;
586 436422 : let mut old_heap_blkno: Option<u32> = None;
587 436422 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
588 436422 :
589 436422 : match modification.tline.pg_version {
590 : 14 => {
591 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
592 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
593 0 :
594 0 : if info == pg_constants::XLOG_HEAP_INSERT {
595 0 : let xlrec = v14::XlHeapInsert::decode(buf);
596 0 : assert_eq!(0, buf.remaining());
597 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
598 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
599 0 : }
600 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
601 0 : let xlrec = v14::XlHeapDelete::decode(buf);
602 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
603 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
604 0 : }
605 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
606 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
607 : {
608 0 : let xlrec = v14::XlHeapUpdate::decode(buf);
609 0 : // the size of tuple data is inferred from the size of the record.
610 0 : // we can't validate the remaining number of bytes without parsing
611 0 : // the tuple data.
612 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
613 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
614 0 : }
615 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
616 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
617 0 : // non-HOT update where the new tuple goes to a different page than
618 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
619 0 : // set.
620 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
621 0 : }
622 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
623 0 : let xlrec = v14::XlHeapLock::decode(buf);
624 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
625 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
626 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
627 0 : }
628 0 : }
629 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
630 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
631 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
632 0 : let xlrec = v14::XlHeapMultiInsert::decode(buf);
633 :
634 0 : let offset_array_len =
635 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
636 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
637 0 : 0
638 : } else {
639 0 : size_of::<u16>() * xlrec.ntuples as usize
640 : };
641 0 : assert_eq!(offset_array_len, buf.remaining());
642 :
643 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
644 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
645 0 : }
646 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
647 0 : let xlrec = v14::XlHeapLockUpdated::decode(buf);
648 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
649 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
650 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
651 0 : }
652 0 : }
653 : } else {
654 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
655 : }
656 : }
657 : 15 => {
658 436422 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
659 435858 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
660 435858 :
661 435858 : if info == pg_constants::XLOG_HEAP_INSERT {
662 435828 : let xlrec = v15::XlHeapInsert::decode(buf);
663 435828 : assert_eq!(0, buf.remaining());
664 435828 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
665 12 : new_heap_blkno = Some(decoded.blocks[0].blkno);
666 435816 : }
667 30 : } else if info == pg_constants::XLOG_HEAP_DELETE {
668 0 : let xlrec = v15::XlHeapDelete::decode(buf);
669 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
670 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
671 0 : }
672 30 : } else if info == pg_constants::XLOG_HEAP_UPDATE
673 6 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
674 : {
675 24 : let xlrec = v15::XlHeapUpdate::decode(buf);
676 24 : // the size of tuple data is inferred from the size of the record.
677 24 : // we can't validate the remaining number of bytes without parsing
678 24 : // the tuple data.
679 24 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
680 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
681 24 : }
682 24 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
683 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
684 0 : // non-HOT update where the new tuple goes to a different page than
685 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
686 0 : // set.
687 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
688 24 : }
689 6 : } else if info == pg_constants::XLOG_HEAP_LOCK {
690 0 : let xlrec = v15::XlHeapLock::decode(buf);
691 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
692 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
693 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
694 0 : }
695 6 : }
696 564 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
697 564 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
698 564 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
699 126 : let xlrec = v15::XlHeapMultiInsert::decode(buf);
700 :
701 126 : let offset_array_len =
702 126 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
703 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
704 6 : 0
705 : } else {
706 120 : size_of::<u16>() * xlrec.ntuples as usize
707 : };
708 126 : assert_eq!(offset_array_len, buf.remaining());
709 :
710 126 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
711 24 : new_heap_blkno = Some(decoded.blocks[0].blkno);
712 102 : }
713 438 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
714 0 : let xlrec = v15::XlHeapLockUpdated::decode(buf);
715 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
716 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
717 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
718 0 : }
719 438 : }
720 : } else {
721 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
722 : }
723 : }
724 : 16 => {
725 0 : if decoded.xl_rmid == pg_constants::RM_HEAP_ID {
726 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
727 0 :
728 0 : if info == pg_constants::XLOG_HEAP_INSERT {
729 0 : let xlrec = v16::XlHeapInsert::decode(buf);
730 0 : assert_eq!(0, buf.remaining());
731 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
732 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
733 0 : }
734 0 : } else if info == pg_constants::XLOG_HEAP_DELETE {
735 0 : let xlrec = v16::XlHeapDelete::decode(buf);
736 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
737 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
738 0 : }
739 0 : } else if info == pg_constants::XLOG_HEAP_UPDATE
740 0 : || info == pg_constants::XLOG_HEAP_HOT_UPDATE
741 : {
742 0 : let xlrec = v16::XlHeapUpdate::decode(buf);
743 0 : // the size of tuple data is inferred from the size of the record.
744 0 : // we can't validate the remaining number of bytes without parsing
745 0 : // the tuple data.
746 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
747 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
748 0 : }
749 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
750 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
751 0 : // non-HOT update where the new tuple goes to a different page than
752 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
753 0 : // set.
754 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
755 0 : }
756 0 : } else if info == pg_constants::XLOG_HEAP_LOCK {
757 0 : let xlrec = v16::XlHeapLock::decode(buf);
758 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
759 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
760 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
761 0 : }
762 0 : }
763 0 : } else if decoded.xl_rmid == pg_constants::RM_HEAP2_ID {
764 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
765 0 : if info == pg_constants::XLOG_HEAP2_MULTI_INSERT {
766 0 : let xlrec = v16::XlHeapMultiInsert::decode(buf);
767 :
768 0 : let offset_array_len =
769 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
770 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
771 0 : 0
772 : } else {
773 0 : size_of::<u16>() * xlrec.ntuples as usize
774 : };
775 0 : assert_eq!(offset_array_len, buf.remaining());
776 :
777 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
778 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
779 0 : }
780 0 : } else if info == pg_constants::XLOG_HEAP2_LOCK_UPDATED {
781 0 : let xlrec = v16::XlHeapLockUpdated::decode(buf);
782 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
783 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
784 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
785 0 : }
786 0 : }
787 : } else {
788 0 : bail!("Unknown RMGR {} for Heap decoding", decoded.xl_rmid);
789 : }
790 : }
791 0 : _ => {}
792 : }
793 :
794 : // Clear the VM bits if required.
795 436422 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
796 36 : let vm_rel = RelTag {
797 36 : forknum: VISIBILITYMAP_FORKNUM,
798 36 : spcnode: decoded.blocks[0].rnode_spcnode,
799 36 : dbnode: decoded.blocks[0].rnode_dbnode,
800 36 : relnode: decoded.blocks[0].rnode_relnode,
801 36 : };
802 36 :
803 36 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
804 36 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
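 : // Each VM page tracks many heap blocks (two bits per heap block), so
 : // HEAPBLK_TO_MAPBLOCK divides the heap block number by the number of
 : // heap blocks covered by one VM page.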
805 :
806 : // Sometimes, Postgres seems to create heap WAL records with the
807 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
808 : // not set. In fact, it's possible that the VM page does not exist at all.
809 : // In that case, we don't want to store a record to clear the VM bit;
810 : // replaying it would fail to find the previous image of the page, because
811 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
812 : // record if they don't.
813 36 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
814 36 : if let Some(blknum) = new_vm_blk {
815 36 : if blknum >= vm_size {
816 0 : new_vm_blk = None;
817 36 : }
818 0 : }
819 36 : if let Some(blknum) = old_vm_blk {
820 0 : if blknum >= vm_size {
821 0 : old_vm_blk = None;
822 0 : }
823 36 : }
824 :
825 36 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
826 36 : if new_vm_blk == old_vm_blk {
827 : // An UPDATE record that needs to clear the bits for both old and the
828 : // new page, both of which reside on the same VM page.
829 0 : self.put_rel_wal_record(
830 0 : modification,
831 0 : vm_rel,
832 0 : new_vm_blk.unwrap(),
833 0 : NeonWalRecord::ClearVisibilityMapFlags {
834 0 : new_heap_blkno,
835 0 : old_heap_blkno,
836 0 : flags,
837 0 : },
838 0 : ctx,
839 0 : )
840 0 : .await?;
841 : } else {
842 : // Clear VM bits for one heap page, or for two pages that reside on
843 : // different VM pages.
844 36 : if let Some(new_vm_blk) = new_vm_blk {
845 36 : self.put_rel_wal_record(
846 36 : modification,
847 36 : vm_rel,
848 36 : new_vm_blk,
849 36 : NeonWalRecord::ClearVisibilityMapFlags {
850 36 : new_heap_blkno,
851 36 : old_heap_blkno: None,
852 36 : flags,
853 36 : },
854 36 : ctx,
855 36 : )
856 0 : .await?;
857 0 : }
858 36 : if let Some(old_vm_blk) = old_vm_blk {
859 0 : self.put_rel_wal_record(
860 0 : modification,
861 0 : vm_rel,
862 0 : old_vm_blk,
863 0 : NeonWalRecord::ClearVisibilityMapFlags {
864 0 : new_heap_blkno: None,
865 0 : old_heap_blkno,
866 0 : flags,
867 0 : },
868 0 : ctx,
869 0 : )
870 0 : .await?;
871 36 : }
872 : }
873 0 : }
874 436386 : }
875 :
876 436422 : Ok(())
877 436422 : }
878 :
879 0 : async fn ingest_neonrmgr_record(
880 0 : &mut self,
881 0 : buf: &mut Bytes,
882 0 : modification: &mut DatadirModification<'_>,
883 0 : decoded: &DecodedWALRecord,
884 0 : ctx: &RequestContext,
885 0 : ) -> anyhow::Result<()> {
886 0 : // Handle VM bit updates that are implicitly part of heap records.
887 0 :
888 0 : // First, look at the record to determine which VM bits need
889 0 : // to be cleared. If either of these variables is set, we
890 0 : // need to clear the corresponding bits in the visibility map.
891 0 : let mut new_heap_blkno: Option<u32> = None;
892 0 : let mut old_heap_blkno: Option<u32> = None;
893 0 : let mut flags = pg_constants::VISIBILITYMAP_VALID_BITS;
894 0 : let pg_version = modification.tline.pg_version;
895 0 :
896 0 : assert_eq!(decoded.xl_rmid, pg_constants::RM_NEON_ID);
897 :
898 0 : match pg_version {
899 : 16 => {
900 0 : let info = decoded.xl_info & pg_constants::XLOG_HEAP_OPMASK;
901 0 :
902 0 : match info {
903 : pg_constants::XLOG_NEON_HEAP_INSERT => {
904 0 : let xlrec = v16::rm_neon::XlNeonHeapInsert::decode(buf);
905 0 : assert_eq!(0, buf.remaining());
906 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
907 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
908 0 : }
909 : }
910 : pg_constants::XLOG_NEON_HEAP_DELETE => {
911 0 : let xlrec = v16::rm_neon::XlNeonHeapDelete::decode(buf);
912 0 : if (xlrec.flags & pg_constants::XLH_DELETE_ALL_VISIBLE_CLEARED) != 0 {
913 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
914 0 : }
915 : }
916 : pg_constants::XLOG_NEON_HEAP_UPDATE
917 : | pg_constants::XLOG_NEON_HEAP_HOT_UPDATE => {
918 0 : let xlrec = v16::rm_neon::XlNeonHeapUpdate::decode(buf);
919 0 : // the size of tuple data is inferred from the size of the record.
920 0 : // we can't validate the remaining number of bytes without parsing
921 0 : // the tuple data.
922 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED) != 0 {
923 0 : old_heap_blkno = Some(decoded.blocks.last().unwrap().blkno);
924 0 : }
925 0 : if (xlrec.flags & pg_constants::XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED) != 0 {
926 0 : // PostgreSQL only uses XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED on a
927 0 : // non-HOT update where the new tuple goes to a different page than
928 0 : // the old one. Otherwise, only XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED is
929 0 : // set.
930 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
931 0 : }
932 : }
933 : pg_constants::XLOG_NEON_HEAP_MULTI_INSERT => {
934 0 : let xlrec = v16::rm_neon::XlNeonHeapMultiInsert::decode(buf);
935 :
936 0 : let offset_array_len =
937 0 : if decoded.xl_info & pg_constants::XLOG_HEAP_INIT_PAGE > 0 {
938 : // the offsets array is omitted if XLOG_HEAP_INIT_PAGE is set
939 0 : 0
940 : } else {
941 0 : size_of::<u16>() * xlrec.ntuples as usize
942 : };
943 0 : assert_eq!(offset_array_len, buf.remaining());
944 :
945 0 : if (xlrec.flags & pg_constants::XLH_INSERT_ALL_VISIBLE_CLEARED) != 0 {
946 0 : new_heap_blkno = Some(decoded.blocks[0].blkno);
947 0 : }
948 : }
949 : pg_constants::XLOG_NEON_HEAP_LOCK => {
950 0 : let xlrec = v16::rm_neon::XlNeonHeapLock::decode(buf);
951 0 : if (xlrec.flags & pg_constants::XLH_LOCK_ALL_FROZEN_CLEARED) != 0 {
952 0 : old_heap_blkno = Some(decoded.blocks[0].blkno);
953 0 : flags = pg_constants::VISIBILITYMAP_ALL_FROZEN;
954 0 : }
955 : }
956 0 : info => bail!("Unknown WAL record type for Neon RMGR: {}", info),
957 : }
958 : }
959 0 : _ => bail!(
960 0 : "Neon RMGR has no known compatibility with PostgreSQL version {}",
961 0 : pg_version
962 0 : ),
963 : }
964 :
965 : // Clear the VM bits if required.
966 0 : if new_heap_blkno.is_some() || old_heap_blkno.is_some() {
967 0 : let vm_rel = RelTag {
968 0 : forknum: VISIBILITYMAP_FORKNUM,
969 0 : spcnode: decoded.blocks[0].rnode_spcnode,
970 0 : dbnode: decoded.blocks[0].rnode_dbnode,
971 0 : relnode: decoded.blocks[0].rnode_relnode,
972 0 : };
973 0 :
974 0 : let mut new_vm_blk = new_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
975 0 : let mut old_vm_blk = old_heap_blkno.map(pg_constants::HEAPBLK_TO_MAPBLOCK);
976 :
977 : // Sometimes, Postgres seems to create heap WAL records with the
978 : // ALL_VISIBLE_CLEARED flag set, even though the bit in the VM page is
979 : // not set. In fact, it's possible that the VM page does not exist at all.
980 : // In that case, we don't want to store a record to clear the VM bit;
981 : // replaying it would fail to find the previous image of the page, because
982 : // it doesn't exist. So check if the VM page(s) exist, and skip the WAL
983 : // record if they don't.
984 0 : let vm_size = get_relsize(modification, vm_rel, ctx).await?;
985 0 : if let Some(blknum) = new_vm_blk {
986 0 : if blknum >= vm_size {
987 0 : new_vm_blk = None;
988 0 : }
989 0 : }
990 0 : if let Some(blknum) = old_vm_blk {
991 0 : if blknum >= vm_size {
992 0 : old_vm_blk = None;
993 0 : }
994 0 : }
995 :
996 0 : if new_vm_blk.is_some() || old_vm_blk.is_some() {
997 0 : if new_vm_blk == old_vm_blk {
998 : // An UPDATE record that needs to clear the bits for both old and the
999 : // new page, both of which reside on the same VM page.
1000 0 : self.put_rel_wal_record(
1001 0 : modification,
1002 0 : vm_rel,
1003 0 : new_vm_blk.unwrap(),
1004 0 : NeonWalRecord::ClearVisibilityMapFlags {
1005 0 : new_heap_blkno,
1006 0 : old_heap_blkno,
1007 0 : flags,
1008 0 : },
1009 0 : ctx,
1010 0 : )
1011 0 : .await?;
1012 : } else {
1013 : // Clear VM bits for one heap page, or for two pages that reside on
1014 : // different VM pages.
1015 0 : if let Some(new_vm_blk) = new_vm_blk {
1016 0 : self.put_rel_wal_record(
1017 0 : modification,
1018 0 : vm_rel,
1019 0 : new_vm_blk,
1020 0 : NeonWalRecord::ClearVisibilityMapFlags {
1021 0 : new_heap_blkno,
1022 0 : old_heap_blkno: None,
1023 0 : flags,
1024 0 : },
1025 0 : ctx,
1026 0 : )
1027 0 : .await?;
1028 0 : }
1029 0 : if let Some(old_vm_blk) = old_vm_blk {
1030 0 : self.put_rel_wal_record(
1031 0 : modification,
1032 0 : vm_rel,
1033 0 : old_vm_blk,
1034 0 : NeonWalRecord::ClearVisibilityMapFlags {
1035 0 : new_heap_blkno: None,
1036 0 : old_heap_blkno,
1037 0 : flags,
1038 0 : },
1039 0 : ctx,
1040 0 : )
1041 0 : .await?;
1042 0 : }
1043 : }
1044 0 : }
1045 0 : }
1046 :
1047 0 : Ok(())
1048 0 : }
1049 :
1050 : /// Subroutine of ingest_record(), to handle an XLOG_DBASE_CREATE record.
1051 0 : async fn ingest_xlog_dbase_create(
1052 0 : &mut self,
1053 0 : modification: &mut DatadirModification<'_>,
1054 0 : rec: &XlCreateDatabase,
1055 0 : ctx: &RequestContext,
1056 0 : ) -> anyhow::Result<()> {
1057 0 : let db_id = rec.db_id;
1058 0 : let tablespace_id = rec.tablespace_id;
1059 0 : let src_db_id = rec.src_db_id;
1060 0 : let src_tablespace_id = rec.src_tablespace_id;
1061 :
1062 0 : let rels = modification
1063 0 : .tline
1064 0 : .list_rels(
1065 0 : src_tablespace_id,
1066 0 : src_db_id,
1067 0 : Version::Modified(modification),
1068 0 : ctx,
1069 0 : )
1070 0 : .await?;
1071 :
1072 0 : debug!("ingest_xlog_dbase_create: {} rels", rels.len());
1073 :
1074 : // Copy relfilemap
1075 0 : let filemap = modification
1076 0 : .tline
1077 0 : .get_relmap_file(
1078 0 : src_tablespace_id,
1079 0 : src_db_id,
1080 0 : Version::Modified(modification),
1081 0 : ctx,
1082 0 : )
1083 0 : .await?;
1084 0 : modification
1085 0 : .put_relmap_file(tablespace_id, db_id, filemap, ctx)
1086 0 : .await?;
1087 :
1088 0 : let mut num_rels_copied = 0;
1089 0 : let mut num_blocks_copied = 0;
1090 0 : for src_rel in rels {
1091 0 : assert_eq!(src_rel.spcnode, src_tablespace_id);
1092 0 : assert_eq!(src_rel.dbnode, src_db_id);
1093 :
1094 0 : let nblocks = modification
1095 0 : .tline
1096 0 : .get_rel_size(src_rel, Version::Modified(modification), ctx)
1097 0 : .await?;
1098 0 : let dst_rel = RelTag {
1099 0 : spcnode: tablespace_id,
1100 0 : dbnode: db_id,
1101 0 : relnode: src_rel.relnode,
1102 0 : forknum: src_rel.forknum,
1103 0 : };
1104 0 :
1105 0 : modification.put_rel_creation(dst_rel, nblocks, ctx).await?;
1106 :
1107 : // Copy content
1108 0 : debug!("copying rel {} to {}, {} blocks", src_rel, dst_rel, nblocks);
1109 0 : for blknum in 0..nblocks {
1110 : // Sharding:
1111 : // - src and dst are always on the same shard, because they differ only by dbNode, and
1112 : // dbNode is not included in the hash inputs for sharding.
1113 : // - This WAL command is replayed on all shards, but each shard only copies the blocks
1114 : // that belong to it.
1115 0 : let src_key = rel_block_to_key(src_rel, blknum);
1116 0 : if !self.shard.is_key_local(&src_key) {
1117 0 : debug!(
1118 0 : "Skipping non-local key {} during XLOG_DBASE_CREATE",
1119 : src_key
1120 : );
1121 0 : continue;
1122 0 : }
1123 0 : debug!(
1124 0 : "copying block {} from {} ({}) to {}",
1125 : blknum, src_rel, src_key, dst_rel
1126 : );
1127 :
1128 0 : let content = modification
1129 0 : .tline
1130 0 : .get_rel_page_at_lsn(src_rel, blknum, Version::Modified(modification), ctx)
1131 0 : .await?;
1132 0 : modification.put_rel_page_image(dst_rel, blknum, content)?;
1133 0 : num_blocks_copied += 1;
1134 : }
1135 :
1136 0 : num_rels_copied += 1;
1137 : }
1138 :
1139 0 : info!(
1140 0 : "Created database {}/{}, copied {} blocks in {} rels",
1141 : tablespace_id, db_id, num_blocks_copied, num_rels_copied
1142 : );
1143 0 : Ok(())
1144 0 : }
1145 :
1146 48 : async fn ingest_xlog_smgr_create(
1147 48 : &mut self,
1148 48 : modification: &mut DatadirModification<'_>,
1149 48 : rec: &XlSmgrCreate,
1150 48 : ctx: &RequestContext,
1151 48 : ) -> anyhow::Result<()> {
1152 48 : let rel = RelTag {
1153 48 : spcnode: rec.rnode.spcnode,
1154 48 : dbnode: rec.rnode.dbnode,
1155 48 : relnode: rec.rnode.relnode,
1156 48 : forknum: rec.forknum,
1157 48 : };
1158 48 : self.put_rel_creation(modification, rel, ctx).await?;
1159 48 : Ok(())
1160 48 : }
1161 :
1162 : /// Subroutine of ingest_record(), to handle an XLOG_SMGR_TRUNCATE record.
1163 : ///
1164 : /// This is the same logic as in PostgreSQL's smgr_redo() function.
1165 0 : async fn ingest_xlog_smgr_truncate(
1166 0 : &mut self,
1167 0 : modification: &mut DatadirModification<'_>,
1168 0 : rec: &XlSmgrTruncate,
1169 0 : ctx: &RequestContext,
1170 0 : ) -> anyhow::Result<()> {
1171 0 : let spcnode = rec.rnode.spcnode;
1172 0 : let dbnode = rec.rnode.dbnode;
1173 0 : let relnode = rec.rnode.relnode;
1174 0 :
1175 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_HEAP) != 0 {
1176 0 : let rel = RelTag {
1177 0 : spcnode,
1178 0 : dbnode,
1179 0 : relnode,
1180 0 : forknum: MAIN_FORKNUM,
1181 0 : };
1182 0 : self.put_rel_truncation(modification, rel, rec.blkno, ctx)
1183 0 : .await?;
1184 0 : }
1185 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_FSM) != 0 {
1186 0 : let rel = RelTag {
1187 0 : spcnode,
1188 0 : dbnode,
1189 0 : relnode,
1190 0 : forknum: FSM_FORKNUM,
1191 0 : };
1192 0 :
1193 0 : let fsm_logical_page_no = rec.blkno / pg_constants::SLOTS_PER_FSM_PAGE;
1194 0 : let mut fsm_physical_page_no = fsm_logical_to_physical(fsm_logical_page_no);
1195 0 : if rec.blkno % pg_constants::SLOTS_PER_FSM_PAGE != 0 {
1196 : // The tail of the last remaining FSM page has to be zeroed.
1197 : // We are not precise here: instead of digging into the FSM bitmap format, we just clear the whole page.
1198 0 : modification.put_rel_page_image(rel, fsm_physical_page_no, ZERO_PAGE.clone())?;
1199 0 : fsm_physical_page_no += 1;
1200 0 : }
1201 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1202 0 : if nblocks > fsm_physical_page_no {
1203 : // check if something to do: FSM is larger than truncate position
1204 0 : self.put_rel_truncation(modification, rel, fsm_physical_page_no, ctx)
1205 0 : .await?;
1206 0 : }
1207 0 : }
1208 0 : if (rec.flags & pg_constants::SMGR_TRUNCATE_VM) != 0 {
1209 0 : let rel = RelTag {
1210 0 : spcnode,
1211 0 : dbnode,
1212 0 : relnode,
1213 0 : forknum: VISIBILITYMAP_FORKNUM,
1214 0 : };
1215 0 :
1216 0 : let mut vm_page_no = rec.blkno / pg_constants::VM_HEAPBLOCKS_PER_PAGE;
1217 0 : if rec.blkno % pg_constants::VM_HEAPBLOCKS_PER_PAGE != 0 {
1218 : // The tail of the last remaining VM page has to be zeroed.
1219 : // We are not precise here: instead of digging into the VM bitmap format, we just clear the whole page.
1220 0 : modification.put_rel_page_image(rel, vm_page_no, ZERO_PAGE.clone())?;
1221 0 : vm_page_no += 1;
1222 0 : }
1223 0 : let nblocks = get_relsize(modification, rel, ctx).await?;
1224 0 : if nblocks > vm_page_no {
1225 : // check if something to do: VM is larger than truncate position
1226 0 : self.put_rel_truncation(modification, rel, vm_page_no, ctx)
1227 0 : .await?;
1228 0 : }
1229 0 : }
1230 0 : Ok(())
1231 0 : }
1232 :
1233 24 : fn warn_on_ingest_lag(
1234 24 : &mut self,
1235 24 : conf: &crate::config::PageServerConf,
1236 24 : wal_timestmap: TimestampTz,
1237 24 : wal_timestamp: TimestampTz,
1238 24 : debug_assert_current_span_has_tenant_and_timeline_id();
1239 24 : let now = SystemTime::now();
1240 24 : let rate_limits = &mut self.warn_ingest_lag;
1241 24 : match try_from_pg_timestamp(wal_timestamp) {
1242 24 : Ok(ts) => {
1243 24 : match now.duration_since(ts) {
1244 24 : Ok(lag) => {
1245 24 : if lag > conf.wait_lsn_timeout {
1246 24 : rate_limits.lag_msg_ratelimit.call2(|rate_limit_stats| {
1247 6 : let lag = humantime::format_duration(lag);
1248 6 : warn!(%rate_limit_stats, %lag, "ingesting record with timestamp lagging more than wait_lsn_timeout");
1249 24 : })
1250 0 : }
1251 : },
1252 0 : Err(e) => {
1253 0 : let delta_t = e.duration();
1254 0 : // determined by prod victoriametrics query: 1000 * (timestamp(node_time_seconds{neon_service="pageserver"}) - node_time_seconds)
1255 0 : // => https://www.robustperception.io/time-metric-from-the-node-exporter/
1256 0 : const IGNORED_DRIFT: Duration = Duration::from_millis(100);
1257 0 : if delta_t > IGNORED_DRIFT {
1258 0 : let delta_t = humantime::format_duration(delta_t);
1259 0 : rate_limits.future_lsn_msg_ratelimit.call2(|rate_limit_stats| {
1260 0 : warn!(%rate_limit_stats, %delta_t, "ingesting record with timestamp from future");
1261 0 : })
1262 0 : }
1263 : }
1264 : };
1265 :
1266 : }
1267 0 : Err(error) => {
1268 0 : rate_limits.timestamp_invalid_msg_ratelimit.call2(|rate_limit_stats| {
1269 0 : warn!(%rate_limit_stats, %error, "ingesting record with invalid timestamp, cannot calculate lag and will fail find-lsn-for-timestamp type queries");
1270 0 : })
1271 : }
1272 : }
1273 24 : }
1274 :
1275 : /// Subroutine of ingest_record(), to handle XLOG_XACT_* records.
1276 : ///
1277 24 : async fn ingest_xact_record(
1278 24 : &mut self,
1279 24 : modification: &mut DatadirModification<'_>,
1280 24 : parsed: &XlXactParsedRecord,
1281 24 : is_commit: bool,
1282 24 : origin_id: u16,
1283 24 : ctx: &RequestContext,
1284 24 : ) -> anyhow::Result<()> {
1285 24 : // Record update of CLOG pages
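 : // With the usual constants (CLOG_XACTS_PER_PAGE = 32768,
 : // SLRU_PAGES_PER_SEGMENT = 32), e.g. xid 1_000_000 maps to CLOG
 : // page 30, which is page 30 of segment 0.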
1286 24 : let mut pageno = parsed.xid / pg_constants::CLOG_XACTS_PER_PAGE;
1287 24 : let mut segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1288 24 : let mut rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1289 24 : let mut page_xids: Vec<TransactionId> = vec![parsed.xid];
1290 24 :
1291 24 : self.warn_on_ingest_lag(modification.tline.conf, parsed.xact_time);
1292 :
1293 24 : for subxact in &parsed.subxacts {
1294 0 : let subxact_pageno = subxact / pg_constants::CLOG_XACTS_PER_PAGE;
1295 0 : if subxact_pageno != pageno {
1296 : // This subxact goes to a different page. Write the record
1297 : // for all the XIDs on the previous page, and continue
1298 : // accumulating XIDs on this new page.
1299 0 : modification.put_slru_wal_record(
1300 0 : SlruKind::Clog,
1301 0 : segno,
1302 0 : rpageno,
1303 0 : if is_commit {
1304 0 : NeonWalRecord::ClogSetCommitted {
1305 0 : xids: page_xids,
1306 0 : timestamp: parsed.xact_time,
1307 0 : }
1308 : } else {
1309 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1310 : },
1311 0 : )?;
1312 0 : page_xids = Vec::new();
1313 0 : }
1314 0 : pageno = subxact_pageno;
1315 0 : segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1316 0 : rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1317 0 : page_xids.push(*subxact);
1318 : }
1319 24 : modification.put_slru_wal_record(
1320 24 : SlruKind::Clog,
1321 24 : segno,
1322 24 : rpageno,
1323 24 : if is_commit {
1324 24 : NeonWalRecord::ClogSetCommitted {
1325 24 : xids: page_xids,
1326 24 : timestamp: parsed.xact_time,
1327 24 : }
1328 : } else {
1329 0 : NeonWalRecord::ClogSetAborted { xids: page_xids }
1330 : },
1331 0 : )?;
1332 :
1333 24 : for xnode in &parsed.xnodes {
1334 0 : for forknum in MAIN_FORKNUM..=INIT_FORKNUM {
1335 0 : let rel = RelTag {
1336 0 : forknum,
1337 0 : spcnode: xnode.spcnode,
1338 0 : dbnode: xnode.dbnode,
1339 0 : relnode: xnode.relnode,
1340 0 : };
1341 0 : if modification
1342 0 : .tline
1343 0 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1344 0 : .await?
1345 : {
1346 0 : self.put_rel_drop(modification, rel, ctx).await?;
1347 0 : }
1348 : }
1349 : }
1350 24 : if origin_id != 0 {
1351 0 : modification
1352 0 : .set_replorigin(origin_id, parsed.origin_lsn)
1353 0 : .await?;
1354 24 : }
1355 24 : Ok(())
1356 24 : }
1357 :
1358 0 : async fn ingest_clog_truncate_record(
1359 0 : &mut self,
1360 0 : modification: &mut DatadirModification<'_>,
1361 0 : xlrec: &XlClogTruncate,
1362 0 : ctx: &RequestContext,
1363 0 : ) -> anyhow::Result<()> {
1364 0 : info!(
1365 0 : "RM_CLOG_ID truncate pageno {} oldestXid {} oldestXidDB {}",
1366 : xlrec.pageno, xlrec.oldest_xid, xlrec.oldest_xid_db
1367 : );
1368 :
1369 : // In Postgres, oldestXid and oldestXidDB are updated in memory when the CLOG is
1370 : // truncated, but a checkpoint record with the updated values isn't written until
1371 : // later. In Neon, a server can start at any LSN, not just on a checkpoint record,
1372 : // so we keep the oldestXid and oldestXidDB up-to-date.
1373 0 : self.checkpoint.oldestXid = xlrec.oldest_xid;
1374 0 : self.checkpoint.oldestXidDB = xlrec.oldest_xid_db;
1375 0 : self.checkpoint_modified = true;
1376 0 :
1377 0 : // TODO: Handle AdvanceOldestClogXid(), or write a comment explaining why we don't need it
1378 0 :
1379 0 : let latest_page_number =
1380 0 : self.checkpoint.nextXid.value as u32 / pg_constants::CLOG_XACTS_PER_PAGE;
1381 0 :
1382 0 : // Now delete all segments containing pages between xlrec.pageno
1383 0 : // and latest_page_number.
1384 0 :
1385 0 : // First, make an important safety check:
1386 0 : // the current endpoint page must not be eligible for removal.
1387 0 : // See SimpleLruTruncate() in slru.c
1388 0 : if clogpage_precedes(latest_page_number, xlrec.pageno) {
1389 0 : info!("could not truncate directory pg_xact: apparent wraparound");
1390 0 : return Ok(());
1391 0 : }
1392 :
1393 : // Iterate over SLRU CLOG segments and drop those that we're ready to truncate
1394 : //
1395 : // We cannot pass 'lsn' to Timeline.list_slru_segments(), or it
1396 : // will block waiting for the last valid LSN to advance up to
1397 : // it. So we use the previous record's LSN in the get calls
1398 : // instead.
1399 0 : for segno in modification
1400 0 : .tline
1401 0 : .list_slru_segments(SlruKind::Clog, Version::Modified(modification), ctx)
1402 0 : .await?
1403 : {
1404 0 : let segpage = segno * pg_constants::SLRU_PAGES_PER_SEGMENT;
1405 0 : if slru_may_delete_clogsegment(segpage, xlrec.pageno) {
1406 0 : modification
1407 0 : .drop_slru_segment(SlruKind::Clog, segno, ctx)
1408 0 : .await?;
1409 0 : trace!("Drop CLOG segment {:>04X}", segno);
1410 0 : }
1411 : }
1412 :
1413 0 : Ok(())
1414 0 : }
1415 :
1416 0 : fn ingest_multixact_create_record(
1417 0 : &mut self,
1418 0 : modification: &mut DatadirModification,
1419 0 : xlrec: &XlMultiXactCreate,
1420 0 : ) -> Result<()> {
1421 0 : // Create WAL record for updating the multixact-offsets page
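 : // With MULTIXACT_OFFSETS_PER_PAGE = 2048 (BLCKSZ / 4-byte offsets),
 : // e.g. mid 100_000 maps to offsets page 48: segment 1, page 16.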
1422 0 : let pageno = xlrec.mid / pg_constants::MULTIXACT_OFFSETS_PER_PAGE as u32;
1423 0 : let segno = pageno / pg_constants::SLRU_PAGES_PER_SEGMENT;
1424 0 : let rpageno = pageno % pg_constants::SLRU_PAGES_PER_SEGMENT;
1425 0 :
1426 0 : modification.put_slru_wal_record(
1427 0 : SlruKind::MultiXactOffsets,
1428 0 : segno,
1429 0 : rpageno,
1430 0 : NeonWalRecord::MultixactOffsetCreate {
1431 0 : mid: xlrec.mid,
1432 0 : moff: xlrec.moff,
1433 0 : },
1434 0 : )?;
1435 :
1436 : // Create WAL records for the update of each affected multixact-members page
1437 0 : let mut members = xlrec.members.iter();
1438 0 : let mut offset = xlrec.moff;
1439 : loop {
1440 0 : let pageno = offset / pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1441 0 :
1442 0 : // How many members fit on this page?
1443 0 : let page_remain = pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32
1444 0 : - offset % pg_constants::MULTIXACT_MEMBERS_PER_PAGE as u32;
1445 0 :
1446 0 : let mut this_page_members: Vec<MultiXactMember> = Vec::new();
1447 0 : for _ in 0..page_remain {
1448 0 : if let Some(m) = members.next() {
1449 0 : this_page_members.push(m.clone());
1450 0 : } else {
1451 0 : break;
1452 : }
1453 : }
1454 0 : if this_page_members.is_empty() {
1455 : // all done
1456 0 : break;
1457 0 : }
1458 0 : let n_this_page = this_page_members.len();
1459 0 :
1460 0 : modification.put_slru_wal_record(
1461 0 : SlruKind::MultiXactMembers,
1462 0 : pageno / pg_constants::SLRU_PAGES_PER_SEGMENT,
1463 0 : pageno % pg_constants::SLRU_PAGES_PER_SEGMENT,
1464 0 : NeonWalRecord::MultixactMembersCreate {
1465 0 : moff: offset,
1466 0 : members: this_page_members,
1467 0 : },
1468 0 : )?;
1469 :
1470 : // Note: The multixact members can wrap around, even within one WAL record.
1471 0 : offset = offset.wrapping_add(n_this_page as u32);
1472 : }
1473 0 : let next_offset = offset;
1474 0 : assert!(xlrec.moff.wrapping_add(xlrec.nmembers) == next_offset);
1475 :
1476 : // Update next-multi-xid and next-offset
1477 : //
1478 : // NB: In PostgreSQL, the next-multi-xid stored in the control file is allowed to
1479 : // go to 0, and it's fixed up by skipping to FirstMultiXactId in functions that
1480 : // read it, like GetNewMultiXactId(). This is different from how nextXid is
1481 : // incremented! nextXid skips over values < FirstNormalTransactionId when the
1482 : // value is stored, so it's never 0 in a checkpoint.
1483 : //
1484 : // I don't know why it's done that way; it seems less error-prone to skip over 0
1485 : // when the value is stored rather than when it's read. But let's do it the same
1486 : // way here.
1487 0 : let next_multi_xid = xlrec.mid.wrapping_add(1);
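// A sketch of the read-side fixup described above, as Postgres readers such
// as GetNewMultiXactId() do it (FirstMultiXactId is 1; 0 is
// InvalidMultiXactId). Illustrative only, not this crate's API:
//
//     const FIRST_MULTI_XACT_ID: u32 = 1;
//     fn fixup_next_multi(next: u32) -> u32 {
//         if next < FIRST_MULTI_XACT_ID { FIRST_MULTI_XACT_ID } else { next }
//     }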
1488 0 :
1489 0 : if self
1490 0 : .checkpoint
1491 0 : .update_next_multixid(next_multi_xid, next_offset)
1492 0 : {
1493 0 : self.checkpoint_modified = true;
1494 0 : }
1495 :
1496 : // Also update the next-xid with the highest member. According to the comments in
1497 : // multixact_redo(), this shouldn't be necessary, but let's do the same here.
1498 0 : let max_mbr_xid = xlrec.members.iter().fold(None, |acc, mbr| {
1499 0 : if let Some(max_xid) = acc {
1500 0 : if mbr.xid.wrapping_sub(max_xid) as i32 > 0 {
1501 0 : Some(mbr.xid)
1502 : } else {
1503 0 : acc
1504 : }
1505 : } else {
1506 0 : Some(mbr.xid)
1507 : }
1508 0 : });
1509 :
1510 0 : if let Some(max_xid) = max_mbr_xid {
1511 0 : if self.checkpoint.update_next_xid(max_xid) {
1512 0 : self.checkpoint_modified = true;
1513 0 : }
1514 0 : }
1515 0 : Ok(())
1516 0 : }
1517 :
1518 0 : async fn ingest_multixact_truncate_record(
1519 0 : &mut self,
1520 0 : modification: &mut DatadirModification<'_>,
1521 0 : xlrec: &XlMultiXactTruncate,
1522 0 : ctx: &RequestContext,
1523 0 : ) -> Result<()> {
1524 0 : self.checkpoint.oldestMulti = xlrec.end_trunc_off;
1525 0 : self.checkpoint.oldestMultiDB = xlrec.oldest_multi_db;
1526 0 : self.checkpoint_modified = true;
1527 0 :
1528 0 : // PerformMembersTruncation
1529 0 : let maxsegment: i32 = mx_offset_to_member_segment(pg_constants::MAX_MULTIXACT_OFFSET);
1530 0 : let startsegment: i32 = mx_offset_to_member_segment(xlrec.start_trunc_memb);
1531 0 : let endsegment: i32 = mx_offset_to_member_segment(xlrec.end_trunc_memb);
1532 0 : let mut segment: i32 = startsegment;
1533 :
1534 : // Delete all the segments except the last one. The last segment can still
1535 : // contain valid data, possibly only partially.
1536 0 : while segment != endsegment {
1537 0 : modification
1538 0 : .drop_slru_segment(SlruKind::MultiXactMembers, segment as u32, ctx)
1539 0 : .await?;
1540 :
1541 : /* move to next segment, handling wraparound correctly */
1542 0 : if segment == maxsegment {
1543 0 : segment = 0;
1544 0 : } else {
1545 0 : segment += 1;
1546 0 : }
1547 : }
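// A worked example of the wraparound-aware walk above (numbers are made up):
// with a tiny maxsegment = 3, startsegment = 2 and endsegment = 1, the loop
// visits segments 2, 3, 0 and stops before 1, leaving the possibly still
// partially valid end segment in place. For reference, the segment math is
// roughly (the real helper lives in postgres_ffi):
//
//     fn mx_offset_to_member_segment(offset: u32) -> i32 {
//         (offset / MULTIXACT_MEMBERS_PER_PAGE / SLRU_PAGES_PER_SEGMENT) as i32
//     }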
1548 :
1549 : // Truncate offsets
1550 : // FIXME: this does not handle wraparound correctly
1551 :
1552 0 : Ok(())
1553 0 : }
1554 :
1555 0 : async fn ingest_relmap_page(
1556 0 : &mut self,
1557 0 : modification: &mut DatadirModification<'_>,
1558 0 : xlrec: &XlRelmapUpdate,
1559 0 : decoded: &DecodedWALRecord,
1560 0 : ctx: &RequestContext,
1561 0 : ) -> Result<()> {
1562 0 : let mut buf = decoded.record.clone();
1563 0 : buf.advance(decoded.main_data_offset);
1564 0 : // skip xl_relmap_update
1565 0 : buf.advance(12);
1566 0 :
1567 0 : modification
1568 0 : .put_relmap_file(
1569 0 : xlrec.tsid,
1570 0 : xlrec.dbid,
1571 0 : Bytes::copy_from_slice(&buf[..]),
1572 0 : ctx,
1573 0 : )
1574 0 : .await
1575 0 : }
1576 :
1577 54 : async fn put_rel_creation(
1578 54 : &mut self,
1579 54 : modification: &mut DatadirModification<'_>,
1580 54 : rel: RelTag,
1581 54 : ctx: &RequestContext,
1582 54 : ) -> Result<()> {
1583 54 : modification.put_rel_creation(rel, 0, ctx).await?;
1584 54 : Ok(())
1585 54 : }
1586 :
1587 817278 : async fn put_rel_page_image(
1588 817278 : &mut self,
1589 817278 : modification: &mut DatadirModification<'_>,
1590 817278 : rel: RelTag,
1591 817278 : blknum: BlockNumber,
1592 817278 : img: Bytes,
1593 817278 : ctx: &RequestContext,
1594 817278 : ) -> Result<(), PageReconstructError> {
1595 817278 : self.handle_rel_extend(modification, rel, blknum, ctx)
1596 11254 : .await?;
1597 817278 : modification.put_rel_page_image(rel, blknum, img)?;
1598 817278 : Ok(())
1599 817278 : }
1600 :
1601 436890 : async fn put_rel_wal_record(
1602 436890 : &mut self,
1603 436890 : modification: &mut DatadirModification<'_>,
1604 436890 : rel: RelTag,
1605 436890 : blknum: BlockNumber,
1606 436890 : rec: NeonWalRecord,
1607 436890 : ctx: &RequestContext,
1608 436890 : ) -> Result<()> {
1609 436890 : self.handle_rel_extend(modification, rel, blknum, ctx)
1610 848 : .await?;
1611 436890 : modification.put_rel_wal_record(rel, blknum, rec)?;
1612 436890 : Ok(())
1613 436890 : }
1614 :
1615 18036 : async fn put_rel_truncation(
1616 18036 : &mut self,
1617 18036 : modification: &mut DatadirModification<'_>,
1618 18036 : rel: RelTag,
1619 18036 : nblocks: BlockNumber,
1620 18036 : ctx: &RequestContext,
1621 18036 : ) -> anyhow::Result<()> {
1622 18036 : modification.put_rel_truncation(rel, nblocks, ctx).await?;
1623 18036 : Ok(())
1624 18036 : }
1625 :
1626 6 : async fn put_rel_drop(
1627 6 : &mut self,
1628 6 : modification: &mut DatadirModification<'_>,
1629 6 : rel: RelTag,
1630 6 : ctx: &RequestContext,
1631 6 : ) -> Result<()> {
1632 6 : modification.put_rel_drop(rel, ctx).await?;
1633 6 : Ok(())
1634 6 : }
1635 :
1636 1254168 : async fn handle_rel_extend(
1637 1254168 : &mut self,
1638 1254168 : modification: &mut DatadirModification<'_>,
1639 1254168 : rel: RelTag,
1640 1254168 : blknum: BlockNumber,
1641 1254168 : ctx: &RequestContext,
1642 1254168 : ) -> Result<(), PageReconstructError> {
1643 1254168 : let new_nblocks = blknum + 1;
1644 : // Check if the relation exists. We implicitly create relations on first
1645 : // record.
1646 : // TODO: would be nice if to be more explicit about it
1647 :
1648 : // Get current size and put rel creation if rel doesn't exist
1649 : //
1650 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size would
1651 : // check the cache too. This is because eagerly checking the cache results in
1652 : // less work overall and 10% better performance. It's more work on cache miss
1653 : // but cache miss is rare.
1654 1254168 : let old_nblocks = if let Some(nblocks) = modification
1655 1254168 : .tline
1656 1254168 : .get_cached_rel_size(&rel, modification.get_lsn())
1657 : {
1658 1254138 : nblocks
1659 30 : } else if !modification
1660 30 : .tline
1661 30 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1662 6 : .await?
1663 : {
1664 : // create it with 0 size initially, the logic below will extend it
1665 30 : modification
1666 30 : .put_rel_creation(rel, 0, ctx)
1667 9 : .await
1668 30 : .context("Relation Error")?;
1669 30 : 0
1670 : } else {
1671 0 : modification
1672 0 : .tline
1673 0 : .get_rel_size(rel, Version::Modified(modification), ctx)
1674 0 : .await?
1675 : };
1676 :
1677 1254168 : if new_nblocks > old_nblocks {
1678 : //info!("extending {} {} to {}", rel, old_nblocks, new_nblocks);
1679 824364 : modification.put_rel_extend(rel, new_nblocks, ctx).await?;
1680 :
1681 824364 : let mut key = rel_block_to_key(rel, blknum);
1682 : // fill the gap with zeros
1683 824364 : for gap_blknum in old_nblocks..blknum {
1684 8994 : key.field6 = gap_blknum;
1685 8994 :
1686 8994 : if self.shard.get_shard_number(&key) != self.shard.number {
1687 0 : continue;
1688 8994 : }
1689 8994 :
1690 8994 : modification.put_rel_page_image(rel, gap_blknum, ZERO_PAGE.clone())?;
1691 : }
1692 429804 : }
1693 1254168 : Ok(())
1694 1254168 : }
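// A worked example of the gap-filling above (a sketch; numbers are made up):
// if old_nblocks = 3 and a record touches blknum = 7, the relation is
// extended to new_nblocks = 8, blocks 3..=6 are zero-filled (skipping blocks
// owned by other shards), and block 7 receives the incoming page image or
// WAL record from the caller.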
1695 :
1696 0 : async fn put_slru_page_image(
1697 0 : &mut self,
1698 0 : modification: &mut DatadirModification<'_>,
1699 0 : kind: SlruKind,
1700 0 : segno: u32,
1701 0 : blknum: BlockNumber,
1702 0 : img: Bytes,
1703 0 : ctx: &RequestContext,
1704 0 : ) -> Result<()> {
1705 0 : self.handle_slru_extend(modification, kind, segno, blknum, ctx)
1706 0 : .await?;
1707 0 : modification.put_slru_page_image(kind, segno, blknum, img)?;
1708 0 : Ok(())
1709 0 : }
1710 :
1711 0 : async fn handle_slru_extend(
1712 0 : &mut self,
1713 0 : modification: &mut DatadirModification<'_>,
1714 0 : kind: SlruKind,
1715 0 : segno: u32,
1716 0 : blknum: BlockNumber,
1717 0 : ctx: &RequestContext,
1718 0 : ) -> anyhow::Result<()> {
1719 0 : // we don't use a cache for this like we do for relations. SLRUS are explcitly
1720 0 : // extended with ZEROPAGE records, not with commit records, so it happens
1721 0 : // a lot less frequently.
1722 0 :
1723 0 : let new_nblocks = blknum + 1;
1724 : // Check if the relation exists. We implicitly create relations on first
1725 : // record.
1726 : // TODO: would be nice if to be more explicit about it
1727 0 : let old_nblocks = if !modification
1728 0 : .tline
1729 0 : .get_slru_segment_exists(kind, segno, Version::Modified(modification), ctx)
1730 0 : .await?
1731 : {
1732 : // create it with 0 size initially, the logic below will extend it
1733 0 : modification
1734 0 : .put_slru_segment_creation(kind, segno, 0, ctx)
1735 0 : .await?;
1736 0 : 0
1737 : } else {
1738 0 : modification
1739 0 : .tline
1740 0 : .get_slru_segment_size(kind, segno, Version::Modified(modification), ctx)
1741 0 : .await?
1742 : };
1743 :
1744 0 : if new_nblocks > old_nblocks {
1745 0 : trace!(
1746 0 : "extending SLRU {:?} seg {} from {} to {} blocks",
1747 : kind,
1748 : segno,
1749 : old_nblocks,
1750 : new_nblocks
1751 : );
1752 0 : modification.put_slru_extend(kind, segno, new_nblocks)?;
1753 :
1754 : // fill the gap with zeros
1755 0 : for gap_blknum in old_nblocks..blknum {
1756 0 : modification.put_slru_page_image(kind, segno, gap_blknum, ZERO_PAGE.clone())?;
1757 : }
1758 0 : }
1759 0 : Ok(())
1760 0 : }
1761 : }
1762 :
1763 36 : async fn get_relsize(
1764 36 : modification: &DatadirModification<'_>,
1765 36 : rel: RelTag,
1766 36 : ctx: &RequestContext,
1767 36 : ) -> Result<BlockNumber, PageReconstructError> {
1768 36 : let nblocks = if !modification
1769 36 : .tline
1770 36 : .get_rel_exists(rel, Version::Modified(modification), ctx)
1771 0 : .await?
1772 : {
1773 0 : 0
1774 : } else {
1775 36 : modification
1776 36 : .tline
1777 36 : .get_rel_size(rel, Version::Modified(modification), ctx)
1778 0 : .await?
1779 : };
1780 36 : Ok(nblocks)
1781 36 : }
1782 :
1783 : #[allow(clippy::bool_assert_comparison)]
1784 : #[cfg(test)]
1785 : mod tests {
1786 : use super::*;
1787 : use crate::tenant::harness::*;
1788 : use crate::tenant::remote_timeline_client::{remote_initdb_archive_path, INITDB_PATH};
1789 : use postgres_ffi::RELSEG_SIZE;
1790 :
1791 : use crate::DEFAULT_PG_VERSION;
1792 :
1793 : /// Arbitrary relation tag, for testing.
1794 : const TESTREL_A: RelTag = RelTag {
1795 : spcnode: 0,
1796 : dbnode: 111,
1797 : relnode: 1000,
1798 : forknum: 0,
1799 : };
1800 :
1801 36 : fn assert_current_logical_size(_timeline: &Timeline, _lsn: Lsn) {
1802 36 : // TODO
1803 36 : }
1804 :
1805 : static ZERO_CHECKPOINT: Bytes = Bytes::from_static(&[0u8; SIZEOF_CHECKPOINT]);
1806 :
1807 24 : async fn init_walingest_test(tline: &Timeline, ctx: &RequestContext) -> Result<WalIngest> {
1808 24 : let mut m = tline.begin_modification(Lsn(0x10));
1809 24 : m.put_checkpoint(ZERO_CHECKPOINT.clone())?;
1810 48 : m.put_relmap_file(0, 111, Bytes::from(""), ctx).await?; // dummy relmapper file
1811 24 : m.commit(ctx).await?;
1812 24 : let walingest = WalIngest::new(tline, Lsn(0x10), ctx).await?;
1813 :
1814 24 : Ok(walingest)
1815 24 : }
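// The tests below all follow the same pattern: open a modification at some
// LSN, apply puts through WalIngest, and commit it. A minimal sketch of one
// round-trip (using the same types and helpers as above):
//
//     let mut m = tline.begin_modification(Lsn(0x20));
//     walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
//     m.commit(&ctx).await?;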
1816 :
1817 : #[tokio::test]
1818 6 : async fn test_relsize() -> Result<()> {
1819 24 : let (tenant, ctx) = TenantHarness::create("test_relsize").await?.load().await;
1820 6 : let tline = tenant
1821 6 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
1822 12 : .await?;
1823 15 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
1824 6 :
1825 6 : let mut m = tline.begin_modification(Lsn(0x20));
1826 6 : walingest.put_rel_creation(&mut m, TESTREL_A, &ctx).await?;
1827 6 : walingest
1828 6 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
1829 6 : .await?;
1830 6 : m.commit(&ctx).await?;
1831 6 : let mut m = tline.begin_modification(Lsn(0x30));
1832 6 : walingest
1833 6 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 3"), &ctx)
1834 6 : .await?;
1835 6 : m.commit(&ctx).await?;
1836 6 : let mut m = tline.begin_modification(Lsn(0x40));
1837 6 : walingest
1838 6 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1 at 4"), &ctx)
1839 6 : .await?;
1840 6 : m.commit(&ctx).await?;
1841 6 : let mut m = tline.begin_modification(Lsn(0x50));
1842 6 : walingest
1843 6 : .put_rel_page_image(&mut m, TESTREL_A, 2, test_img("foo blk 2 at 5"), &ctx)
1844 6 : .await?;
1845 6 : m.commit(&ctx).await?;
1846 6 :
1847 6 : assert_current_logical_size(&tline, Lsn(0x50));
1848 6 :
1849 6 : // The relation was created at LSN 0x20, so it's not visible at LSN 0x10 yet.
1850 6 : assert_eq!(
1851 6 : tline
1852 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
1853 6 : .await?,
1854 6 : false
1855 6 : );
1856 6 : assert!(tline
1857 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
1858 6 : .await
1859 6 : .is_err());
1860 6 : assert_eq!(
1861 6 : tline
1862 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1863 6 : .await?,
1864 6 : true
1865 6 : );
1866 6 : assert_eq!(
1867 6 : tline
1868 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
1869 6 : .await?,
1870 6 : 1
1871 6 : );
1872 6 : assert_eq!(
1873 6 : tline
1874 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
1875 6 : .await?,
1876 6 : 3
1877 6 : );
1878 6 :
1879 6 : // Check page contents at each LSN
1880 6 : assert_eq!(
1881 6 : tline
1882 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x20)), &ctx)
1883 6 : .await?,
1884 6 : test_img("foo blk 0 at 2")
1885 6 : );
1886 6 :
1887 6 : assert_eq!(
1888 6 : tline
1889 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x30)), &ctx)
1890 6 : .await?,
1891 6 : test_img("foo blk 0 at 3")
1892 6 : );
1893 6 :
1894 6 : assert_eq!(
1895 6 : tline
1896 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x40)), &ctx)
1897 6 : .await?,
1898 6 : test_img("foo blk 0 at 3")
1899 6 : );
1900 6 : assert_eq!(
1901 6 : tline
1902 6 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x40)), &ctx)
1903 6 : .await?,
1904 6 : test_img("foo blk 1 at 4")
1905 6 : );
1906 6 :
1907 6 : assert_eq!(
1908 6 : tline
1909 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x50)), &ctx)
1910 6 : .await?,
1911 6 : test_img("foo blk 0 at 3")
1912 6 : );
1913 6 : assert_eq!(
1914 6 : tline
1915 6 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x50)), &ctx)
1916 6 : .await?,
1917 6 : test_img("foo blk 1 at 4")
1918 6 : );
1919 6 : assert_eq!(
1920 6 : tline
1921 6 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
1922 6 : .await?,
1923 6 : test_img("foo blk 2 at 5")
1924 6 : );
1925 6 :
1926 6 : // Truncate last block
1927 6 : let mut m = tline.begin_modification(Lsn(0x60));
1928 6 : walingest
1929 6 : .put_rel_truncation(&mut m, TESTREL_A, 2, &ctx)
1930 6 : .await?;
1931 6 : m.commit(&ctx).await?;
1932 6 : assert_current_logical_size(&tline, Lsn(0x60));
1933 6 :
1934 6 : // Check reported size and contents after truncation
1935 6 : assert_eq!(
1936 6 : tline
1937 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), &ctx)
1938 6 : .await?,
1939 6 : 2
1940 6 : );
1941 6 : assert_eq!(
1942 6 : tline
1943 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x60)), &ctx)
1944 6 : .await?,
1945 6 : test_img("foo blk 0 at 3")
1946 6 : );
1947 6 : assert_eq!(
1948 6 : tline
1949 6 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x60)), &ctx)
1950 6 : .await?,
1951 6 : test_img("foo blk 1 at 4")
1952 6 : );
1953 6 :
1954 6 : // should still see the truncated block with older LSN
1955 6 : assert_eq!(
1956 6 : tline
1957 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
1958 6 : .await?,
1959 6 : 3
1960 6 : );
1961 6 : assert_eq!(
1962 6 : tline
1963 6 : .get_rel_page_at_lsn(TESTREL_A, 2, Version::Lsn(Lsn(0x50)), &ctx)
1964 6 : .await?,
1965 6 : test_img("foo blk 2 at 5")
1966 6 : );
1967 6 :
1968 6 : // Truncate to zero length
1969 6 : let mut m = tline.begin_modification(Lsn(0x68));
1970 6 : walingest
1971 6 : .put_rel_truncation(&mut m, TESTREL_A, 0, &ctx)
1972 6 : .await?;
1973 6 : m.commit(&ctx).await?;
1974 6 : assert_eq!(
1975 6 : tline
1976 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x68)), &ctx)
1977 6 : .await?,
1978 6 : 0
1979 6 : );
1980 6 :
1981 6 : // Extend from 0 to 2 blocks, leaving a gap
1982 6 : let mut m = tline.begin_modification(Lsn(0x70));
1983 6 : walingest
1984 6 : .put_rel_page_image(&mut m, TESTREL_A, 1, test_img("foo blk 1"), &ctx)
1985 6 : .await?;
1986 6 : m.commit(&ctx).await?;
1987 6 : assert_eq!(
1988 6 : tline
1989 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x70)), &ctx)
1990 6 : .await?,
1991 6 : 2
1992 6 : );
1993 6 : assert_eq!(
1994 6 : tline
1995 6 : .get_rel_page_at_lsn(TESTREL_A, 0, Version::Lsn(Lsn(0x70)), &ctx)
1996 6 : .await?,
1997 6 : ZERO_PAGE
1998 6 : );
1999 6 : assert_eq!(
2000 6 : tline
2001 6 : .get_rel_page_at_lsn(TESTREL_A, 1, Version::Lsn(Lsn(0x70)), &ctx)
2002 6 : .await?,
2003 6 : test_img("foo blk 1")
2004 6 : );
2005 6 :
2006 6 : // Extend a lot more, leaving a big gap that spans across segments
2007 6 : let mut m = tline.begin_modification(Lsn(0x80));
2008 6 : walingest
2009 6 : .put_rel_page_image(&mut m, TESTREL_A, 1500, test_img("foo blk 1500"), &ctx)
2010 6 : .await?;
2011 570 : m.commit(&ctx).await?;
2012 6 : assert_eq!(
2013 6 : tline
2014 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
2015 6 : .await?,
2016 6 : 1501
2017 6 : );
2018 8994 : for blk in 2..1500 {
2019 8988 : assert_eq!(
2020 8988 : tline
2021 8988 : .get_rel_page_at_lsn(TESTREL_A, blk, Version::Lsn(Lsn(0x80)), &ctx)
2022 4620 : .await?,
2023 8988 : ZERO_PAGE
2024 6 : );
2025 6 : }
2026 6 : assert_eq!(
2027 6 : tline
2028 6 : .get_rel_page_at_lsn(TESTREL_A, 1500, Version::Lsn(Lsn(0x80)), &ctx)
2029 6 : .await?,
2030 6 : test_img("foo blk 1500")
2031 6 : );
2032 6 :
2033 6 : Ok(())
2034 6 : }
2035 :
2036 : // Test what happens if we dropped a relation
2037 : // and then created it again within the same layer.
2038 : #[tokio::test]
2039 6 : async fn test_drop_extend() -> Result<()> {
2040 6 : let (tenant, ctx) = TenantHarness::create("test_drop_extend")
2041 6 : .await?
2042 6 : .load()
2043 24 : .await;
2044 6 : let tline = tenant
2045 6 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2046 12 : .await?;
2047 15 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2048 6 :
2049 6 : let mut m = tline.begin_modification(Lsn(0x20));
2050 6 : walingest
2051 6 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 2"), &ctx)
2052 6 : .await?;
2053 6 : m.commit(&ctx).await?;
2054 6 :
2055 6 : // Check that rel exists and size is correct
2056 6 : assert_eq!(
2057 6 : tline
2058 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2059 6 : .await?,
2060 6 : true
2061 6 : );
2062 6 : assert_eq!(
2063 6 : tline
2064 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2065 6 : .await?,
2066 6 : 1
2067 6 : );
2068 6 :
2069 6 : // Drop rel
2070 6 : let mut m = tline.begin_modification(Lsn(0x30));
2071 6 : walingest.put_rel_drop(&mut m, TESTREL_A, &ctx).await?;
2072 6 : m.commit(&ctx).await?;
2073 6 :
2074 6 : // Check that rel is not visible anymore
2075 6 : assert_eq!(
2076 6 : tline
2077 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x30)), &ctx)
2078 6 : .await?,
2079 6 : false
2080 6 : );
2081 6 :
2082 6 : // FIXME: should fail
2083 6 : //assert!(tline.get_rel_size(TESTREL_A, Lsn(0x30), false)?.is_none());
2084 6 :
2085 6 : // Re-create it
2086 6 : let mut m = tline.begin_modification(Lsn(0x40));
2087 6 : walingest
2088 6 : .put_rel_page_image(&mut m, TESTREL_A, 0, test_img("foo blk 0 at 4"), &ctx)
2089 6 : .await?;
2090 6 : m.commit(&ctx).await?;
2091 6 :
2092 6 : // Check that rel exists and size is correct
2093 6 : assert_eq!(
2094 6 : tline
2095 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x40)), &ctx)
2096 6 : .await?,
2097 6 : true
2098 6 : );
2099 6 : assert_eq!(
2100 6 : tline
2101 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x40)), &ctx)
2102 6 : .await?,
2103 6 : 1
2104 6 : );
2105 6 :
2106 6 : Ok(())
2107 6 : }
2108 :
2109 : // Test what happens if we truncated a relation
2110 : // so that one of its segments was dropped
2111 : // and then extended it again within the same layer.
2112 : #[tokio::test]
2113 6 : async fn test_truncate_extend() -> Result<()> {
2114 6 : let (tenant, ctx) = TenantHarness::create("test_truncate_extend")
2115 6 : .await?
2116 6 : .load()
2117 24 : .await;
2118 6 : let tline = tenant
2119 6 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2120 12 : .await?;
2121 15 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2122 6 :
2123 6 : // Create a 20 MB relation (the size is arbitrary)
2124 6 : let relsize = 20 * 1024 * 1024 / 8192;
2125 6 : let mut m = tline.begin_modification(Lsn(0x20));
2126 15360 : for blkno in 0..relsize {
2127 15360 : let data = format!("foo blk {} at {}", blkno, Lsn(0x20));
2128 15360 : walingest
2129 15360 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2130 6 : .await?;
2131 6 : }
2132 6 : m.commit(&ctx).await?;
2133 6 :
2134 6 : // The relation was created at LSN 0x20, so it's not visible at LSN 0x10 yet.
2135 6 : assert_eq!(
2136 6 : tline
2137 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
2138 6 : .await?,
2139 6 : false
2140 6 : );
2141 6 : assert!(tline
2142 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x10)), &ctx)
2143 6 : .await
2144 6 : .is_err());
2145 6 :
2146 6 : assert_eq!(
2147 6 : tline
2148 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2149 6 : .await?,
2150 6 : true
2151 6 : );
2152 6 : assert_eq!(
2153 6 : tline
2154 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x20)), &ctx)
2155 6 : .await?,
2156 6 : relsize
2157 6 : );
2158 6 :
2159 6 : // Check relation content
2160 15360 : for blkno in 0..relsize {
2161 15360 : let lsn = Lsn(0x20);
2162 15360 : let data = format!("foo blk {} at {}", blkno, lsn);
2163 15360 : assert_eq!(
2164 15360 : tline
2165 15360 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(lsn), &ctx)
2166 5368 : .await?,
2167 15360 : test_img(&data)
2168 6 : );
2169 6 : }
2170 6 :
2171 6 : // Truncate the relation so that its second segment is dropped,
2172 6 : // leaving only one page
2173 6 : let mut m = tline.begin_modification(Lsn(0x60));
2174 6 : walingest
2175 6 : .put_rel_truncation(&mut m, TESTREL_A, 1, &ctx)
2176 6 : .await?;
2177 6 : m.commit(&ctx).await?;
2178 6 :
2179 6 : // Check reported size and contents after truncation
2180 6 : assert_eq!(
2181 6 : tline
2182 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x60)), &ctx)
2183 6 : .await?,
2184 6 : 1
2185 6 : );
2186 6 :
2187 12 : for blkno in 0..1 {
2188 6 : let lsn = Lsn(0x20);
2189 6 : let data = format!("foo blk {} at {}", blkno, lsn);
2190 6 : assert_eq!(
2191 6 : tline
2192 6 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x60)), &ctx)
2193 6 : .await?,
2194 6 : test_img(&data)
2195 6 : );
2196 6 : }
2197 6 :
2198 6 : // should still see all blocks with older LSN
2199 6 : assert_eq!(
2200 6 : tline
2201 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x50)), &ctx)
2202 6 : .await?,
2203 6 : relsize
2204 6 : );
2205 15360 : for blkno in 0..relsize {
2206 15360 : let lsn = Lsn(0x20);
2207 15360 : let data = format!("foo blk {} at {}", blkno, lsn);
2208 15360 : assert_eq!(
2209 15360 : tline
2210 15360 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x50)), &ctx)
2211 10728 : .await?,
2212 15360 : test_img(&data)
2213 6 : );
2214 6 : }
2215 6 :
2216 6 : // Extend the relation again,
2217 6 : // adding enough blocks to re-create the second segment
2218 6 : let lsn = Lsn(0x80);
2219 6 : let mut m = tline.begin_modification(lsn);
2220 15360 : for blkno in 0..relsize {
2221 15360 : let data = format!("foo blk {} at {}", blkno, lsn);
2222 15360 : walingest
2223 15360 : .put_rel_page_image(&mut m, TESTREL_A, blkno, test_img(&data), &ctx)
2224 6 : .await?;
2225 6 : }
2226 9 : m.commit(&ctx).await?;
2227 6 :
2228 6 : assert_eq!(
2229 6 : tline
2230 6 : .get_rel_exists(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
2231 6 : .await?,
2232 6 : true
2233 6 : );
2234 6 : assert_eq!(
2235 6 : tline
2236 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(0x80)), &ctx)
2237 6 : .await?,
2238 6 : relsize
2239 6 : );
2240 6 : // Check relation content
2241 15360 : for blkno in 0..relsize {
2242 15360 : let lsn = Lsn(0x80);
2243 15360 : let data = format!("foo blk {} at {}", blkno, lsn);
2244 15360 : assert_eq!(
2245 15360 : tline
2246 15360 : .get_rel_page_at_lsn(TESTREL_A, blkno, Version::Lsn(Lsn(0x80)), &ctx)
2247 5448 : .await?,
2248 15360 : test_img(&data)
2249 6 : );
2250 6 : }
2251 6 :
2252 6 : Ok(())
2253 6 : }
2254 :
2255 : /// Test get_relsize() and truncation with a file larger than 1 GB, so that it's
2256 : /// split into multiple 1 GB segments in Postgres.
2257 : #[tokio::test]
2258 6 : async fn test_large_rel() -> Result<()> {
2259 24 : let (tenant, ctx) = TenantHarness::create("test_large_rel").await?.load().await;
2260 6 : let tline = tenant
2261 6 : .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
2262 12 : .await?;
2263 15 : let mut walingest = init_walingest_test(&tline, &ctx).await?;
2264 6 :
2265 6 : let mut lsn = 0x10;
2266 786438 : for blknum in 0..RELSEG_SIZE + 1 {
2267 786438 : lsn += 0x10;
2268 786438 : let mut m = tline.begin_modification(Lsn(lsn));
2269 786438 : let img = test_img(&format!("foo blk {} at {}", blknum, Lsn(lsn)));
2270 786438 : walingest
2271 786438 : .put_rel_page_image(&mut m, TESTREL_A, blknum as BlockNumber, img, &ctx)
2272 11239 : .await?;
2273 786438 : m.commit(&ctx).await?;
2274 6 : }
2275 6 :
2276 6 : assert_current_logical_size(&tline, Lsn(lsn));
2277 6 :
2278 6 : assert_eq!(
2279 6 : tline
2280 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2281 6 : .await?,
2282 6 : RELSEG_SIZE + 1
2283 6 : );
2284 6 :
2285 6 : // Truncate one block
2286 6 : lsn += 0x10;
2287 6 : let mut m = tline.begin_modification(Lsn(lsn));
2288 6 : walingest
2289 6 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE, &ctx)
2290 6 : .await?;
2291 6 : m.commit(&ctx).await?;
2292 6 : assert_eq!(
2293 6 : tline
2294 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2295 6 : .await?,
2296 6 : RELSEG_SIZE
2297 6 : );
2298 6 : assert_current_logical_size(&tline, Lsn(lsn));
2299 6 :
2300 6 : // Truncate another block
2301 6 : lsn += 0x10;
2302 6 : let mut m = tline.begin_modification(Lsn(lsn));
2303 6 : walingest
2304 6 : .put_rel_truncation(&mut m, TESTREL_A, RELSEG_SIZE - 1, &ctx)
2305 6 : .await?;
2306 6 : m.commit(&ctx).await?;
2307 6 : assert_eq!(
2308 6 : tline
2309 6 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2310 6 : .await?,
2311 6 : RELSEG_SIZE - 1
2312 6 : );
2313 6 : assert_current_logical_size(&tline, Lsn(lsn));
2314 6 :
2315 6 : // Truncate to 3000, and then truncate all the way down to 0, one block at a time
2316 6 : // This tests the behavior at segment boundaries
2317 6 : let mut size: i32 = 3000;
2318 18012 : while size >= 0 {
2319 18006 : lsn += 0x10;
2320 18006 : let mut m = tline.begin_modification(Lsn(lsn));
2321 18006 : walingest
2322 18006 : .put_rel_truncation(&mut m, TESTREL_A, size as BlockNumber, &ctx)
2323 281 : .await?;
2324 18006 : m.commit(&ctx).await?;
2325 18006 : assert_eq!(
2326 18006 : tline
2327 18006 : .get_rel_size(TESTREL_A, Version::Lsn(Lsn(lsn)), &ctx)
2328 6 : .await?,
2329 18006 : size as BlockNumber
2330 6 : );
2331 6 :
2332 18006 : size -= 1;
2333 6 : }
2334 6 : assert_current_logical_size(&tline, Lsn(lsn));
2335 6 :
2336 6 : Ok(())
2337 6 : }
2338 :
2339 : /// Replay a wal segment file taken directly from safekeepers.
2340 : ///
2341 : /// This test is useful for benchmarking since it allows us to profile only
2342 : /// the walingest code in a single-threaded executor, and iterate more quickly
2343 : /// without waiting for unrelated steps.
2344 : #[tokio::test]
2345 6 : async fn test_ingest_real_wal() {
2346 6 : use crate::tenant::harness::*;
2347 6 : use postgres_ffi::waldecoder::WalStreamDecoder;
2348 6 : use postgres_ffi::WAL_SEGMENT_SIZE;
2349 6 :
2350 6 : // Define test data path and constants.
2351 6 : //
2352 6 : // Steps to reconstruct the data, if needed:
2353 6 : // 1. Run the pgbench python test
2354 6 : // 2. Take the first wal segment file from safekeeper
2355 6 : // 3. Compress it using `zstd --long input_file`
2356 6 : // 4. Copy initdb.tar.zst from local_fs_remote_storage
2357 6 : // 5. Grep sk logs for "restart decoder" to get startpoint
2358 6 : // 6. Run just the decoder from this test to get the endpoint.
2359 6 : // It's the last LSN the decoder will output.
2360 6 : let pg_version = 15; // The test data was generated by pg15
2361 6 : let path = "test_data/sk_wal_segment_from_pgbench";
2362 6 : let wal_segment_path = format!("{path}/000000010000000000000001.zst");
2363 6 : let source_initdb_path = format!("{path}/{INITDB_PATH}");
2364 6 : let startpoint = Lsn::from_hex("14AEC08").unwrap();
2365 6 : let _endpoint = Lsn::from_hex("1FFFF98").unwrap();
2366 6 :
2367 6 : let harness = TenantHarness::create("test_ingest_real_wal").await.unwrap();
2368 6 : let span = harness
2369 6 : .span()
2370 6 : .in_scope(|| info_span!("timeline_span", timeline_id=%TIMELINE_ID));
2371 24 : let (tenant, ctx) = harness.load().await;
2372 6 :
2373 6 : let remote_initdb_path =
2374 6 : remote_initdb_archive_path(&tenant.tenant_shard_id().tenant_id, &TIMELINE_ID);
2375 6 : let initdb_path = harness.remote_fs_dir.join(remote_initdb_path.get_path());
2376 6 :
2377 6 : std::fs::create_dir_all(initdb_path.parent().unwrap())
2378 6 : .expect("creating test dir should work");
2379 6 : std::fs::copy(source_initdb_path, initdb_path).expect("copying the initdb.tar.zst works");
2380 6 :
2381 6 : // Bootstrap a real timeline. We can't use create_test_timeline because
2382 6 : // it doesn't create a real checkpoint, and Walingest::new tries to parse
2383 6 : // the garbage data.
2384 6 : let tline = tenant
2385 6 : .bootstrap_timeline_test(TIMELINE_ID, pg_version, Some(TIMELINE_ID), &ctx)
2386 61705 : .await
2387 6 : .unwrap();
2388 6 :
2389 6 : // We fully read and decompress this into memory before decoding
2390 6 : // to get a more accurate perf profile of the decoder.
2391 6 : let bytes = {
2392 6 : use async_compression::tokio::bufread::ZstdDecoder;
2393 6 : let file = tokio::fs::File::open(wal_segment_path).await.unwrap();
2394 6 : let reader = tokio::io::BufReader::new(file);
2395 6 : let decoder = ZstdDecoder::new(reader);
2396 6 : let mut reader = tokio::io::BufReader::new(decoder);
2397 6 : let mut buffer = Vec::new();
2398 664 : tokio::io::copy_buf(&mut reader, &mut buffer).await.unwrap();
2399 6 : buffer
2400 6 : };
2401 6 :
2402 6 : // TODO start a profiler too
2403 6 : let started_at = std::time::Instant::now();
2404 6 :
2405 6 : // Initialize walingest
2406 6 : let xlogoff: usize = startpoint.segment_offset(WAL_SEGMENT_SIZE);
2407 6 : let mut decoder = WalStreamDecoder::new(startpoint, pg_version);
2408 6 : let mut walingest = WalIngest::new(tline.as_ref(), startpoint, &ctx)
2409 15 : .await
2410 6 : .unwrap();
2411 6 : let mut modification = tline.begin_modification(startpoint);
2412 6 : let mut decoded = DecodedWALRecord::default();
2413 6 : println!("decoding {} bytes", bytes.len() - xlogoff);
2414 6 :
2415 6 : // Decode and ingest wal. We process the wal in chunks because
2416 6 : // that's what happens when we get bytes from safekeepers.
2417 1424058 : for chunk in bytes[xlogoff..].chunks(50) {
2418 1424058 : decoder.feed_bytes(chunk);
2419 1861608 : while let Some((lsn, recdata)) = decoder.poll_decode().unwrap() {
2420 437550 : walingest
2421 437550 : .ingest_record(recdata, lsn, &mut modification, &mut decoded, &ctx)
2422 437550 : .instrument(span.clone())
2423 899 : .await
2424 437550 : .unwrap();
2425 6 : }
2426 1424058 : modification.commit(&ctx).await.unwrap();
2427 6 : }
2428 6 :
2429 6 : let duration = started_at.elapsed();
2430 6 : println!("done in {:?}", duration);
2431 6 : }
2432 : }
|