//! Every timeline of a tenant ([`crate::tenant::Tenant`]) has metadata
//! that needs to be stored persistently.
//!
//! Later, the file is used by [`remote_timeline_client`] as part of
//! external storage import and export operations.
//!
//! This module contains all the structs and helper methods related to timeline metadata.
//!
//! [`remote_timeline_client`]: super::remote_timeline_client

use std::io;

use anyhow::{ensure, Context};
use pageserver_api::shard::TenantShardId;
use serde::{de::Error, Deserialize, Serialize, Serializer};
use thiserror::Error;
use utils::bin_ser::SerializeError;
use utils::crashsafe::path_with_suffix_extension;
use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn};

use crate::config::PageServerConf;
use crate::virtual_file::VirtualFile;
use crate::TEMP_FILE_SUFFIX;

/// The current metadata format version, stored in the header to enable
/// backward compatibility.
const METADATA_FORMAT_VERSION: u16 = 4;

/// The previous metadata format version, still supported for reading.
const METADATA_OLD_FORMAT_VERSION: u16 = 3;
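
// How the two versions are used (summarizing `from_bytes` below): the fixed-size
// header is decoded first and its `format_version` is checked. Current-format
// files are decoded straight into `TimelineMetadataBodyV2`; old-format files go
// through `upgrade_timeline_metadata`, which re-reads the body as
// `TimelineMetadataBodyV1` and fills in `pg_version: 14`.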

/// We assume that a write of up to METADATA_MAX_SIZE bytes is atomic.
///
/// This is the same assumption that PostgreSQL makes with the control file,
/// see PG_CONTROL_MAX_SAFE_SIZE. (An illustrative sketch of the resulting
/// on-disk layout follows `METADATA_HDR_SIZE` below.)
const METADATA_MAX_SIZE: usize = 512;

/// Metadata stored on disk for each timeline
///
/// The fields correspond to the values we hold in memory, in Timeline.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TimelineMetadata {
    hdr: TimelineMetadataHeader,
    body: TimelineMetadataBodyV2,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataHeader {
    checksum: u32,       // CRC of serialized metadata body
    size: u16,           // size of serialized metadata
    format_version: u16, // metadata format version (used for compatibility checks)
}
const METADATA_HDR_SIZE: usize = std::mem::size_of::<TimelineMetadataHeader>();
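
// Illustrative note, added for clarity (not part of the original file): the
// layout produced and expected by `to_bytes`/`from_bytes` below is
//
//     [ TimelineMetadataHeader | bincode-serialized body | zero padding ]
//
// always METADATA_MAX_SIZE bytes in total, where `hdr.size` marks where the
// padding starts and `hdr.checksum` is the crc32c of the body bytes. A minimal
// compile-time sanity check of that layout (an added sketch, not upstream code):
const _: () = assert!(METADATA_HDR_SIZE <= METADATA_MAX_SIZE);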

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV2 {
    disk_consistent_lsn: Lsn,
    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again. This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,
    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
    latest_gc_cutoff_lsn: Lsn,
    initdb_lsn: Lsn,
    pg_version: u32,
}

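// Note added for clarity: `TimelineMetadataBodyV1` below is the pre-`pg_version`
// layout corresponding to METADATA_OLD_FORMAT_VERSION. It is only kept so that
// `upgrade_timeline_metadata` can still decode format version 3 files; new
// metadata is always written as `TimelineMetadataBodyV2`.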
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV1 {
    disk_consistent_lsn: Lsn,
    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again. This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,
    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
    latest_gc_cutoff_lsn: Lsn,
    initdb_lsn: Lsn,
}

impl TimelineMetadata {
    pub fn new(
        disk_consistent_lsn: Lsn,
        prev_record_lsn: Option<Lsn>,
        ancestor_timeline: Option<TimelineId>,
        ancestor_lsn: Lsn,
        latest_gc_cutoff_lsn: Lsn,
        initdb_lsn: Lsn,
        pg_version: u32,
    ) -> Self {
        Self {
            hdr: TimelineMetadataHeader {
                checksum: 0,
                size: 0,
                format_version: METADATA_FORMAT_VERSION,
            },
            body: TimelineMetadataBodyV2 {
                disk_consistent_lsn,
                prev_record_lsn,
                ancestor_timeline,
                ancestor_lsn,
                latest_gc_cutoff_lsn,
                initdb_lsn,
                pg_version,
            },
        }
    }

    fn upgrade_timeline_metadata(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        let mut hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        // backward compatible only up to this version
        ensure!(
            hdr.format_version == METADATA_OLD_FORMAT_VERSION,
            "unsupported metadata format version {}",
            hdr.format_version
        );

        let metadata_size = hdr.size as usize;

        let body: TimelineMetadataBodyV1 =
            TimelineMetadataBodyV1::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;

        let body = TimelineMetadataBodyV2 {
            disk_consistent_lsn: body.disk_consistent_lsn,
            prev_record_lsn: body.prev_record_lsn,
            ancestor_timeline: body.ancestor_timeline,
            ancestor_lsn: body.ancestor_lsn,
            latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn,
            initdb_lsn: body.initdb_lsn,
            pg_version: 14, // All timelines created before this version had pg_version 14
        };

        hdr.format_version = METADATA_FORMAT_VERSION;

        Ok(Self { hdr, body })
    }

    pub fn from_bytes(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        ensure!(
            metadata_bytes.len() == METADATA_MAX_SIZE,
            "metadata bytes size is wrong"
        );
        let hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        let metadata_size = hdr.size as usize;
        ensure!(
            metadata_size <= METADATA_MAX_SIZE,
            "corrupted metadata file"
        );
        let calculated_checksum = crc32c::crc32c(&metadata_bytes[METADATA_HDR_SIZE..metadata_size]);
        ensure!(
            hdr.checksum == calculated_checksum,
            "metadata checksum mismatch"
        );

        if hdr.format_version != METADATA_FORMAT_VERSION {
            // If metadata has the old format,
            // upgrade it and return the result
            TimelineMetadata::upgrade_timeline_metadata(metadata_bytes)
        } else {
            let body =
                TimelineMetadataBodyV2::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;
            ensure!(
                body.disk_consistent_lsn.is_aligned(),
                "disk_consistent_lsn is not aligned"
            );
            Ok(TimelineMetadata { hdr, body })
        }
    }

    pub fn to_bytes(&self) -> Result<Vec<u8>, SerializeError> {
        let body_bytes = self.body.ser()?;
        let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
        let hdr = TimelineMetadataHeader {
            size: metadata_size as u16,
            format_version: METADATA_FORMAT_VERSION,
            checksum: crc32c::crc32c(&body_bytes),
        };
        let hdr_bytes = hdr.ser()?;
        let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE];
        metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes);
        metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes);
        Ok(metadata_bytes)
    }

    /// The [`Lsn`] up to which the corresponding timeline directory contents,
    /// stored locally in the pageserver workdir, are consistent.
    pub fn disk_consistent_lsn(&self) -> Lsn {
        self.body.disk_consistent_lsn
    }

    pub fn prev_record_lsn(&self) -> Option<Lsn> {
        self.body.prev_record_lsn
    }

    pub fn ancestor_timeline(&self) -> Option<TimelineId> {
        self.body.ancestor_timeline
    }

    pub fn ancestor_lsn(&self) -> Lsn {
        self.body.ancestor_lsn
    }

    pub fn latest_gc_cutoff_lsn(&self) -> Lsn {
        self.body.latest_gc_cutoff_lsn
    }

    pub fn initdb_lsn(&self) -> Lsn {
        self.body.initdb_lsn
    }

    pub fn pg_version(&self) -> u32 {
        self.body.pg_version
    }

    // Checksums make it awkward to build a valid instance by hand. This helper
    // provides a TimelineMetadata with a valid checksum in its header.
    #[cfg(test)]
    pub fn example() -> Self {
        let instance = Self::new(
            "0/16960E8".parse::<Lsn>().unwrap(),
            None,
            None,
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            0,
        );
        let bytes = instance.to_bytes().unwrap();
        Self::from_bytes(&bytes).unwrap()
    }
}

impl<'de> Deserialize<'de> for TimelineMetadata {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bytes = Vec::<u8>::deserialize(deserializer)?;
        Self::from_bytes(bytes.as_slice()).map_err(|e| D::Error::custom(format!("{e}")))
    }
}

impl Serialize for TimelineMetadata {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let bytes = self
            .to_bytes()
            .map_err(|e| serde::ser::Error::custom(format!("{e}")))?;
        bytes.serialize(serializer)
    }
}
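
// Note added for clarity (grounded in the impls above and the bincode tests
// below): serde support does not serialize the struct fields directly. Instead,
// the whole checksummed METADATA_MAX_SIZE image from `to_bytes` is embedded as a
// length-prefixed byte vector, and deserialization goes back through
// `from_bytes`, so the checksum is verified even when the metadata is nested
// inside another serialized structure.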

/// Save timeline metadata to file
#[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))]
pub async fn save_metadata(
    conf: &'static PageServerConf,
    tenant_shard_id: &TenantShardId,
    timeline_id: &TimelineId,
    data: &TimelineMetadata,
) -> anyhow::Result<()> {
    let path = conf.metadata_path(tenant_shard_id, timeline_id);
    let temp_path = path_with_suffix_extension(&path, TEMP_FILE_SUFFIX);
    let metadata_bytes = data.to_bytes().context("serialize metadata")?;
    VirtualFile::crashsafe_overwrite(&path, &temp_path, &metadata_bytes)
        .await
        .context("write metadata")?;
    Ok(())
}
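
// The following is an illustrative sketch (added for explanation, not part of
// the original module) of the crash-safety pattern that
// `VirtualFile::crashsafe_overwrite` above is assumed to implement: write the
// new bytes to a temporary file, fsync it, rename it over the target, then
// fsync the parent directory so the rename itself is durable. After a crash the
// reader sees either the complete old file or the complete new one, never a
// torn mix. The helper name and the std-only, blocking I/O are assumptions made
// for the sketch.
#[allow(dead_code)]
fn crashsafe_overwrite_sketch(
    path: &std::path::Path,
    temp_path: &std::path::Path,
    data: &[u8],
) -> std::io::Result<()> {
    use std::io::Write;

    let mut tmp = std::fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(temp_path)?;
    tmp.write_all(data)?; // write the full METADATA_MAX_SIZE image
    tmp.sync_all()?; // make the temp file contents durable before the rename
    std::fs::rename(temp_path, path)?; // atomically replace the old metadata file
    // On POSIX systems, fsyncing the parent directory persists the rename itself.
    if let Some(parent) = path.parent() {
        std::fs::File::open(parent)?.sync_all()?;
    }
    Ok(())
}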

#[derive(Error, Debug)]
pub enum LoadMetadataError {
    #[error(transparent)]
    Read(#[from] io::Error),

    #[error(transparent)]
    Decode(#[from] anyhow::Error),
}

pub fn load_metadata(
    conf: &'static PageServerConf,
    tenant_shard_id: &TenantShardId,
    timeline_id: &TimelineId,
) -> Result<TimelineMetadata, LoadMetadataError> {
    let metadata_path = conf.metadata_path(tenant_shard_id, timeline_id);
    let metadata_bytes = std::fs::read(metadata_path)?;

    Ok(TimelineMetadata::from_bytes(&metadata_bytes)?)
}
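
// Note added for clarity: `load_metadata` reads the whole file and defers
// validation to `TimelineMetadata::from_bytes`, which rejects files that are not
// exactly METADATA_MAX_SIZE bytes long or whose body checksum does not match, so
// truncated or torn metadata files surface as `LoadMetadataError::Decode`.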

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tenant::harness::TIMELINE_ID;

    #[test]
    fn metadata_serializes_correctly() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );

        let metadata_bytes = original_metadata
            .to_bytes()
            .expect("Should serialize correct metadata to bytes");

        let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes)
            .expect("Should deserialize its own bytes");

        assert_eq!(
            deserialized_metadata.body, original_metadata.body,
            "Metadata that was serialized to bytes and deserialized back should not change"
        );
    }

    // Generate old version metadata and read it with current code.
    // Ensure that it is upgraded correctly
    #[test]
    fn test_metadata_upgrade() {
        #[derive(Debug, Clone, PartialEq, Eq)]
        struct TimelineMetadataV1 {
            hdr: TimelineMetadataHeader,
            body: TimelineMetadataBodyV1,
        }

        let metadata_v1 = TimelineMetadataV1 {
            hdr: TimelineMetadataHeader {
                checksum: 0,
                size: 0,
                format_version: METADATA_OLD_FORMAT_VERSION,
            },
            body: TimelineMetadataBodyV1 {
                disk_consistent_lsn: Lsn(0x200),
                prev_record_lsn: Some(Lsn(0x100)),
                ancestor_timeline: Some(TIMELINE_ID),
                ancestor_lsn: Lsn(0),
                latest_gc_cutoff_lsn: Lsn(0),
                initdb_lsn: Lsn(0),
            },
        };

        impl TimelineMetadataV1 {
            pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
                let body_bytes = self.body.ser()?;
                let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
                let hdr = TimelineMetadataHeader {
                    size: metadata_size as u16,
                    format_version: METADATA_OLD_FORMAT_VERSION,
                    checksum: crc32c::crc32c(&body_bytes),
                };
                let hdr_bytes = hdr.ser()?;
                let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE];
                metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes);
                metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes);
                Ok(metadata_bytes)
            }
        }

        let metadata_bytes = metadata_v1
            .to_bytes()
            .expect("Should serialize correct metadata to bytes");

        // This should deserialize to the latest version format
        let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes)
            .expect("Should deserialize its own bytes");

        let expected_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            14, // All timelines created before this version had pg_version 14
        );

        assert_eq!(
            deserialized_metadata.body, expected_metadata.body,
            "Metadata of the old version {} should be upgraded to the latest version {}",
            METADATA_OLD_FORMAT_VERSION, METADATA_FORMAT_VERSION
        );
    }

    #[test]
    fn test_metadata_bincode_serde() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );
        let metadata_bytes = original_metadata
            .to_bytes()
            .expect("Cannot create bytes array from metadata");

        let metadata_bincode_be_bytes = original_metadata
            .ser()
            .expect("Cannot serialize the metadata");

        // 8 bytes for the length of the vector
        assert_eq!(metadata_bincode_be_bytes.len(), 8 + metadata_bytes.len());

        let expected_bincode_bytes = {
            let mut temp = vec![];
            let len_bytes = metadata_bytes.len().to_be_bytes();
            temp.extend_from_slice(&len_bytes);
            temp.extend_from_slice(&metadata_bytes);
            temp
        };
        assert_eq!(metadata_bincode_be_bytes, expected_bincode_bytes);

        let deserialized_metadata = TimelineMetadata::des(&metadata_bincode_be_bytes).unwrap();
        // The deserialized metadata has a filled-in header (size, format version,
        // checksum), which differs from the zeroed header of the hand-built original.
        // Reference: TimelineMetadata::to_bytes()
        let expected_metadata = {
            let mut temp_metadata = original_metadata;
            let body_bytes = temp_metadata
                .body
                .ser()
                .expect("Cannot serialize the metadata body");
            let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
            let hdr = TimelineMetadataHeader {
                size: metadata_size as u16,
                format_version: METADATA_FORMAT_VERSION,
                checksum: crc32c::crc32c(&body_bytes),
            };
            temp_metadata.hdr = hdr;
            temp_metadata
        };
        assert_eq!(deserialized_metadata, expected_metadata);
    }

    #[test]
    fn test_metadata_bincode_serde_ensure_roundtrip() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );
        let expected_bytes = vec![
            /* bincode length encoding bytes */
            0, 0, 0, 0, 0, 0, 2, 0, // 8 bytes for the length of the serialized vector
            /* TimelineMetadataHeader */
            4, 37, 101, 34, 0, 70, 0, 4, // checksum, size, format_version (4 + 2 + 2)
            /* TimelineMetadataBodyV2 */
            0, 0, 0, 0, 0, 0, 2, 0, // disk_consistent_lsn (8 bytes)
            1, 0, 0, 0, 0, 0, 0, 1, 0, // prev_record_lsn (9 bytes)
            1, 17, 34, 51, 68, 85, 102, 119, 136, 17, 34, 51, 68, 85, 102, 119,
            136, // ancestor_timeline (17 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // ancestor_lsn (8 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // latest_gc_cutoff_lsn (8 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // initdb_lsn (8 bytes)
            0, 0, 0, 15, // pg_version (4 bytes)
            /* padding bytes */
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
        ];
        let metadata_ser_bytes = original_metadata.ser().unwrap();
        assert_eq!(metadata_ser_bytes, expected_bytes);

        let expected_metadata = {
            let mut temp_metadata = original_metadata;
            let body_bytes = temp_metadata
                .body
                .ser()
                .expect("Cannot serialize the metadata body");
            let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
            let hdr = TimelineMetadataHeader {
                size: metadata_size as u16,
                format_version: METADATA_FORMAT_VERSION,
                checksum: crc32c::crc32c(&body_bytes),
            };
            temp_metadata.hdr = hdr;
            temp_metadata
        };
        let des_metadata = TimelineMetadata::des(&metadata_ser_bytes).unwrap();
        assert_eq!(des_metadata, expected_metadata);
    }
}