//! Every timeline of a [`crate::tenant::Tenant`] has metadata that needs to be stored
//! persistently.
//!
//! The metadata file is also used by [`remote_timeline_client`] as part of
//! external storage import and export operations.
//!
//! This module contains the structs and helper methods for working with timeline metadata.
//!
//! [`remote_timeline_client`]: super::remote_timeline_client

use std::io::{self};

use anyhow::{ensure, Context};
use pageserver_api::shard::TenantShardId;
use serde::{de::Error, Deserialize, Serialize, Serializer};
use thiserror::Error;
use utils::bin_ser::SerializeError;
use utils::crashsafe::path_with_suffix_extension;
use utils::{bin_ser::BeSer, id::TimelineId, lsn::Lsn};

use crate::config::PageServerConf;
use crate::virtual_file::VirtualFile;
use crate::TEMP_FILE_SUFFIX;

/// Current on-disk metadata format version; used for backward-compatibility checks
/// when reading metadata written by older pageserver versions.
const METADATA_FORMAT_VERSION: u16 = 4;

/// The previous format version, which can still be read (and is upgraded on load).
const METADATA_OLD_FORMAT_VERSION: u16 = 3;

/// We assume that a write of up to METADATA_MAX_SIZE bytes is atomic.
///
/// This is the same assumption that PostgreSQL makes with the control file,
/// see PG_CONTROL_MAX_SAFE_SIZE.
const METADATA_MAX_SIZE: usize = 512;
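
// A sketch of the on-disk layout, as produced by `TimelineMetadata::to_bytes` and checked by
// `from_bytes` below. The metadata is always a single METADATA_MAX_SIZE (512) byte blob:
//
//   - an 8-byte header: checksum (u32), size (u16), format_version (u16),
//   - the serialized body (`size - METADATA_HDR_SIZE` bytes), covered by the CRC32C checksum,
//   - zero padding up to METADATA_MAX_SIZE.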

/// Metadata stored on disk for each timeline
///
/// The fields correspond to the values we hold in memory, in Timeline.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct TimelineMetadata {
    hdr: TimelineMetadataHeader,
    body: TimelineMetadataBodyV2,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataHeader {
    checksum: u32,       // CRC of serialized metadata body
    size: u16,           // size of serialized metadata
    format_version: u16, // metadata format version (used for compatibility checks)
}
const METADATA_HDR_SIZE: usize = std::mem::size_of::<TimelineMetadataHeader>();
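// Note: this is the in-memory size of the header struct (8 bytes: u32 + u16 + u16, no padding).
// It matches the serialized header size as well, assuming the `BeSer` encoding writes each
// integer field verbatim; the expected byte layout in the tests below relies on the same
// 8-byte header.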

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV2 {
    disk_consistent_lsn: Lsn,
    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again. This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,
    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
    latest_gc_cutoff_lsn: Lsn,
    initdb_lsn: Lsn,
    pg_version: u32,
}

#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
struct TimelineMetadataBodyV1 {
    disk_consistent_lsn: Lsn,
    // This is only set if we know it. We track it in memory when the page
    // server is running, but we only track the value corresponding to
    // 'last_record_lsn', not 'disk_consistent_lsn' which can lag behind by a
    // lot. We only store it in the metadata file when we flush *all* the
    // in-memory data so that 'last_record_lsn' is the same as
    // 'disk_consistent_lsn'. That's OK, because after page server restart, as
    // soon as we reprocess at least one record, we will have a valid
    // 'prev_record_lsn' value in memory again. This is only really needed when
    // doing a clean shutdown, so that there is no more WAL beyond
    // 'disk_consistent_lsn'
    prev_record_lsn: Option<Lsn>,
    ancestor_timeline: Option<TimelineId>,
    ancestor_lsn: Lsn,
    latest_gc_cutoff_lsn: Lsn,
    initdb_lsn: Lsn,
}

impl TimelineMetadata {
    pub fn new(
        disk_consistent_lsn: Lsn,
        prev_record_lsn: Option<Lsn>,
        ancestor_timeline: Option<TimelineId>,
        ancestor_lsn: Lsn,
        latest_gc_cutoff_lsn: Lsn,
        initdb_lsn: Lsn,
        pg_version: u32,
    ) -> Self {
        Self {
            hdr: TimelineMetadataHeader {
                checksum: 0,
                size: 0,
                format_version: METADATA_FORMAT_VERSION,
            },
            body: TimelineMetadataBodyV2 {
                disk_consistent_lsn,
                prev_record_lsn,
                ancestor_timeline,
                ancestor_lsn,
                latest_gc_cutoff_lsn,
                initdb_lsn,
                pg_version,
            },
        }
    }

    fn upgrade_timeline_metadata(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        let mut hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        // backward compatible only up to this version
        ensure!(
            hdr.format_version == METADATA_OLD_FORMAT_VERSION,
            "unsupported metadata format version {}",
            hdr.format_version
        );

        let metadata_size = hdr.size as usize;

        let body: TimelineMetadataBodyV1 =
            TimelineMetadataBodyV1::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;

        let body = TimelineMetadataBodyV2 {
            disk_consistent_lsn: body.disk_consistent_lsn,
            prev_record_lsn: body.prev_record_lsn,
            ancestor_timeline: body.ancestor_timeline,
            ancestor_lsn: body.ancestor_lsn,
            latest_gc_cutoff_lsn: body.latest_gc_cutoff_lsn,
            initdb_lsn: body.initdb_lsn,
            pg_version: 14, // All timelines created before this version had pg_version 14
        };

        hdr.format_version = METADATA_FORMAT_VERSION;

        Ok(Self { hdr, body })
    }

    pub fn from_bytes(metadata_bytes: &[u8]) -> anyhow::Result<Self> {
        ensure!(
            metadata_bytes.len() == METADATA_MAX_SIZE,
            "metadata bytes size is wrong"
        );
        let hdr = TimelineMetadataHeader::des(&metadata_bytes[0..METADATA_HDR_SIZE])?;

        let metadata_size = hdr.size as usize;
        ensure!(
            metadata_size <= METADATA_MAX_SIZE,
            "corrupted metadata file"
        );
        let calculated_checksum = crc32c::crc32c(&metadata_bytes[METADATA_HDR_SIZE..metadata_size]);
        ensure!(
            hdr.checksum == calculated_checksum,
            "metadata checksum mismatch"
        );

        if hdr.format_version != METADATA_FORMAT_VERSION {
            // If metadata has the old format,
            // upgrade it and return the result
            TimelineMetadata::upgrade_timeline_metadata(metadata_bytes)
        } else {
            let body =
                TimelineMetadataBodyV2::des(&metadata_bytes[METADATA_HDR_SIZE..metadata_size])?;
            ensure!(
                body.disk_consistent_lsn.is_aligned(),
                "disk_consistent_lsn is not aligned"
            );
            Ok(TimelineMetadata { hdr, body })
        }
    }

    pub fn to_bytes(&self) -> Result<Vec<u8>, SerializeError> {
        let body_bytes = self.body.ser()?;
        let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
        let hdr = TimelineMetadataHeader {
            size: metadata_size as u16,
            format_version: METADATA_FORMAT_VERSION,
            checksum: crc32c::crc32c(&body_bytes),
        };
        let hdr_bytes = hdr.ser()?;
        let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE];
        metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes);
        metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes);
        Ok(metadata_bytes)
    }

    /// The [`Lsn`] up to which the contents of the timeline directory, stored locally in
    /// the pageserver workdir, are consistent.
    pub fn disk_consistent_lsn(&self) -> Lsn {
        self.body.disk_consistent_lsn
    }

    pub fn prev_record_lsn(&self) -> Option<Lsn> {
        self.body.prev_record_lsn
    }

    pub fn ancestor_timeline(&self) -> Option<TimelineId> {
        self.body.ancestor_timeline
    }

    pub fn ancestor_lsn(&self) -> Lsn {
        self.body.ancestor_lsn
    }

    pub fn latest_gc_cutoff_lsn(&self) -> Lsn {
        self.body.latest_gc_cutoff_lsn
    }

    pub fn initdb_lsn(&self) -> Lsn {
        self.body.initdb_lsn
    }

    pub fn pg_version(&self) -> u32 {
        self.body.pg_version
    }

    // Checksums make it awkward to build a valid instance by hand. This helper
    // provides a TimelineMetadata with a valid checksum in its header.
    #[cfg(test)]
    pub fn example() -> Self {
        let instance = Self::new(
            "0/16960E8".parse::<Lsn>().unwrap(),
            None,
            None,
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            Lsn::from_hex("00000000").unwrap(),
            0,
        );
        let bytes = instance.to_bytes().unwrap();
        Self::from_bytes(&bytes).unwrap()
    }
}

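// Serde support wraps the fixed-size binary format above: `Serialize` emits the 512-byte blob
// produced by `to_bytes` as a byte vector, and `Deserialize` parses such a vector back through
// `from_bytes`, so the checksum and format-version checks also apply to serde-encoded copies
// of the metadata.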
impl<'de> Deserialize<'de> for TimelineMetadata {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bytes = Vec::<u8>::deserialize(deserializer)?;
        Self::from_bytes(bytes.as_slice()).map_err(|e| D::Error::custom(format!("{e}")))
    }
}

impl Serialize for TimelineMetadata {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let bytes = self
            .to_bytes()
            .map_err(|e| serde::ser::Error::custom(format!("{e}")))?;
        bytes.serialize(serializer)
    }
}

/// Save timeline metadata to a file in the timeline's directory in the local pageserver
/// workdir. The write goes through `VirtualFile::crashsafe_overwrite`, using a temporary
/// file with `TEMP_FILE_SUFFIX`, to avoid leaving a partially written file under the
/// final path.
#[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))]
pub async fn save_metadata(
    conf: &'static PageServerConf,
    tenant_shard_id: &TenantShardId,
    timeline_id: &TimelineId,
    data: &TimelineMetadata,
) -> anyhow::Result<()> {
    let path = conf.metadata_path(tenant_shard_id, timeline_id);
    let temp_path = path_with_suffix_extension(&path, TEMP_FILE_SUFFIX);
    let metadata_bytes = data.to_bytes().context("serialize metadata")?;
    VirtualFile::crashsafe_overwrite(&path, &temp_path, &metadata_bytes)
        .await
        .context("write metadata")?;
    Ok(())
}

#[derive(Error, Debug)]
pub enum LoadMetadataError {
    #[error(transparent)]
    Read(#[from] io::Error),

    #[error(transparent)]
    Decode(#[from] anyhow::Error),
}

pub fn load_metadata(
    conf: &'static PageServerConf,
    tenant_shard_id: &TenantShardId,
    timeline_id: &TimelineId,
) -> Result<TimelineMetadata, LoadMetadataError> {
    let metadata_path = conf.metadata_path(tenant_shard_id, timeline_id);
    let metadata_bytes = std::fs::read(metadata_path)?;

    Ok(TimelineMetadata::from_bytes(&metadata_bytes)?)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::tenant::harness::TIMELINE_ID;

    #[test]
    fn metadata_serializes_correctly() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );

        let metadata_bytes = original_metadata
            .to_bytes()
            .expect("Should serialize correct metadata to bytes");

        let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes)
            .expect("Should deserialize its own bytes");

        assert_eq!(
            deserialized_metadata.body, original_metadata.body,
            "Metadata that was serialized to bytes and deserialized back should not change"
        );
    }

    // Generate old version metadata and read it with current code.
    // Ensure that it is upgraded correctly
    #[test]
    fn test_metadata_upgrade() {
        #[derive(Debug, Clone, PartialEq, Eq)]
        struct TimelineMetadataV1 {
            hdr: TimelineMetadataHeader,
            body: TimelineMetadataBodyV1,
        }

        let metadata_v1 = TimelineMetadataV1 {
            hdr: TimelineMetadataHeader {
                checksum: 0,
                size: 0,
                format_version: METADATA_OLD_FORMAT_VERSION,
            },
            body: TimelineMetadataBodyV1 {
                disk_consistent_lsn: Lsn(0x200),
                prev_record_lsn: Some(Lsn(0x100)),
                ancestor_timeline: Some(TIMELINE_ID),
                ancestor_lsn: Lsn(0),
                latest_gc_cutoff_lsn: Lsn(0),
                initdb_lsn: Lsn(0),
            },
        };

        impl TimelineMetadataV1 {
            pub fn to_bytes(&self) -> anyhow::Result<Vec<u8>> {
                let body_bytes = self.body.ser()?;
                let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
                let hdr = TimelineMetadataHeader {
                    size: metadata_size as u16,
                    format_version: METADATA_OLD_FORMAT_VERSION,
                    checksum: crc32c::crc32c(&body_bytes),
                };
                let hdr_bytes = hdr.ser()?;
                let mut metadata_bytes = vec![0u8; METADATA_MAX_SIZE];
                metadata_bytes[0..METADATA_HDR_SIZE].copy_from_slice(&hdr_bytes);
                metadata_bytes[METADATA_HDR_SIZE..metadata_size].copy_from_slice(&body_bytes);
                Ok(metadata_bytes)
            }
        }

        let metadata_bytes = metadata_v1
            .to_bytes()
            .expect("Should serialize correct metadata to bytes");

        // This should deserialize to the latest version format
        let deserialized_metadata = TimelineMetadata::from_bytes(&metadata_bytes)
            .expect("Should deserialize its own bytes");

        let expected_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            14, // All timelines created before this version had pg_version 14
        );

        assert_eq!(
            deserialized_metadata.body, expected_metadata.body,
            "Metadata of the old version {} should be upgraded to the latest version {}",
            METADATA_OLD_FORMAT_VERSION, METADATA_FORMAT_VERSION
        );
    }

    #[test]
    fn test_metadata_bincode_serde() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );
        let metadata_bytes = original_metadata
            .to_bytes()
            .expect("Cannot create bytes array from metadata");

        let metadata_bincode_be_bytes = original_metadata
            .ser()
            .expect("Cannot serialize the metadata");

        // 8 bytes for the length of the vector
        assert_eq!(metadata_bincode_be_bytes.len(), 8 + metadata_bytes.len());

        let expected_bincode_bytes = {
            let mut temp = vec![];
            let len_bytes = metadata_bytes.len().to_be_bytes();
            temp.extend_from_slice(&len_bytes);
            temp.extend_from_slice(&metadata_bytes);
            temp
        };
        assert_eq!(metadata_bincode_be_bytes, expected_bincode_bytes);

        let deserialized_metadata = TimelineMetadata::des(&metadata_bincode_be_bytes).unwrap();
        // The deserialized metadata has its header (size and checksum) filled in, unlike the
        // original built with `new()`, whose header is zeroed.
        // Reference: TimelineMetadata::to_bytes()
        let expected_metadata = {
            let mut temp_metadata = original_metadata;
            let body_bytes = temp_metadata
                .body
                .ser()
                .expect("Cannot serialize the metadata body");
            let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
            let hdr = TimelineMetadataHeader {
                size: metadata_size as u16,
                format_version: METADATA_FORMAT_VERSION,
                checksum: crc32c::crc32c(&body_bytes),
            };
            temp_metadata.hdr = hdr;
            temp_metadata
        };
        assert_eq!(deserialized_metadata, expected_metadata);
    }

    #[test]
    fn test_metadata_bincode_serde_ensure_roundtrip() {
        let original_metadata = TimelineMetadata::new(
            Lsn(0x200),
            Some(Lsn(0x100)),
            Some(TIMELINE_ID),
            Lsn(0),
            Lsn(0),
            Lsn(0),
            // Any version will do here, so use the default
            crate::DEFAULT_PG_VERSION,
        );
        let expected_bytes = vec![
            /* bincode length encoding bytes */
            0, 0, 0, 0, 0, 0, 2, 0, // 8 bytes for the length of the serialized vector
            /* TimelineMetadataHeader */
            4, 37, 101, 34, 0, 70, 0, 4, // checksum, size, format_version (4 + 2 + 2)
            /* TimelineMetadataBodyV2 */
            0, 0, 0, 0, 0, 0, 2, 0, // disk_consistent_lsn (8 bytes)
            1, 0, 0, 0, 0, 0, 0, 1, 0, // prev_record_lsn (9 bytes)
            1, 17, 34, 51, 68, 85, 102, 119, 136, 17, 34, 51, 68, 85, 102, 119,
            136, // ancestor_timeline (17 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // ancestor_lsn (8 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // latest_gc_cutoff_lsn (8 bytes)
            0, 0, 0, 0, 0, 0, 0, 0, // initdb_lsn (8 bytes)
            0, 0, 0, 15, // pg_version (4 bytes)
            /* padding bytes */
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
        ];
        let metadata_ser_bytes = original_metadata.ser().unwrap();
        assert_eq!(metadata_ser_bytes, expected_bytes);

        let expected_metadata = {
            let mut temp_metadata = original_metadata;
            let body_bytes = temp_metadata
                .body
                .ser()
                .expect("Cannot serialize the metadata body");
            let metadata_size = METADATA_HDR_SIZE + body_bytes.len();
            let hdr = TimelineMetadataHeader {
                size: metadata_size as u16,
                format_version: METADATA_FORMAT_VERSION,
                checksum: crc32c::crc32c(&body_bytes),
            };
            temp_metadata.hdr = hdr;
            temp_metadata
        };
        let des_metadata = TimelineMetadata::des(&metadata_ser_bytes).unwrap();
        assert_eq!(des_metadata, expected_metadata);
    }
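
    // A minimal additional check, sketched here for illustration: flipping any byte of the
    // serialized body should be rejected by the CRC32C checksum stored in the header.
    #[test]
    fn test_metadata_checksum_detects_corruption() {
        let metadata = TimelineMetadata::example();
        let mut metadata_bytes = metadata
            .to_bytes()
            .expect("Should serialize example metadata to bytes");

        // Corrupt the first byte of the body; the checksum in the header no longer matches.
        metadata_bytes[METADATA_HDR_SIZE] ^= 0xff;

        assert!(
            TimelineMetadata::from_bytes(&metadata_bytes).is_err(),
            "Corrupted metadata should fail the checksum check"
        );
    }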
}
|