LCOV - differential code coverage report
Current view:  top level - pageserver/src/tenant/storage_layer - inmemory_layer.rs (source / functions)
Current:       f6946e90941b557c917ac98cd5a7e9506d180f3e.info (2023-10-19 02:04:12)
Baseline:      c8637f37369098875162f194f92736355783b050.info (2023-10-18 20:25:20)

               Coverage    Total    Hit    UBC    CBC
Lines:           70.6 %      214    151     63    151
Functions:       63.3 %       30     19     11     19

           TLA  Line data    Source code
       1                 : //! An in-memory layer stores recently received key-value pairs.
       2                 : //!
       3                 : //! The "in-memory" part of the name is a bit misleading: the actual page versions are
       4                 : //! held in an ephemeral file, not in memory. The metadata for each page version, i.e.
       5                 : //! its position in the file, is kept in memory, though.
       6                 : //!
       7                 : use crate::config::PageServerConf;
       8                 : use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
       9                 : use crate::repository::{Key, Value};
      10                 : use crate::tenant::block_io::BlockReader;
      11                 : use crate::tenant::ephemeral_file::EphemeralFile;
      12                 : use crate::tenant::storage_layer::{ValueReconstructResult, ValueReconstructState};
      13                 : use crate::walrecord;
      14                 : use anyhow::{ensure, Result};
      15                 : use pageserver_api::models::InMemoryLayerInfo;
      16                 : use std::collections::HashMap;
      17                 : use std::sync::OnceLock;
      18                 : use tracing::*;
      19                 : use utils::{
      20                 :     bin_ser::BeSer,
      21                 :     id::{TenantId, TimelineId},
      22                 :     lsn::Lsn,
      23                 :     vec_map::VecMap,
      24                 : };
      25                 : // avoid binding to Write (conflicts with std::io::Write)
      26                 : // while being able to use std::fmt::Write's methods
      27                 : use std::fmt::Write as _;
      28                 : use std::ops::Range;
      29                 : use tokio::sync::RwLock;
      30                 : 
      31                 : use super::{DeltaLayer, DeltaLayerWriter, Layer};
      32                 : 
      33                 : pub struct InMemoryLayer {
      34                 :     conf: &'static PageServerConf,
      35                 :     tenant_id: TenantId,
      36                 :     timeline_id: TimelineId,
      37                 : 
      38                 :     /// This layer contains all the changes from 'start_lsn'. The
      39                 :     /// start is inclusive.
      40                 :     start_lsn: Lsn,
      41                 : 
      42                 :     /// Frozen layers have an exclusive end LSN.
      43                 :     /// Writes are only allowed when this is `None`.
      44                 :     end_lsn: OnceLock<Lsn>,
      45                 : 
      46                 :     /// The above fields never change, except for `end_lsn`, which is only set once.
       47                 :     /// All other changing parts are in `inner`, and protected by an `RwLock`.
      48                 :     inner: RwLock<InMemoryLayerInner>,
      49                 : }
      50                 : 
      51                 : impl std::fmt::Debug for InMemoryLayer {
      52 UBC           0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      53               0 :         f.debug_struct("InMemoryLayer")
      54               0 :             .field("start_lsn", &self.start_lsn)
      55               0 :             .field("end_lsn", &self.end_lsn)
      56               0 :             .field("inner", &self.inner)
      57               0 :             .finish()
      58               0 :     }
      59                 : }
      60                 : 
      61                 : pub struct InMemoryLayerInner {
      62                 :     /// All versions of all pages in the layer are kept here.  Indexed
      63                 :     /// by block number and LSN. The value is an offset into the
      64                 :     /// ephemeral file where the page version is stored.
      65                 :     index: HashMap<Key, VecMap<Lsn, u64>>,
      66                 : 
      67                 :     /// The values are stored in a serialized format in this file.
      68                 :     /// Each serialized Value is preceded by a 'u32' length field.
       69                 :     /// The `index` map above stores offsets into this file.
      70                 :     file: EphemeralFile,
      71                 : }
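// A minimal sketch to make the comment above concrete: conceptually, each write appends
// a 'u32' length field followed by the serialized Value bytes, and the starting offset
// of that record is what the `index` VecMaps store. This is an illustration only; the
// `Vec<u8>`-backed buffer and `append_serialized_value` are hypothetical stand-ins for
// EphemeralFile's real write path, and the byte order of the length field is assumed.
#[allow(dead_code)]
fn append_serialized_value(buffer: &mut Vec<u8>, serialized: &[u8]) -> u64 {
    let offset = buffer.len() as u64; // offset recorded in index[key]
    buffer.extend_from_slice(&(serialized.len() as u32).to_be_bytes()); // 'u32' length field
    buffer.extend_from_slice(serialized); // serialized Value bytes
    offset
}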
      72                 : 
      73                 : impl std::fmt::Debug for InMemoryLayerInner {
      74               0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      75               0 :         f.debug_struct("InMemoryLayerInner").finish()
      76               0 :     }
      77                 : }
      78                 : 
      79                 : impl InMemoryLayer {
      80 CBC           2 :     pub(crate) fn get_timeline_id(&self) -> TimelineId {
      81               2 :         self.timeline_id
      82               2 :     }
      83                 : 
      84              21 :     pub(crate) fn info(&self) -> InMemoryLayerInfo {
      85              21 :         let lsn_start = self.start_lsn;
      86                 : 
      87              21 :         if let Some(&lsn_end) = self.end_lsn.get() {
      88 UBC           0 :             InMemoryLayerInfo::Frozen { lsn_start, lsn_end }
      89                 :         } else {
      90 CBC          21 :             InMemoryLayerInfo::Open { lsn_start }
      91                 :         }
      92              21 :     }
      93                 : 
      94        76622219 :     pub(crate) fn assert_writable(&self) {
      95        76622219 :         assert!(self.end_lsn.get().is_none());
      96        76622219 :     }
      97                 : 
      98       104583751 :     pub(crate) fn end_lsn_or_max(&self) -> Lsn {
      99       104583751 :         self.end_lsn.get().copied().unwrap_or(Lsn::MAX)
     100       104583751 :     }
     101                 : 
     102       104578398 :     pub(crate) fn get_lsn_range(&self) -> Range<Lsn> {
     103       104578398 :         self.start_lsn..self.end_lsn_or_max()
     104       104578398 :     }
     105                 : 
     106                 :     /// debugging function to print out the contents of the layer
     107                 :     ///
      108                 :     /// this is likely completely unused
     109 UBC           0 :     pub async fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()> {
     110               0 :         let inner = self.inner.read().await;
     111                 : 
     112               0 :         let end_str = self.end_lsn_or_max();
     113               0 : 
     114               0 :         println!(
     115               0 :             "----- in-memory layer for tli {} LSNs {}-{} ----",
     116               0 :             self.timeline_id, self.start_lsn, end_str,
     117               0 :         );
     118               0 : 
     119               0 :         if !verbose {
     120               0 :             return Ok(());
     121               0 :         }
     122               0 : 
     123               0 :         let cursor = inner.file.block_cursor();
     124               0 :         let mut buf = Vec::new();
     125               0 :         for (key, vec_map) in inner.index.iter() {
     126               0 :             for (lsn, pos) in vec_map.as_slice() {
     127               0 :                 let mut desc = String::new();
     128               0 :                 cursor.read_blob_into_buf(*pos, &mut buf, ctx).await?;
     129               0 :                 let val = Value::des(&buf);
     130               0 :                 match val {
     131               0 :                     Ok(Value::Image(img)) => {
     132               0 :                         write!(&mut desc, " img {} bytes", img.len())?;
     133                 :                     }
     134               0 :                     Ok(Value::WalRecord(rec)) => {
     135               0 :                         let wal_desc = walrecord::describe_wal_record(&rec).unwrap();
     136               0 :                         write!(
     137               0 :                             &mut desc,
     138               0 :                             " rec {} bytes will_init: {} {}",
     139               0 :                             buf.len(),
     140               0 :                             rec.will_init(),
     141               0 :                             wal_desc
     142               0 :                         )?;
     143                 :                     }
     144               0 :                     Err(err) => {
     145               0 :                         write!(&mut desc, " DESERIALIZATION ERROR: {}", err)?;
     146                 :                     }
     147                 :                 }
     148               0 :                 println!("  key {} at {}: {}", key, lsn, desc);
     149                 :             }
     150                 :         }
     151                 : 
     152               0 :         Ok(())
     153               0 :     }
     154                 : 
     155                 :     /// Look up given value in the layer.
     156 CBC     5463334 :     pub(crate) async fn get_value_reconstruct_data(
     157         5463334 :         &self,
     158         5463334 :         key: Key,
     159         5463334 :         lsn_range: Range<Lsn>,
     160         5463334 :         reconstruct_state: &mut ValueReconstructState,
     161         5463334 :         ctx: &RequestContext,
     162         5463334 :     ) -> anyhow::Result<ValueReconstructResult> {
     163         5463331 :         ensure!(lsn_range.start >= self.start_lsn);
     164         5463331 :         let mut need_image = true;
     165         5463331 : 
     166         5463331 :         let ctx = RequestContextBuilder::extend(ctx)
     167         5463331 :             .page_content_kind(PageContentKind::InMemoryLayer)
     168         5463331 :             .build();
     169                 : 
     170         5463331 :         let inner = self.inner.read().await;
     171                 : 
     172         5463330 :         let reader = inner.file.block_cursor();
     173                 : 
     174                 :         // Scan the page versions backwards, starting from `lsn`.
     175         5463330 :         if let Some(vec_map) = inner.index.get(&key) {
     176         3156997 :             let slice = vec_map.slice_range(lsn_range);
     177        31272819 :             for (entry_lsn, pos) in slice.iter().rev() {
     178        31272819 :                 let buf = reader.read_blob(*pos, &ctx).await?;
     179        31272819 :                 let value = Value::des(&buf)?;
     180        31272819 :                 match value {
     181         1560787 :                     Value::Image(img) => {
     182         1560787 :                         reconstruct_state.img = Some((*entry_lsn, img));
     183         1560787 :                         return Ok(ValueReconstructResult::Complete);
     184                 :                     }
     185        29712032 :                     Value::WalRecord(rec) => {
     186        29712032 :                         let will_init = rec.will_init();
     187        29712032 :                         reconstruct_state.records.push((*entry_lsn, rec));
     188        29712032 :                         if will_init {
     189                 :                             // This WAL record initializes the page, so no need to go further back
     190          473619 :                             need_image = false;
     191          473619 :                             break;
     192        29238413 :                         }
     193                 :                     }
     194                 :                 }
     195                 :             }
     196         2306333 :         }
     197                 : 
     198                 :         // release lock on 'inner'
     199                 : 
     200                 :         // If an older page image is needed to reconstruct the page, let the
     201                 :         // caller know.
     202         3902543 :         if need_image {
     203         3428924 :             Ok(ValueReconstructResult::Continue)
     204                 :         } else {
     205          473619 :             Ok(ValueReconstructResult::Complete)
     206                 :         }
     207         5463330 :     }
     208                 : }
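// A minimal sketch, written as a unit test, of the newest-to-oldest scan order that
// get_value_reconstruct_data relies on above. It uses only the VecMap operations that
// already appear in this file; the LSNs and offsets are made up for illustration.
#[cfg(test)]
mod scan_order_example {
    use super::*;

    #[test]
    fn backward_scan_visits_newest_entry_in_range_first() {
        let mut vec_map: VecMap<Lsn, u64> = VecMap::default();
        vec_map.append_or_update_last(Lsn(10), 100).unwrap();
        vec_map.append_or_update_last(Lsn(20), 200).unwrap();
        vec_map.append_or_update_last(Lsn(30), 300).unwrap();

        // For lsn_range 0..25 the slice holds the LSN 10 and 20 entries; iterating it
        // in reverse visits LSN 20 first, so the scan proceeds newest-to-oldest and can
        // stop early at the first page image or will_init WAL record it encounters.
        let newest_first: Vec<Lsn> = vec_map
            .slice_range(Lsn(0)..Lsn(25))
            .iter()
            .rev()
            .map(|(lsn, _off)| *lsn)
            .collect();
        assert_eq!(newest_first, vec![Lsn(20), Lsn(10)]);
    }
}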
     209                 : 
     210                 : #[async_trait::async_trait]
     211                 : impl Layer for InMemoryLayer {
     212 UBC           0 :     async fn get_value_reconstruct_data(
     213               0 :         &self,
     214               0 :         key: Key,
     215               0 :         lsn_range: Range<Lsn>,
     216               0 :         reconstruct_data: &mut ValueReconstructState,
     217               0 :         ctx: &RequestContext,
     218               0 :     ) -> Result<ValueReconstructResult> {
     219               0 :         self.get_value_reconstruct_data(key, lsn_range, reconstruct_data, ctx)
     220               0 :             .await
     221               0 :     }
     222                 : }
     223                 : 
     224                 : impl std::fmt::Display for InMemoryLayer {
     225 CBC        5353 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     226            5353 :         let end_lsn = self.end_lsn_or_max();
     227            5353 :         write!(f, "inmem-{:016X}-{:016X}", self.start_lsn.0, end_lsn.0)
     228            5353 :     }
     229                 : }
     230                 : 
     231                 : impl InMemoryLayer {
     232                 :     ///
     233                 :     /// Get layer size.
     234                 :     ///
     235          776346 :     pub async fn size(&self) -> Result<u64> {
     236          776346 :         let inner = self.inner.read().await;
     237          776346 :         Ok(inner.file.len())
     238          776346 :     }
     239                 : 
     240                 :     ///
     241                 :     /// Create a new, empty, in-memory layer
     242                 :     ///
     243            5679 :     pub async fn create(
     244            5679 :         conf: &'static PageServerConf,
     245            5679 :         timeline_id: TimelineId,
     246            5679 :         tenant_id: TenantId,
     247            5679 :         start_lsn: Lsn,
     248            5679 :     ) -> Result<InMemoryLayer> {
     249 UBC           0 :         trace!("initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}");
     250                 : 
     251 CBC        5679 :         let file = EphemeralFile::create(conf, tenant_id, timeline_id).await?;
     252                 : 
     253            5679 :         Ok(InMemoryLayer {
     254            5679 :             conf,
     255            5679 :             timeline_id,
     256            5679 :             tenant_id,
     257            5679 :             start_lsn,
     258            5679 :             end_lsn: OnceLock::new(),
     259            5679 :             inner: RwLock::new(InMemoryLayerInner {
     260            5679 :                 index: HashMap::new(),
     261            5679 :                 file,
     262            5679 :             }),
     263            5679 :         })
     264            5679 :     }
     265                 : 
     266                 :     // Write operations
     267                 : 
     268                 :     /// Common subroutine of the public put_wal_record() and put_page_image() functions.
      269                 :     /// Adds the page version to the in-memory index.
     270        76622220 :     pub async fn put_value(
     271        76622220 :         &self,
     272        76622220 :         key: Key,
     273        76622220 :         lsn: Lsn,
     274        76622220 :         val: &Value,
     275        76622220 :         ctx: &RequestContext,
     276        76622220 :     ) -> Result<()> {
     277 UBC           0 :         trace!("put_value key {} at {}/{}", key, self.timeline_id, lsn);
     278 CBC    76622114 :         let inner: &mut _ = &mut *self.inner.write().await;
     279        76622114 :         self.assert_writable();
     280                 : 
     281        76622112 :         let off = {
     282                 :             // Avoid doing allocations for "small" values.
     283                 :             // In the regression test suite, the limit of 256 avoided allocations in 95% of cases:
     284                 :             // https://github.com/neondatabase/neon/pull/5056#discussion_r1301975061
     285        76622114 :             let mut buf = smallvec::SmallVec::<[u8; 256]>::new();
     286        76622114 :             buf.clear();
     287        76622114 :             val.ser_into(&mut buf)?;
     288        76622114 :             inner
     289        76622114 :                 .file
     290        76622114 :                 .write_blob(
     291        76622114 :                     &buf,
     292        76622114 :                     &RequestContextBuilder::extend(ctx)
     293        76622114 :                         .page_content_kind(PageContentKind::InMemoryLayer)
     294        76622114 :                         .build(),
     295        76622114 :                 )
     296           25627 :                 .await?
     297                 :         };
     298                 : 
     299        76622112 :         let vec_map = inner.index.entry(key).or_default();
     300        76622112 :         let old = vec_map.append_or_update_last(lsn, off).unwrap().0;
     301        76622112 :         if old.is_some() {
     302                 :             // We already had an entry for this LSN. That's odd..
     303 UBC           0 :             warn!("Key {} at {} already exists", key, lsn);
     304 CBC    76622111 :         }
     305                 : 
     306        76622111 :         Ok(())
     307        76622111 :     }
     308                 : 
     309           17687 :     pub async fn put_tombstone(&self, _key_range: Range<Key>, _lsn: Lsn) -> Result<()> {
     310           17687 :         // TODO: Currently, we just leak the storage for any deleted keys
     311           17687 : 
     312           17687 :         Ok(())
     313           17687 :     }
     314                 : 
     315                 :     /// Make the layer non-writeable. Only call once.
     316                 :     /// Records the end_lsn for non-dropped layers.
     317                 :     /// `end_lsn` is exclusive
     318            5385 :     pub async fn freeze(&self, end_lsn: Lsn) {
     319            5385 :         let inner = self.inner.write().await;
     320                 : 
     321            5385 :         assert!(self.start_lsn < end_lsn);
     322            5385 :         self.end_lsn.set(end_lsn).expect("end_lsn set only once");
     323                 : 
     324         4612127 :         for vec_map in inner.index.values() {
     325        53584553 :             for (lsn, _pos) in vec_map.as_slice() {
     326        53584553 :                 assert!(*lsn < end_lsn);
     327                 :             }
     328                 :         }
     329            5385 :     }
     330                 : 
     331                 :     /// Write this frozen in-memory layer to disk.
     332                 :     ///
     333                 :     /// Returns a new delta layer with all the same data as this in-memory layer
     334            5312 :     pub(crate) async fn write_to_disk(&self, ctx: &RequestContext) -> Result<DeltaLayer> {
     335                 :         // Grab the lock in read-mode. We hold it over the I/O, but because this
     336                 :         // layer is not writeable anymore, no one should be trying to acquire the
     337                 :         // write lock on it, so we shouldn't block anyone. There's one exception
     338                 :         // though: another thread might have grabbed a reference to this layer
      339                 :         // in `get_layer_for_write` just before the checkpointer called
     340                 :         // `freeze`, and then `write_to_disk` on it. When the thread gets the
     341                 :         // lock, it will see that it's not writeable anymore and retry, but it
     342                 :         // would have to wait until we release it. That race condition is very
     343                 :         // rare though, so we just accept the potential latency hit for now.
     344            5312 :         let inner = self.inner.read().await;
     345                 : 
     346            5312 :         let end_lsn = *self.end_lsn.get().unwrap();
     347                 : 
     348            5312 :         let mut delta_layer_writer = DeltaLayerWriter::new(
     349            5312 :             self.conf,
     350            5312 :             self.timeline_id,
     351            5312 :             self.tenant_id,
     352            5312 :             Key::MIN,
     353            5312 :             self.start_lsn..end_lsn,
     354            5312 :         )
     355 UBC           0 :         .await?;
     356                 : 
     357 CBC        5312 :         let mut buf = Vec::new();
     358            5312 : 
     359            5312 :         let cursor = inner.file.block_cursor();
     360            5312 : 
     361            5312 :         let mut keys: Vec<(&Key, &VecMap<Lsn, u64>)> = inner.index.iter().collect();
     362       104387946 :         keys.sort_by_key(|k| k.0);
     363            5312 : 
     364            5312 :         let ctx = RequestContextBuilder::extend(ctx)
     365            5312 :             .page_content_kind(PageContentKind::InMemoryLayer)
     366            5312 :             .build();
     367         4591873 :         for (key, vec_map) in keys.iter() {
     368         4591873 :             let key = **key;
     369                 :             // Write all page versions
     370        53152217 :             for (lsn, pos) in vec_map.as_slice() {
     371        53152217 :                 cursor.read_blob_into_buf(*pos, &mut buf, &ctx).await?;
     372        53152214 :                 let will_init = Value::des(&buf)?.will_init();
     373        53152214 :                 delta_layer_writer
     374        53152214 :                     .put_value_bytes(key, *lsn, &buf, will_init)
     375 UBC           0 :                     .await?;
     376                 :             }
     377                 :         }
     378                 : 
     379 CBC        5308 :         let delta_layer = delta_layer_writer.finish(Key::MAX).await?;
     380            5308 :         Ok(delta_layer)
     381            5308 :     }
     382                 : }
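// Tying the write path together: a hedged sketch of the typical lifecycle of an
// InMemoryLayer, using only functions defined above. How the caller obtains the
// config, ids, LSNs, key/value and RequestContext is outside the scope of this file,
// and this helper itself is illustrative rather than part of the actual API.
#[allow(dead_code)]
async fn example_layer_lifecycle(
    conf: &'static PageServerConf,
    timeline_id: TimelineId,
    tenant_id: TenantId,
    start_lsn: Lsn,
    lsn: Lsn,     // start_lsn <= lsn < end_lsn
    end_lsn: Lsn, // exclusive; must be greater than start_lsn
    key: Key,
    val: Value,
    ctx: &RequestContext,
) -> Result<DeltaLayer> {
    // Open a new, writable in-memory layer covering start_lsn onwards.
    let layer = InMemoryLayer::create(conf, timeline_id, tenant_id, start_lsn).await?;

    // Ingest values while the layer is open; the bytes land in the ephemeral file and
    // the (lsn, offset) pair is recorded in the in-memory index.
    layer.put_value(key, lsn, &val, ctx).await?;

    // Make the layer read-only; after this, assert_writable() would panic.
    layer.freeze(end_lsn).await;

    // Persist the frozen layer as a delta layer on disk.
    layer.write_to_disk(ctx).await
}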
        

Generated by: LCOV version 2.1-beta