// storage_scrubber/src/checks.rs
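//! Consistency checks for a tenant's timeline data in S3: each timeline's
//! `index_part.json` is validated against the layer objects actually present,
//! and findings are classified as errors, warnings, or garbage keys.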
use std::collections::{HashMap, HashSet};

use anyhow::Context;
use aws_sdk_s3::Client;
use pageserver::tenant::layer_map::LayerMap;
use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata;
use pageserver_api::shard::ShardIndex;
use tracing::{error, info, warn};
use utils::generation::Generation;
use utils::id::TimelineId;

use crate::cloud_admin_api::BranchData;
use crate::metadata_stream::stream_listing;
use crate::{download_object_with_retries, RootTarget, TenantShardTimelineId};
use futures_util::StreamExt;
use pageserver::tenant::remote_timeline_client::{parse_remote_index_path, remote_layer_path};
use pageserver::tenant::storage_layer::LayerName;
use pageserver::tenant::IndexPart;
use remote_storage::RemotePath;

pub(crate) struct TimelineAnalysis {
    /// Anomalies detected
    pub(crate) errors: Vec<String>,

    /// Healthy but noteworthy, e.g. old-version structures that are still readable but
    /// worth reporting so that we know not to remove the decoding for that old version
    /// yet.
    pub(crate) warnings: Vec<String>,

    /// Keys not referenced in metadata: candidates for removal, but NOT NECESSARILY so;
    /// beware of races between reading the metadata and listing the objects.
    pub(crate) garbage_keys: Vec<String>,
}

impl TimelineAnalysis {
    fn new() -> Self {
        Self {
            errors: Vec::new(),
            warnings: Vec::new(),
            garbage_keys: Vec::new(),
        }
    }

    /// Whether a timeline is healthy.
    pub(crate) fn is_healthy(&self) -> bool {
        self.errors.is_empty() && self.warnings.is_empty()
    }
}

pub(crate) async fn branch_cleanup_and_check_errors(
    s3_client: &Client,
    target: &RootTarget,
    id: &TenantShardTimelineId,
    tenant_objects: &mut TenantObjectListing,
    s3_active_branch: Option<&BranchData>,
    console_branch: Option<BranchData>,
    s3_data: Option<S3TimelineBlobData>,
) -> TimelineAnalysis {
    let mut result = TimelineAnalysis::new();

    info!("Checking timeline {id}");

    if let Some(s3_active_branch) = s3_active_branch {
        info!(
            "Checking console status for timeline for branch {:?}/{:?}",
            s3_active_branch.project_id, s3_active_branch.id
        );
        match console_branch {
            Some(_) => result.errors.push(format!(
                "Timeline has deleted branch data in the console (id = {:?}, project_id = {:?}), recheck whether it got removed during the check",
                s3_active_branch.id, s3_active_branch.project_id
            )),
            None => result.errors.push(format!(
                "Timeline has no branch data in the console (id = {:?}, project_id = {:?}), recheck whether it got removed during the check",
                s3_active_branch.id, s3_active_branch.project_id
            )),
        };
    }

    match s3_data {
        Some(s3_data) => {
            result.garbage_keys.extend(s3_data.unknown_keys);

            match s3_data.blob_data {
                BlobDataParseResult::Parsed {
                    index_part,
                    index_part_generation: _index_part_generation,
                    s3_layers: _s3_layers,
                } => {
                    if !IndexPart::KNOWN_VERSIONS.contains(&index_part.version()) {
                        result
                            .errors
                            .push(format!("index_part.json version: {}", index_part.version()))
                    }

                    let mut newest_versions = IndexPart::KNOWN_VERSIONS.iter().rev().take(2);
                    if !newest_versions.any(|ip| ip == &index_part.version()) {
                        info!(
                            "index_part.json version is not latest: {}",
                            index_part.version()
                        );
                    }

                    if index_part.metadata.disk_consistent_lsn()
                        != index_part.duplicated_disk_consistent_lsn()
                    {
                        // Tech debt: let's get rid of one of these, they are redundant
                        // https://github.com/neondatabase/neon/issues/8343
                        result.errors.push(format!(
                            "Mismatching disk_consistent_lsn in TimelineMetadata ({}) and in the index_part ({})",
                            index_part.metadata.disk_consistent_lsn(),
                            index_part.duplicated_disk_consistent_lsn(),
                        ))
                    }

                    if index_part.layer_metadata.is_empty() {
                        if index_part.metadata.ancestor_timeline().is_none() {
                            // The initial timeline with no ancestor should ALWAYS have layers.
                            result.errors.push(
                                "index_part.json has no layers (ancestor_timeline=None)"
                                    .to_string(),
                            );
                        } else {
                            // Not an error: this can happen for branches with zero writes, but note it anyway.
                            info!("index_part.json has no layers (ancestor_timeline exists)");
                        }
                    }

                    for (layer, metadata) in index_part.layer_metadata {
                        if metadata.file_size == 0 {
                            result.errors.push(format!(
                                "index_part.json contains a layer {} that has 0 size in its layer metadata", layer,
                            ))
                        }

                        if !tenant_objects.check_ref(id.timeline_id, &layer, &metadata) {
                            let path = remote_layer_path(
                                &id.tenant_shard_id.tenant_id,
                                &id.timeline_id,
                                metadata.shard,
                                &layer,
                                metadata.generation,
                            );

                            // A HEAD request is used here to address a race: an index may have been
                            // uploaded concurrently with our scan, so check whether the object exists
                            // in S3 now, after taking the listing snapshot.
                            let response = s3_client
                                .head_object()
                                .bucket(target.bucket_name())
                                .key(path.get_path().as_str())
                                .send()
                                .await;

                            if response.is_err() {
                                // Object is not present.
                                let is_l0 = LayerMap::is_l0(layer.key_range());

                                let msg = format!(
                                    "index_part.json contains a layer {}{} (shard {}) that is not present in remote storage (layer_is_l0: {})",
                                    layer,
                                    metadata.generation.get_suffix(),
                                    metadata.shard,
                                    is_l0,
                                );

                                if is_l0 {
                                    result.warnings.push(msg);
                                } else {
                                    result.errors.push(msg);
                                }
                            }
                        }
                    }
                }
                BlobDataParseResult::Relic => {}
                BlobDataParseResult::Incorrect(parse_errors) => result.errors.extend(
                    parse_errors
                        .into_iter()
                        .map(|error| format!("parse error: {error}")),
                ),
            }
        }
        None => result
            .errors
            .push("Timeline has no data on S3 at all".to_string()),
    }

    if result.errors.is_empty() {
        info!("No check errors found");
    } else {
        warn!("Timeline metadata errors: {0:?}", result.errors);
    }

    if !result.warnings.is_empty() {
        warn!("Timeline metadata warnings: {0:?}", result.warnings);
    }

    if !result.garbage_keys.is_empty() {
        error!(
            "The following keys should be removed from S3: {0:?}",
            result.garbage_keys
        )
    }

    result
}

#[derive(Default)]
pub(crate) struct LayerRef {
    ref_count: usize,
}

/// Top-level index of objects in a tenant. This may be used by any shard-timeline within
/// the tenant to query whether an object exists.
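///
/// A minimal usage sketch (illustrative only; `ttid`, `s3_layers`, and `index_part`
/// are assumed to come from the surrounding scan, e.g. `list_timeline_blobs`):
///
/// ```ignore
/// let mut listing = TenantObjectListing::default();
/// // Record the layer objects found when listing a shard-timeline prefix.
/// listing.push(ttid, s3_layers);
/// // Mark every layer referenced by a loaded index as used.
/// for (layer, metadata) in &index_part.layer_metadata {
///     listing.check_ref(ttid.timeline_id, layer, metadata);
/// }
/// // Layers that no index referenced are orphan candidates.
/// let orphans = listing.get_orphans();
/// ```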
#[derive(Default)]
pub(crate) struct TenantObjectListing {
    shard_timelines: HashMap<(ShardIndex, TimelineId), HashMap<(LayerName, Generation), LayerRef>>,
}

impl TenantObjectListing {
    /// Having done an S3 listing of the keys within a timeline prefix, merge them into the overall
    /// list of layer keys for the Tenant.
    pub(crate) fn push(
        &mut self,
        ttid: TenantShardTimelineId,
        layers: HashSet<(LayerName, Generation)>,
    ) {
        let shard_index = ShardIndex::new(
            ttid.tenant_shard_id.shard_number,
            ttid.tenant_shard_id.shard_count,
        );
        let replaced = self.shard_timelines.insert(
            (shard_index, ttid.timeline_id),
            layers
                .into_iter()
                .map(|l| (l, LayerRef::default()))
                .collect(),
        );

        assert!(
            replaced.is_none(),
            "Built from an S3 object listing, which should never repeat a key"
        );
    }

    /// Having loaded a timeline index, check if a layer referenced by the index exists. If it does,
    /// the layer's refcount will be incremented. Later, after calling this for all references in
    /// all indices in a tenant, orphan layers may be detected by their zero refcounts.
    ///
    /// Returns true if the layer exists.
    pub(crate) fn check_ref(
        &mut self,
        timeline_id: TimelineId,
        layer_file: &LayerName,
        metadata: &LayerFileMetadata,
    ) -> bool {
        let Some(shard_tl) = self.shard_timelines.get_mut(&(metadata.shard, timeline_id)) else {
            return false;
        };

        let Some(layer_ref) = shard_tl.get_mut(&(layer_file.clone(), metadata.generation)) else {
            return false;
        };

        layer_ref.ref_count += 1;

        true
    }

    pub(crate) fn get_orphans(&self) -> Vec<(ShardIndex, TimelineId, LayerName, Generation)> {
        let mut result = Vec::new();
        for ((shard_index, timeline_id), layers) in &self.shard_timelines {
            for ((layer_file, generation), layer_ref) in layers {
                if layer_ref.ref_count == 0 {
                    result.push((*shard_index, *timeline_id, layer_file.clone(), *generation))
                }
            }
        }

        result
    }
}

#[derive(Debug)]
pub(crate) struct S3TimelineBlobData {
    pub(crate) blob_data: BlobDataParseResult,

    // Index objects that were not used when loading `blob_data`, e.g. those from old generations
    pub(crate) unused_index_keys: Vec<String>,

    // Objects whose keys were not recognized at all, i.e. not layer files, not indices
    pub(crate) unknown_keys: Vec<String>,
}

#[derive(Debug)]
pub(crate) enum BlobDataParseResult {
    Parsed {
        index_part: Box<IndexPart>,
        index_part_generation: Generation,
        s3_layers: HashSet<(LayerName, Generation)>,
    },
    /// The remains of a deleted Timeline (i.e. an initdb archive only)
    Relic,
    Incorrect(Vec<String>),
}

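/// Parse a layer object name into a [`LayerName`] and its [`Generation`].
///
/// Illustrative examples (the suffix format is inferred from the 8-character check
/// below): a key like `<layer file name>-00000001` parses to the layer name plus that
/// generation, while a bare layer file name parses with `Generation::none()`.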
pub(crate) fn parse_layer_object_name(name: &str) -> Result<(LayerName, Generation), String> {
    match name.rsplit_once('-') {
        // FIXME: this is gross, just use a regex?
        Some((layer_filename, gen)) if gen.len() == 8 => {
            let layer = layer_filename.parse::<LayerName>()?;
            let gen =
                Generation::parse_suffix(gen).ok_or("Malformed generation suffix".to_string())?;
            Ok((layer, gen))
        }
        _ => Ok((name.parse::<LayerName>()?, Generation::none())),
    }
}

pub(crate) async fn list_timeline_blobs(
    s3_client: &Client,
    id: TenantShardTimelineId,
    s3_root: &RootTarget,
) -> anyhow::Result<S3TimelineBlobData> {
    let mut s3_layers = HashSet::new();

    let mut errors = Vec::new();
    let mut unknown_keys = Vec::new();

    let mut timeline_dir_target = s3_root.timeline_root(&id);
    timeline_dir_target.delimiter = String::new();

    let mut index_part_keys: Vec<String> = Vec::new();
    let mut initdb_archive: bool = false;

    let mut stream = std::pin::pin!(stream_listing(s3_client, &timeline_dir_target));
    while let Some(obj) = stream.next().await {
        let obj = obj?;
        let key = obj.key();

        let blob_name = key.strip_prefix(&timeline_dir_target.prefix_in_bucket);
        match blob_name {
            Some(name) if name.starts_with("index_part.json") => {
                tracing::debug!("Index key {key}");
                index_part_keys.push(key.to_owned())
            }
            Some("initdb.tar.zst") => {
                tracing::debug!("initdb archive {key}");
                initdb_archive = true;
            }
            Some("initdb-preserved.tar.zst") => {
                tracing::info!("initdb archive preserved {key}");
            }
            Some(maybe_layer_name) => match parse_layer_object_name(maybe_layer_name) {
                Ok((new_layer, gen)) => {
                    tracing::debug!("Parsed layer key: {} {:?}", new_layer, gen);
                    s3_layers.insert((new_layer, gen));
                }
                Err(e) => {
                    tracing::info!("Error parsing key {maybe_layer_name}");
                    errors.push(format!(
                        "S3 list response got an object with key {key} that is not a layer name: {e}"
                    ));
                    unknown_keys.push(key.to_string());
                }
            },
            None => {
                tracing::warn!("Unknown key {}", key);
                errors.push(format!("S3 list response got an object with odd key {key}"));
                unknown_keys.push(key.to_string());
            }
        }
    }

    if index_part_keys.is_empty() && s3_layers.is_empty() && initdb_archive {
        tracing::debug!(
            "Timeline is empty apart from initdb archive: expected post-deletion state."
        );
        return Ok(S3TimelineBlobData {
            blob_data: BlobDataParseResult::Relic,
            unused_index_keys: index_part_keys,
            unknown_keys: Vec::new(),
        });
    }

    // Choose the index_part with the highest generation.
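    // Illustrative note: generation-aware pageservers write suffixed index keys
    // (e.g. `index_part.json-00000001`), while legacy pageservers wrote a bare
    // `index_part.json`; the suffix is parsed by `parse_remote_index_path` below.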
    let (index_part_object, index_part_generation) = match index_part_keys
        .iter()
        .filter_map(|key| {
            // Stripping the index key to the last part, because RemotePath doesn't
            // like absolute paths, and depending on prefix_in_bucket it's possible
            // for the keys we read back to start with a slash.
            let basename = key.rsplit_once('/').unwrap().1;
            parse_remote_index_path(RemotePath::from_string(basename).unwrap()).map(|g| (key, g))
        })
        .max_by_key(|i| i.1)
        .map(|(k, g)| (k.clone(), g))
    {
        Some((key, gen)) => (Some(key), gen),
        None => {
            // Legacy/missing case: one or zero index parts, which did not have a generation.
            (index_part_keys.pop(), Generation::none())
        }
    };

    match index_part_object.as_ref() {
        Some(selected) => index_part_keys.retain(|k| k != selected),
        None => {
            errors.push("S3 list response got no index_part.json file".to_string());
        }
    }

    if let Some(index_part_object_key) = index_part_object.as_ref() {
        let index_part_bytes = download_object_with_retries(
            s3_client,
            &timeline_dir_target.bucket_name,
            index_part_object_key,
        )
        .await
        .context("index_part.json download")?;

        match serde_json::from_slice(&index_part_bytes) {
            Ok(index_part) => {
                return Ok(S3TimelineBlobData {
                    blob_data: BlobDataParseResult::Parsed {
                        index_part: Box::new(index_part),
                        index_part_generation,
                        s3_layers,
                    },
                    unused_index_keys: index_part_keys,
                    unknown_keys,
                })
            }
            Err(index_parse_error) => errors.push(format!(
                "index_part.json body parsing error: {index_parse_error}"
            )),
        }
    }

    if errors.is_empty() {
        errors.push(
            "Unexpected: no errors were recorded, yet we did not return successfully parsed blob data"
                .to_string(),
        );
    }

    Ok(S3TimelineBlobData {
        blob_data: BlobDataParseResult::Incorrect(errors),
        unused_index_keys: index_part_keys,
        unknown_keys,
    })
}
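
// Minimal illustrative checks for `parse_layer_object_name` (a sketch: real layer
// names are long hex-encoded key/LSN ranges, which these deliberately are not).
// Keys that do not parse as layer file names are rejected, with or without an
// 8-character generation-style suffix.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_layer_object_name_rejects_non_layer_keys() {
        // No 8-character segment after the last '-', so the whole key must parse as a
        // layer name; it does not.
        assert!(parse_layer_object_name("not-a-layer").is_err());
        // Carries an 8-character suffix, but the remainder is still not a layer name.
        assert!(parse_layer_object_name("not_a_layer-00000001").is_err());
    }
}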