use std::collections::{HashMap, HashSet};
use std::time::SystemTime;

use itertools::Itertools;
use pageserver::tenant::checks::check_valid_layermap;
use pageserver::tenant::layer_map::LayerMap;
use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata;
use pageserver::tenant::remote_timeline_client::manifest::TenantManifest;
use pageserver_api::shard::ShardIndex;
use tokio_util::sync::CancellationToken;
use tracing::{info, warn};
use utils::generation::Generation;
use utils::id::TimelineId;
use utils::shard::TenantShardId;

use crate::cloud_admin_api::BranchData;
use crate::metadata_stream::stream_listing;
use crate::{download_object_with_retries, RootTarget, TenantShardTimelineId};
use futures_util::StreamExt;
use pageserver::tenant::remote_timeline_client::{
    parse_remote_index_path, parse_remote_tenant_manifest_path, remote_layer_path,
};
use pageserver::tenant::storage_layer::LayerName;
use pageserver::tenant::IndexPart;
use remote_storage::{GenericRemoteStorage, ListingObject, RemotePath};

pub(crate) struct TimelineAnalysis {
    /// Anomalies detected
    pub(crate) errors: Vec<String>,

    /// Healthy but noteworthy, e.g. old-format structures that are still readable but
    /// worth reporting, so that we know not to remove the decoding for those old
    /// versions yet.
    pub(crate) warnings: Vec<String>,

    /// Objects whose keys were not recognized at all, i.e. not layer files, not indices, and not initdb archive.
    pub(crate) unknown_keys: Vec<String>,
}

impl TimelineAnalysis {
    fn new() -> Self {
        Self {
            errors: Vec::new(),
            warnings: Vec::new(),
            unknown_keys: Vec::new(),
        }
    }

    /// Whether a timeline is healthy.
    pub(crate) fn is_healthy(&self) -> bool {
        self.errors.is_empty() && self.warnings.is_empty()
    }
}

pub(crate) async fn branch_cleanup_and_check_errors(
    remote_client: &GenericRemoteStorage,
    id: &TenantShardTimelineId,
    tenant_objects: &mut TenantObjectListing,
    s3_active_branch: Option<&BranchData>,
    console_branch: Option<BranchData>,
    s3_data: Option<RemoteTimelineBlobData>,
) -> TimelineAnalysis {
    let mut result = TimelineAnalysis::new();

    info!("Checking timeline");

    if let Some(s3_active_branch) = s3_active_branch {
        info!(
            "Checking console status for timeline for branch {:?}/{:?}",
            s3_active_branch.project_id, s3_active_branch.id
        );
        match console_branch {
            Some(_) => {result.errors.push(format!("Timeline has deleted branch data in the console (id = {:?}, project_id = {:?}), recheck whether it got removed during the check",
                s3_active_branch.id, s3_active_branch.project_id))
            },
            None => {
                result.errors.push(format!("Timeline has no branch data in the console (id = {:?}, project_id = {:?}), recheck whether it got removed during the check",
            s3_active_branch.id, s3_active_branch.project_id))
            }
        };
    }

    match s3_data {
        Some(s3_data) => {
            result
                .unknown_keys
                .extend(s3_data.unknown_keys.into_iter().map(|k| k.key.to_string()));

            match s3_data.blob_data {
                BlobDataParseResult::Parsed {
                    index_part,
                    index_part_generation: _,
                    s3_layers: _,
                    index_part_last_modified_time,
                    index_part_snapshot_time,
                } => {
                    // Ignore missing-file errors if the index_part we downloaded differs
                    // from the one that was current when the layer files were listed.
                    let ignore_error = index_part_snapshot_time < index_part_last_modified_time
                        && !cfg!(debug_assertions);
                    if !IndexPart::KNOWN_VERSIONS.contains(&index_part.version()) {
                        result
                            .errors
                            .push(format!("index_part.json version: {}", index_part.version()))
                    }

                    let mut newest_versions = IndexPart::KNOWN_VERSIONS.iter().rev().take(3);
                    if !newest_versions.any(|ip| ip == &index_part.version()) {
                        info!(
                            "index_part.json version is not latest: {}",
                            index_part.version()
                        );
                    }

                    if index_part.metadata.disk_consistent_lsn()
                        != index_part.duplicated_disk_consistent_lsn()
                    {
                        // Tech debt: let's get rid of one of these, they are redundant
                        // https://github.com/neondatabase/neon/issues/8343
                        result.errors.push(format!(
                            "Mismatching disk_consistent_lsn in TimelineMetadata ({}) and in the index_part ({})",
                            index_part.metadata.disk_consistent_lsn(),
                            index_part.duplicated_disk_consistent_lsn(),
                        ))
                    }

                    if index_part.layer_metadata.is_empty() {
                        if index_part.metadata.ancestor_timeline().is_none() {
                            // The initial timeline with no ancestor should ALWAYS have layers.
                            result.errors.push(
                                "index_part.json has no layers (ancestor_timeline=None)"
                                    .to_string(),
                            );
                        } else {
                            // Not an error: this can happen for branches with zero writes, but log it for visibility.
                            info!("index_part.json has no layers (ancestor_timeline exists)");
                        }
                    }

                    let layer_names = index_part.layer_metadata.keys().cloned().collect_vec();
                    if let Some(err) = check_valid_layermap(&layer_names) {
                        result.warnings.push(format!(
                            "index_part.json contains invalid layer map structure: {err}"
                        ));
                    }

                    for (layer, metadata) in index_part.layer_metadata {
                        if metadata.file_size == 0 {
                            result.errors.push(format!(
                                "index_part.json contains a layer {} that has 0 size in its layer metadata", layer,
                            ))
                        }

                        if !tenant_objects.check_ref(id.timeline_id, &layer, &metadata) {
                            let path = remote_layer_path(
                                &id.tenant_shard_id.tenant_id,
                                &id.timeline_id,
                                metadata.shard,
                                &layer,
                                metadata.generation,
                            );

                            // A HEAD request is used here to address a race where an index was uploaded
                            // concurrently with our scan: the object may have appeared in S3 after we took
                            // the listing snapshot, so re-check before reporting it missing.
                            let response = remote_client
                                .head_object(&path, &CancellationToken::new())
                                .await;

                            if response.is_err() {
                                // Object is not present.
                                let is_l0 = LayerMap::is_l0(layer.key_range(), layer.is_delta());

                                let msg = format!(
                                    "index_part.json contains a layer {}{} (shard {}) that is not present in remote storage (layer_is_l0: {})",
                                    layer,
                                    metadata.generation.get_suffix(),
                                    metadata.shard,
                                    is_l0,
                                );

                                if is_l0 || ignore_error {
                                    result.warnings.push(msg);
                                } else {
                                    result.errors.push(msg);
                                }
                            }
                        }
                    }
                }
                BlobDataParseResult::Relic => {}
                BlobDataParseResult::Incorrect {
                    errors,
                    s3_layers: _,
                } => result.errors.extend(
                    errors
                        .into_iter()
                        .map(|error| format!("parse error: {error}")),
                ),
            }
        }
        None => result
            .errors
            .push("Timeline has no data on S3 at all".to_string()),
    }

    if result.errors.is_empty() {
        info!("No check errors found");
    } else {
        warn!("Timeline metadata errors: {0:?}", result.errors);
    }

    if !result.warnings.is_empty() {
        warn!("Timeline metadata warnings: {0:?}", result.warnings);
    }

    if !result.unknown_keys.is_empty() {
        warn!(
            "The following keys are not recognized: {0:?}",
            result.unknown_keys
        )
    }

    result
}

#[derive(Default)]
pub(crate) struct LayerRef {
    ref_count: usize,
}

/// Top-level index of objects in a tenant.  This may be used by any shard-timeline within
/// the tenant to query whether an object exists.
#[derive(Default)]
pub(crate) struct TenantObjectListing {
    shard_timelines: HashMap<(ShardIndex, TimelineId), HashMap<(LayerName, Generation), LayerRef>>,
}

impl TenantObjectListing {
    /// Having done an S3 listing of the keys within a timeline prefix, merge them into the overall
    /// list of layer keys for the Tenant.
    pub(crate) fn push(
        &mut self,
        ttid: TenantShardTimelineId,
        layers: HashSet<(LayerName, Generation)>,
    ) {
        let shard_index = ShardIndex::new(
            ttid.tenant_shard_id.shard_number,
            ttid.tenant_shard_id.shard_count,
        );
        let replaced = self.shard_timelines.insert(
            (shard_index, ttid.timeline_id),
            layers
                .into_iter()
                .map(|l| (l, LayerRef::default()))
                .collect(),
        );

        assert!(
            replaced.is_none(),
            "Built from an S3 object listing, which should never repeat a key"
        );
    }

    /// Having loaded a timeline index, check if a layer referenced by the index exists.  If it does,
    /// the layer's refcount will be incremented.  Later, after calling this for all references in all indices
    /// in a tenant, orphan layers may be detected by their zero refcounts.
    ///
    /// Returns true if the layer exists
    pub(crate) fn check_ref(
        &mut self,
        timeline_id: TimelineId,
        layer_file: &LayerName,
        metadata: &LayerFileMetadata,
    ) -> bool {
        let Some(shard_tl) = self.shard_timelines.get_mut(&(metadata.shard, timeline_id)) else {
            return false;
        };

        let Some(layer_ref) = shard_tl.get_mut(&(layer_file.clone(), metadata.generation)) else {
            return false;
        };

        layer_ref.ref_count += 1;

        true
    }

    pub(crate) fn get_orphans(&self) -> Vec<(ShardIndex, TimelineId, LayerName, Generation)> {
        let mut result = Vec::new();
        for ((shard_index, timeline_id), layers) in &self.shard_timelines {
            for ((layer_file, generation), layer_ref) in layers {
                if layer_ref.ref_count == 0 {
                    result.push((*shard_index, *timeline_id, layer_file.clone(), *generation))
                }
            }
        }

        result
    }
}
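
// The orphan-detection scheme above boils down to a refcount over a listing:
// every layer seen in the S3 listing starts at refcount zero, each index
// reference bumps it (the `check_ref` step), and whatever is still at zero
// afterwards is an orphan (the `get_orphans` step). The sketch below models
// that flow with plain std types (strings standing in for LayerName,
// Generation, and the shard/timeline IDs); it is illustrative only, not part
// of the scrubber.
#[cfg(test)]
mod orphan_detection_model {
    use std::collections::HashMap;

    #[test]
    fn orphans_are_layers_with_zero_refs() {
        // Listing: three layers exist in "S3", all starting at refcount zero.
        let mut refs: HashMap<&str, usize> =
            HashMap::from([("layer-a", 0), ("layer-b", 0), ("layer-c", 0)]);

        // Index pass: only two of them are referenced (the check_ref analogue).
        for referenced in ["layer-a", "layer-b"] {
            *refs.get_mut(referenced).unwrap() += 1;
        }

        // get_orphans analogue: zero refcount means no index references the layer.
        let mut orphans: Vec<&str> = refs
            .iter()
            .filter(|(_, count)| **count == 0)
            .map(|(name, _)| *name)
            .collect();
        orphans.sort();
        assert_eq!(orphans, vec!["layer-c"]);
    }
}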

#[derive(Debug)]
pub(crate) struct RemoteTimelineBlobData {
    pub(crate) blob_data: BlobDataParseResult,

    /// Index objects that were not used when loading `blob_data`, e.g. those from old generations
    pub(crate) unused_index_keys: Vec<ListingObject>,

    /// Objects whose keys were not recognized at all, i.e. not layer files, not indices
    pub(crate) unknown_keys: Vec<ListingObject>,
}

#[derive(Debug)]
pub(crate) enum BlobDataParseResult {
    Parsed {
        index_part: Box<IndexPart>,
        index_part_generation: Generation,
        index_part_last_modified_time: SystemTime,
        index_part_snapshot_time: SystemTime,
        s3_layers: HashSet<(LayerName, Generation)>,
    },
    /// The remains of an uncleanly deleted timeline or an aborted timeline creation (e.g. an initdb archive only, or some layer without an index)
    Relic,
    Incorrect {
        errors: Vec<String>,
        s3_layers: HashSet<(LayerName, Generation)>,
    },
}

pub(crate) fn parse_layer_object_name(name: &str) -> Result<(LayerName, Generation), String> {
    match name.rsplit_once('-') {
        // FIXME: this is gross, just use a regex?
        Some((layer_filename, gen)) if gen.len() == 8 => {
            let layer = layer_filename.parse::<LayerName>()?;
            let gen =
                Generation::parse_suffix(gen).ok_or("Malformed generation suffix".to_string())?;
            Ok((layer, gen))
        }
        _ => Ok((name.parse::<LayerName>()?, Generation::none())),
    }
}
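
// A small model of the suffix-splitting rule used by `parse_layer_object_name`
// above: `rsplit_once('-')` splits on the *last* dash, so an 8-character
// trailing segment is treated as a generation suffix and everything before it
// as the layer file name. The names here are placeholders rather than real
// layer file names; only the split/length logic is exercised.
#[cfg(test)]
mod layer_object_name_split_model {
    #[test]
    fn eight_char_tail_is_taken_as_generation_suffix() {
        let name = "some-layer-file-name-0000abcd";
        let (layer_filename, suffix) = name.rsplit_once('-').unwrap();
        assert_eq!(layer_filename, "some-layer-file-name");
        assert_eq!(suffix, "0000abcd");
        assert_eq!(suffix.len(), 8);
    }

    #[test]
    fn other_tails_fall_through_to_the_generation_less_branch() {
        // "name" is not 8 characters long, so the length guard fails and the
        // whole string would be parsed as a generation-less layer name.
        let name = "some-layer-file-name";
        let (_, tail) = name.rsplit_once('-').unwrap();
        assert_ne!(tail.len(), 8);
    }
}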

/// Note (<https://github.com/neondatabase/neon/issues/8872>):
/// Since we do not guarantee the order of the listing, we could list layer keys right before
/// pageserver `RemoteTimelineClient` deletes the layer files and then the index.
/// In that rare case, this would give back a transient error where the index key is missing.
///
/// To avoid generating false positives, we try streaming the listing a second time.
pub(crate) async fn list_timeline_blobs(
    remote_client: &GenericRemoteStorage,
    id: TenantShardTimelineId,
    root_target: &RootTarget,
) -> anyhow::Result<RemoteTimelineBlobData> {
    let res = list_timeline_blobs_impl(remote_client, id, root_target).await?;
    match res {
        ListTimelineBlobsResult::Ready(data) => Ok(data),
        ListTimelineBlobsResult::MissingIndexPart(_) => {
            // Retry if listing raced with removal of an index
            let data = list_timeline_blobs_impl(remote_client, id, root_target)
                .await?
                .into_data();
            Ok(data)
        }
    }
}

enum ListTimelineBlobsResult {
    /// Blob data is ready to be interpreted.
    Ready(RemoteTimelineBlobData),
    /// The listing contained an index, but we could not fetch it when we tried.
    MissingIndexPart(RemoteTimelineBlobData),
}

impl ListTimelineBlobsResult {
    /// Get the inner blob data regardless of the status.
    pub fn into_data(self) -> RemoteTimelineBlobData {
        match self {
            ListTimelineBlobsResult::Ready(data) => data,
            ListTimelineBlobsResult::MissingIndexPart(data) => data,
        }
    }
}

/// Returns [`ListTimelineBlobsResult::MissingIndexPart`] if blob data has layer files
/// but is missing [`IndexPart`], otherwise returns [`ListTimelineBlobsResult::Ready`].
async fn list_timeline_blobs_impl(
    remote_client: &GenericRemoteStorage,
    id: TenantShardTimelineId,
    root_target: &RootTarget,
) -> anyhow::Result<ListTimelineBlobsResult> {
    let mut s3_layers = HashSet::new();

    let mut errors = Vec::new();
    let mut unknown_keys = Vec::new();

    let mut timeline_dir_target = root_target.timeline_root(&id);
    timeline_dir_target.delimiter = String::new();

    let mut index_part_keys: Vec<ListingObject> = Vec::new();
    let mut initdb_archive: bool = false;

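    // prefix_in_bucket may carry a leading slash that the listed keys lack;
    // normalize it here so that the strip_prefix() match in the listing loop
    // below lines up with the returned keys.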
    let prefix_str = &timeline_dir_target
        .prefix_in_bucket
        .strip_prefix("/")
        .unwrap_or(&timeline_dir_target.prefix_in_bucket);

    let mut stream = std::pin::pin!(stream_listing(remote_client, &timeline_dir_target));
    while let Some(obj) = stream.next().await {
        let (key, Some(obj)) = obj? else {
            panic!("ListingObject not specified");
        };

        let blob_name = key.get_path().as_str().strip_prefix(prefix_str);
        match blob_name {
            Some(name) if name.starts_with("index_part.json") => {
                tracing::debug!("Index key {key}");
                index_part_keys.push(obj)
            }
            Some("initdb.tar.zst") => {
                tracing::debug!("initdb archive {key}");
                initdb_archive = true;
            }
            Some("initdb-preserved.tar.zst") => {
                tracing::info!("initdb archive preserved {key}");
            }
            Some(maybe_layer_name) => match parse_layer_object_name(maybe_layer_name) {
                Ok((new_layer, gen)) => {
                    tracing::debug!("Parsed layer key: {new_layer} {gen:?}");
                    s3_layers.insert((new_layer, gen));
                }
                Err(e) => {
                    tracing::info!("Error parsing {maybe_layer_name} as layer name: {e}");
                    unknown_keys.push(obj);
                }
            },
            None => {
                tracing::info!("S3 listed an unknown key: {key}");
                unknown_keys.push(obj);
            }
        }
    }

    if index_part_keys.is_empty() && s3_layers.is_empty() {
        tracing::debug!("Timeline is empty: expected post-deletion state.");
        if initdb_archive {
            tracing::info!("Timeline is post deletion but initdb archive is still present.");
        }

        return Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
            blob_data: BlobDataParseResult::Relic,
            unused_index_keys: index_part_keys,
            unknown_keys,
        }));
    }

    // Choose the index_part with the highest generation
    let (index_part_object, index_part_generation) = match index_part_keys
        .iter()
        .filter_map(|key| {
            // Stripping the index key to the last part, because RemotePath doesn't
            // like absolute paths, and depending on prefix_in_bucket it's possible
            // for the keys we read back to start with a slash.
            let basename = key.key.get_path().as_str().rsplit_once('/').unwrap().1;
            parse_remote_index_path(RemotePath::from_string(basename).unwrap()).map(|g| (key, g))
        })
        .max_by_key(|i| i.1)
        .map(|(k, g)| (k.clone(), g))
    {
        Some((key, gen)) => (Some::<ListingObject>(key.to_owned()), gen),
        None => {
            // Legacy/missing case: one or zero index parts, which did not have a generation
            (index_part_keys.pop(), Generation::none())
        }
    };

    match index_part_object.as_ref() {
        Some(selected) => index_part_keys.retain(|k| k != selected),
        None => {
            // This case does not indicate corruption, but it should be very unusual. It can
            // happen if:
            // - timeline creation is in progress (the first layer is written before the index is written)
            // - timeline deletion happened while a stale pageserver was still attached; it might
            //   upload a layer after the deletion is done.
            tracing::info!(
                "S3 list response got no index_part.json file but still has layer files"
            );
            return Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
                blob_data: BlobDataParseResult::Relic,
                unused_index_keys: index_part_keys,
                unknown_keys,
            }));
        }
    }

    if let Some(index_part_object_key) = index_part_object.as_ref() {
        let (index_part_bytes, index_part_last_modified_time) =
            match download_object_with_retries(remote_client, &index_part_object_key.key).await {
                Ok(data) => data,
                Err(e) => {
                    // It is possible that the branch gets deleted between the time we list
                    // the objects and the time we download the index part file.
                    errors.push(format!("failed to download index_part.json: {e}"));
                    return Ok(ListTimelineBlobsResult::MissingIndexPart(
                        RemoteTimelineBlobData {
                            blob_data: BlobDataParseResult::Incorrect { errors, s3_layers },
                            unused_index_keys: index_part_keys,
                            unknown_keys,
                        },
                    ));
                }
            };
        let index_part_snapshot_time = index_part_object_key.last_modified;
        match serde_json::from_slice(&index_part_bytes) {
            Ok(index_part) => {
                return Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
                    blob_data: BlobDataParseResult::Parsed {
                        index_part: Box::new(index_part),
                        index_part_generation,
                        s3_layers,
                        index_part_last_modified_time,
                        index_part_snapshot_time,
                    },
                    unused_index_keys: index_part_keys,
                    unknown_keys,
                }))
            }
            Err(index_parse_error) => errors.push(format!(
                "index_part.json body parsing error: {index_parse_error}"
            )),
        }
    }

    if errors.is_empty() {
        errors.push(
            "Unexpected: no errors were recorded, yet no successfully parsed blob was returned"
                .to_string(),
        );
    }

    Ok(ListTimelineBlobsResult::Ready(RemoteTimelineBlobData {
        blob_data: BlobDataParseResult::Incorrect { errors, s3_layers },
        unused_index_keys: index_part_keys,
        unknown_keys,
    }))
}
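
// Both the index_part selection above and the tenant-manifest selection below
// follow the same pattern: parse a generation out of each candidate key, keep
// the candidate with the highest generation, and retain the rest as unused
// keys (candidates for cleanup). A minimal, self-contained model of that
// pattern, with u32 standing in for Generation and placeholder key names:
#[cfg(test)]
mod highest_generation_selection_model {
    #[test]
    fn highest_generation_wins_and_the_rest_are_unused() {
        let mut candidates: Vec<(u32, &str)> = vec![
            (1, "index_part.json-00000001"),
            (3, "index_part.json-00000003"),
            (2, "index_part.json-00000002"),
        ];

        // The max_by_key analogue: pick the candidate with the highest generation.
        let (latest_gen, latest_key) = candidates
            .iter()
            .max_by_key(|(g, _)| *g)
            .map(|(g, key)| (*g, *key))
            .unwrap();
        assert_eq!((latest_gen, latest_key), (3, "index_part.json-00000003"));

        // The retain analogue: everything else stays behind as unused keys.
        candidates.retain(|(g, _)| *g != latest_gen);
        assert_eq!(candidates.len(), 2);
    }
}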

pub(crate) struct RemoteTenantManifestInfo {
    pub(crate) generation: Generation,
    pub(crate) manifest: TenantManifest,
    pub(crate) listing_object: ListingObject,
}

pub(crate) enum ListTenantManifestResult {
    WithErrors {
        errors: Vec<(String, String)>,
        #[allow(dead_code)]
        unknown_keys: Vec<ListingObject>,
    },
    NoErrors {
        latest_generation: Option<RemoteTenantManifestInfo>,
        manifests: Vec<(Generation, ListingObject)>,
    },
}

/// Lists the tenant manifests in remote storage and parses the latest one, returning a [`ListTenantManifestResult`] object.
pub(crate) async fn list_tenant_manifests(
    remote_client: &GenericRemoteStorage,
    tenant_id: TenantShardId,
    root_target: &RootTarget,
) -> anyhow::Result<ListTenantManifestResult> {
    let mut errors = Vec::new();
    let mut unknown_keys = Vec::new();

    let mut tenant_root_target = root_target.tenant_root(&tenant_id);
    let original_prefix = tenant_root_target.prefix_in_bucket.clone();
    const TENANT_MANIFEST_STEM: &str = "tenant-manifest";
    tenant_root_target.prefix_in_bucket += TENANT_MANIFEST_STEM;
    tenant_root_target.delimiter = String::new();

    let mut manifests: Vec<(Generation, ListingObject)> = Vec::new();

    let prefix_str = &original_prefix
        .strip_prefix("/")
        .unwrap_or(&original_prefix);

    let mut stream = std::pin::pin!(stream_listing(remote_client, &tenant_root_target));
    'outer: while let Some(obj) = stream.next().await {
        let (key, Some(obj)) = obj? else {
            panic!("ListingObject not specified");
        };

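        // The labeled block below emulates a let-else chain: any `break 'err`
        // skips the manifest handling and falls through to the unknown-key
        // handling after the block.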
        'err: {
            // TODO a let chain would be nicer here.
            let Some(name) = key.object_name() else {
                break 'err;
            };
            if !name.starts_with(TENANT_MANIFEST_STEM) {
                break 'err;
            }
            let Some(generation) = parse_remote_tenant_manifest_path(key.clone()) else {
                break 'err;
            };
            tracing::debug!("tenant manifest {key}");
            manifests.push((generation, obj));
            continue 'outer;
        }
        tracing::info!("Listed an unknown key: {key}");
        unknown_keys.push(obj);
    }

    if !unknown_keys.is_empty() {
        errors.push(((*prefix_str).to_owned(), "unknown keys listed".to_string()));

        return Ok(ListTenantManifestResult::WithErrors {
            errors,
            unknown_keys,
        });
    }

    if manifests.is_empty() {
        tracing::debug!("No manifest for tenant.");

        return Ok(ListTenantManifestResult::NoErrors {
            latest_generation: None,
            manifests,
        });
    }

    // Find the manifest with the highest generation
    let (latest_generation, latest_listing_object) = manifests
        .iter()
        .max_by_key(|i| i.0)
        .map(|(g, obj)| (*g, obj.clone()))
        .unwrap();

    manifests.retain(|(gen, _obj)| gen != &latest_generation);

    let manifest_bytes =
        match download_object_with_retries(remote_client, &latest_listing_object.key).await {
            Ok((bytes, _)) => bytes,
            Err(e) => {
                // It is possible that the tenant gets deleted between the time we list
                // the objects and the time we download the manifest file.
                errors.push((
                    latest_listing_object.key.get_path().as_str().to_owned(),
                    format!("failed to download tenant-manifest.json: {e}"),
                ));
                return Ok(ListTenantManifestResult::WithErrors {
                    errors,
                    unknown_keys,
                });
            }
        };

    match TenantManifest::from_json_bytes(&manifest_bytes) {
        Ok(manifest) => {
            return Ok(ListTenantManifestResult::NoErrors {
                latest_generation: Some(RemoteTenantManifestInfo {
                    generation: latest_generation,
                    manifest,
                    listing_object: latest_listing_object,
                }),
                manifests,
            });
        }
        Err(parse_error) => errors.push((
            latest_listing_object.key.get_path().as_str().to_owned(),
            format!("tenant-manifest.json body parsing error: {parse_error}"),
        )),
    }

    if errors.is_empty() {
        errors.push((
            (*prefix_str).to_owned(),
            "Unexpected: no errors were recorded, yet no successfully parsed blob was returned"
                .to_string(),
        ));
    }

    Ok(ListTenantManifestResult::WithErrors {
        errors,
        unknown_keys,
    })
}
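
// A hypothetical caller sketch showing how the two result variants might be
// consumed. The real call sites live elsewhere in storage_scrubber; this
// helper exists only to illustrate the shape of the API.
#[allow(dead_code)]
fn describe_manifest_listing(res: ListTenantManifestResult) {
    match res {
        ListTenantManifestResult::WithErrors { errors, .. } => {
            // Each error carries the key it relates to plus a description.
            for (key, error) in errors {
                warn!("manifest scan error at {key}: {error}");
            }
        }
        ListTenantManifestResult::NoErrors {
            latest_generation,
            manifests,
        } => {
            if let Some(latest) = latest_generation {
                info!("latest tenant manifest generation: {:?}", latest.generation);
            }
            // Manifests from older generations are candidates for cleanup.
            info!("{} stale manifest objects found", manifests.len());
        }
    }
}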