use std::collections::HashMap;
use std::sync::Arc;

use crate::checks::{list_timeline_blobs, BlobDataParseResult, S3TimelineBlobData};
use crate::metadata_stream::{stream_tenant_shards, stream_tenant_timelines};
use crate::{
    download_object_to_file, init_remote, BucketConfig, NodeKind, RootTarget, TenantShardTimelineId,
};
use anyhow::Context;
use async_stream::stream;
use aws_sdk_s3::Client;
use camino::Utf8PathBuf;
use futures::{StreamExt, TryStreamExt};
use pageserver::tenant::remote_timeline_client::index::IndexLayerMetadata;
use pageserver::tenant::storage_layer::LayerName;
use pageserver::tenant::IndexPart;
use pageserver_api::shard::TenantShardId;
use utils::generation::Generation;
use utils::id::TenantId;

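/// Downloads a point-in-time copy of a tenant's remote (S3) data to a local
/// directory, writing layer files under their remote-style names (including
/// the generation suffix) alongside each timeline's `index_part.json`.
///
/// A minimal usage sketch; how `bucket_config` and `tenant_id` are obtained
/// depends on the caller, and the values shown here are hypothetical:
///
/// ```ignore
/// let downloader = SnapshotDownloader::new(
///     bucket_config,                             // bucket connection details
///     tenant_id,                                 // tenant to snapshot
///     Utf8PathBuf::from("/tmp/tenant-snapshot"), // local output directory
///     8,                                         // layer download concurrency
/// )?;
/// downloader.download().await?;
/// ```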
pub struct SnapshotDownloader {
    s3_client: Arc<Client>,
    s3_root: RootTarget,
    bucket_config: BucketConfig,
    tenant_id: TenantId,
    output_path: Utf8PathBuf,
    concurrency: usize,
}

impl SnapshotDownloader {
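    /// Creates a downloader for one tenant, initializing an S3 client and root
    /// target for the pageserver side of the bucket.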
    pub fn new(
        bucket_config: BucketConfig,
        tenant_id: TenantId,
        output_path: Utf8PathBuf,
        concurrency: usize,
    ) -> anyhow::Result<Self> {
        let (s3_client, s3_root) = init_remote(bucket_config.clone(), NodeKind::Pageserver)?;
        Ok(Self {
            s3_client,
            s3_root,
            bucket_config,
            tenant_id,
            output_path,
            concurrency,
        })
    }

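    /// Download a single layer file into the snapshot directory, skipping it if a
    /// local copy already exists. The object is looked up via `ListObjectVersions`
    /// so that a layer which has been soft-deleted in a versioned bucket can still
    /// be fetched.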
    async fn download_layer(
        &self,
        ttid: TenantShardTimelineId,
        layer_name: LayerName,
        layer_metadata: IndexLayerMetadata,
    ) -> anyhow::Result<(LayerName, IndexLayerMetadata)> {
        // Note: "local" here means a local copy of the S3 data, not the pageserver's
        // local format. The two use different layer file names: the remote style
        // carries a generation suffix.
        let local_path = self.output_path.join(format!(
            "{}/timelines/{}/{}{}",
            ttid.tenant_shard_id,
            ttid.timeline_id,
            layer_name,
            layer_metadata.generation.get_suffix()
        ));

        // We should only be called for layers that are owned by the input TTID
        assert_eq!(layer_metadata.shard, ttid.tenant_shard_id.to_index());

        // Assumption: we always write layer files atomically, and layer files are
        // immutable. Therefore if the file already exists on local disk, we assume
        // it is fully correct and skip it.
        if tokio::fs::try_exists(&local_path).await? {
            tracing::debug!("{} already exists", local_path);
            return Ok((layer_name, layer_metadata));
        } else {
            tracing::debug!("{} requires download...", local_path);

            let timeline_root = self.s3_root.timeline_root(&ttid);
            let remote_layer_path = format!(
                "{}{}{}",
                timeline_root.prefix_in_bucket,
                layer_name,
                layer_metadata.generation.get_suffix()
            );

            // List versions: the object might be deleted.
            let versions = self
                .s3_client
                .list_object_versions()
                .bucket(self.bucket_config.bucket.clone())
                .prefix(&remote_layer_path)
                .send()
                .await?;
            let Some(version) = versions.versions.as_ref().and_then(|v| v.first()) else {
                return Err(anyhow::anyhow!("No versions found for {remote_layer_path}"));
            };
            download_object_to_file(
                &self.s3_client,
                &self.bucket_config.bucket,
                &remote_layer_path,
                version.version_id.as_deref(),
                &local_path,
            )
            .await?;

            tracing::debug!("Downloaded successfully to {local_path}");
        }

        Ok((layer_name, layer_metadata))
    }

    /// Download many layers belonging to the same TTID, with concurrency limited
    /// by `self.concurrency`.
    async fn download_layers(
        &self,
        ttid: TenantShardTimelineId,
        layers: Vec<(LayerName, IndexLayerMetadata)>,
    ) -> anyhow::Result<()> {
        let layer_count = layers.len();
        tracing::info!("Downloading {} layers for timeline {ttid}...", layer_count);
        let layers_stream = stream! {
            for (layer_name, layer_metadata) in layers {
                yield self.download_layer(ttid, layer_name, layer_metadata);
            }
        };

        tokio::fs::create_dir_all(self.output_path.join(format!(
            "{}/timelines/{}",
            ttid.tenant_shard_id, ttid.timeline_id
        )))
        .await?;

        let layer_results = layers_stream.buffered(self.concurrency);
        let mut layer_results = std::pin::pin!(layer_results);

        let mut err = None;
        let mut download_count = 0;
        while let Some(i) = layer_results.next().await {
            download_count += 1;
            match i {
                Ok((layer_name, layer_metadata)) => {
                    tracing::info!(
                        "[{download_count}/{layer_count}] OK: {} bytes {ttid} {}",
                        layer_metadata.file_size,
                        layer_name
                    );
                }
                Err(e) => {
                    // Warn and continue: we will download what we can
                    tracing::warn!("Download error: {e}");
                    err = Some(e);
                }
            }
        }
        if let Some(e) = err {
            tracing::warn!("Some errors occurred downloading {ttid} layers, last error: {e}");
            Err(e)
        } else {
            Ok(())
        }
    }

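    /// Download all layers for one timeline, then write its index. Layers that
    /// belong to an ancestor shard (i.e. whose shard count differs from the
    /// timeline's) are not downloaded here; they are accumulated into
    /// `ancestor_layers` for a later pass.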
    async fn download_timeline(
        &self,
        ttid: TenantShardTimelineId,
        index_part: IndexPart,
        index_part_generation: Generation,
        ancestor_layers: &mut HashMap<
            TenantShardTimelineId,
            HashMap<LayerName, IndexLayerMetadata>,
        >,
    ) -> anyhow::Result<()> {
        let index_bytes = serde_json::to_string(&index_part).unwrap();

        let layers = index_part
            .layer_metadata
            .into_iter()
            .filter_map(|(layer_name, layer_metadata)| {
                if layer_metadata.shard.shard_count != ttid.tenant_shard_id.shard_count {
                    // Accumulate ancestor layers for later download
                    let ancestor_ttid = TenantShardTimelineId::new(
                        TenantShardId {
                            tenant_id: ttid.tenant_shard_id.tenant_id,
                            shard_number: layer_metadata.shard.shard_number,
                            shard_count: layer_metadata.shard.shard_count,
                        },
                        ttid.timeline_id,
                    );
                    let ancestor_ttid_layers = ancestor_layers.entry(ancestor_ttid).or_default();
                    use std::collections::hash_map::Entry;
                    match ancestor_ttid_layers.entry(layer_name) {
                        Entry::Occupied(entry) => {
                            // Descendant shards that reference a layer from an ancestor should
                            // always have metadata matching their siblings', because it is read
                            // atomically during a shard split.
                            assert_eq!(entry.get(), &layer_metadata);
                        }
                        Entry::Vacant(entry) => {
                            entry.insert(layer_metadata);
                        }
                    }
                    None
                } else {
                    Some((layer_name, layer_metadata))
                }
            })
            .collect();

        let download_result = self.download_layers(ttid, layers).await;

        // Write the index last, once all the layers it references are downloaded.
        let local_index_path = self.output_path.join(format!(
            "{}/timelines/{}/index_part.json{}",
            ttid.tenant_shard_id,
            ttid.timeline_id,
            index_part_generation.get_suffix()
        ));
        tokio::fs::write(&local_index_path, index_bytes)
            .await
            .context("writing index")?;

        download_result
    }

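    /// Download a snapshot of the tenant: enumerate its shards, read each timeline's
    /// index from the shards with the highest shard count, download their layers, and
    /// finally fetch any layers still referenced from ancestor shards.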
    pub async fn download(&self) -> anyhow::Result<()> {
        let (s3_client, target) = init_remote(self.bucket_config.clone(), NodeKind::Pageserver)?;

        // Generate a stream of TenantShardId
        let shards = stream_tenant_shards(&s3_client, &target, self.tenant_id).await?;
        let shards: Vec<TenantShardId> = shards.try_collect().await?;

        // Only read from shards that have the highest count: avoids redundantly downloading
        // from ancestor shards.
        let Some(shard_count) = shards.iter().map(|s| s.shard_count).max() else {
            anyhow::bail!("No shards found");
        };

        // We will build a collection of layers in ancestor shards to download (this will only
        // happen if this tenant has been split at some point).
        let mut ancestor_layers: HashMap<
            TenantShardTimelineId,
            HashMap<LayerName, IndexLayerMetadata>,
        > = Default::default();

        for shard in shards.into_iter().filter(|s| s.shard_count == shard_count) {
            // Generate a stream of TenantTimelineId
            let timelines = stream_tenant_timelines(&s3_client, &self.s3_root, shard).await?;

            // Generate a stream of S3TimelineBlobData
            async fn load_timeline_index(
                s3_client: &Client,
                target: &RootTarget,
                ttid: TenantShardTimelineId,
            ) -> anyhow::Result<(TenantShardTimelineId, S3TimelineBlobData)> {
                let data = list_timeline_blobs(s3_client, ttid, target).await?;
                Ok((ttid, data))
            }
            let timelines = timelines.map_ok(|ttid| load_timeline_index(&s3_client, &target, ttid));
            let mut timelines = std::pin::pin!(timelines.try_buffered(8));

            while let Some(i) = timelines.next().await {
                let (ttid, data) = i?;
                match data.blob_data {
                    BlobDataParseResult::Parsed {
                        index_part,
                        index_part_generation,
                        s3_layers: _,
                    } => {
                        self.download_timeline(
                            ttid,
                            index_part,
                            index_part_generation,
                            &mut ancestor_layers,
                        )
                        .await
                        .context("Downloading timeline")?;
                    }
                    BlobDataParseResult::Relic => {}
                    BlobDataParseResult::Incorrect(_) => {
                        tracing::error!("Bad metadata in timeline {ttid}");
                    }
                };
            }
        }

        for (ttid, layers) in ancestor_layers.into_iter() {
            tracing::info!(
                "Downloading {} layers from ancestor timeline {ttid}...",
                layers.len()
            );

            self.download_layers(ttid, layers.into_iter().collect())
                .await?;
        }

        Ok(())
    }
}