use std::collections::HashMap;
use std::sync::Arc;

use crate::checks::{list_timeline_blobs, BlobDataParseResult, S3TimelineBlobData};
use crate::metadata_stream::{stream_tenant_shards, stream_tenant_timelines};
use crate::{
    download_object_to_file, init_remote, BucketConfig, NodeKind, RootTarget, TenantShardTimelineId,
};
use anyhow::Context;
use async_stream::stream;
use aws_sdk_s3::Client;
use camino::Utf8PathBuf;
use futures::{StreamExt, TryStreamExt};
use pageserver::tenant::remote_timeline_client::index::LayerFileMetadata;
use pageserver::tenant::storage_layer::LayerName;
use pageserver::tenant::IndexPart;
use pageserver_api::shard::TenantShardId;
use utils::generation::Generation;
use utils::id::TenantId;

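/// Downloads a point-in-time copy of a tenant's S3 data (layer files plus each
/// timeline's `index_part.json`) into a local directory tree that mirrors the
/// remote layout.
///
/// A minimal usage sketch, assuming a tokio runtime and an already-populated
/// `BucketConfig`; the variable names are illustrative, not part of this API:
///
/// ```ignore
/// let downloader = SnapshotDownloader::new(
///     bucket_config,
///     tenant_id,
///     Utf8PathBuf::from("./tenant-snapshot"),
///     8, // number of layer downloads to run concurrently
/// )?;
/// downloader.download().await?;
/// ```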
pub struct SnapshotDownloader {
    s3_client: Arc<Client>,
    s3_root: RootTarget,
    bucket_config: BucketConfig,
    tenant_id: TenantId,
    output_path: Utf8PathBuf,
    concurrency: usize,
}

impl SnapshotDownloader {
    pub fn new(
        bucket_config: BucketConfig,
        tenant_id: TenantId,
        output_path: Utf8PathBuf,
        concurrency: usize,
    ) -> anyhow::Result<Self> {
        let (s3_client, s3_root) = init_remote(bucket_config.clone(), NodeKind::Pageserver)?;
        Ok(Self {
            s3_client,
            s3_root,
            bucket_config,
            tenant_id,
            output_path,
            concurrency,
        })
    }

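    /// Download a single layer file to
    /// `<output_path>/<tenant_shard_id>/timelines/<timeline_id>/<layer name><generation suffix>`,
    /// skipping files that already exist locally. Returns the layer's name and
    /// metadata so that callers can report progress.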
    async fn download_layer(
        &self,
        ttid: TenantShardTimelineId,
        layer_name: LayerName,
        layer_metadata: LayerFileMetadata,
    ) -> anyhow::Result<(LayerName, LayerFileMetadata)> {
        // Note this is "local" as in a local copy of the S3 data, not local as in the
        // pageserver's local format: remote-style layer names carry the generation suffix.
        let local_path = self.output_path.join(format!(
            "{}/timelines/{}/{}{}",
            ttid.tenant_shard_id,
            ttid.timeline_id,
            layer_name,
            layer_metadata.generation.get_suffix()
        ));

        // We should only be called for layers that are owned by the input TTID
        assert_eq!(layer_metadata.shard, ttid.tenant_shard_id.to_index());

        // Assumption: we always write layer files atomically, and layer files are immutable.
        // Therefore if the file already exists on local disk, we assume it is fully correct
        // and skip it.
        if tokio::fs::try_exists(&local_path).await? {
            tracing::debug!("{} already exists", local_path);
            return Ok((layer_name, layer_metadata));
        } else {
            tracing::debug!("{} requires download...", local_path);

            let timeline_root = self.s3_root.timeline_root(&ttid);
            let remote_layer_path = format!(
                "{}{}{}",
                timeline_root.prefix_in_bucket,
                layer_name,
                layer_metadata.generation.get_suffix()
            );

            // List versions: the object might have been deleted, so we target a specific
            // version ID rather than doing a plain GET.
            let versions = self
                .s3_client
                .list_object_versions()
                .bucket(self.bucket_config.bucket.clone())
                .prefix(&remote_layer_path)
                .send()
                .await?;
            let Some(version) = versions.versions.as_ref().and_then(|v| v.first()) else {
                return Err(anyhow::anyhow!("No versions found for {remote_layer_path}"));
            };
            download_object_to_file(
                &self.s3_client,
                &self.bucket_config.bucket,
                &remote_layer_path,
                version.version_id.as_deref(),
                &local_path,
            )
            .await?;

            tracing::debug!("Downloaded successfully to {local_path}");
        }

        Ok((layer_name, layer_metadata))
    }

    /// Download many layers belonging to the same TTID, with some concurrency
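    /// Individual failures are logged and do not abort the batch: every layer is
    /// attempted, and the last error (if any) is returned at the end.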
    async fn download_layers(
        &self,
        ttid: TenantShardTimelineId,
        layers: Vec<(LayerName, LayerFileMetadata)>,
    ) -> anyhow::Result<()> {
        let layer_count = layers.len();
        tracing::info!("Downloading {} layers for timeline {ttid}...", layer_count);
        let layers_stream = stream! {
            for (layer_name, layer_metadata) in layers {
                yield self.download_layer(ttid, layer_name, layer_metadata);
            }
        };

        tokio::fs::create_dir_all(self.output_path.join(format!(
            "{}/timelines/{}",
            ttid.tenant_shard_id, ttid.timeline_id
        )))
        .await?;

        let layer_results = layers_stream.buffered(self.concurrency);
        let mut layer_results = std::pin::pin!(layer_results);

        let mut err = None;
        let mut download_count = 0;
        while let Some(i) = layer_results.next().await {
            download_count += 1;
            match i {
                Ok((layer_name, layer_metadata)) => {
                    tracing::info!(
                        "[{download_count}/{layer_count}] OK: {} bytes {ttid} {}",
                        layer_metadata.file_size,
                        layer_name
                    );
                }
                Err(e) => {
                    // Warn and continue: we will download what we can
                    tracing::warn!("Download error: {e}");
                    err = Some(e);
                }
            }
        }
        if let Some(e) = err {
            tracing::warn!("Some errors occurred downloading {ttid} layers, last error: {e}");
            Err(e)
        } else {
            Ok(())
        }
    }

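    /// Download all layers referenced by one timeline's index. Layers owned by an
    /// ancestor shard (i.e. with a different shard count, from before a shard split)
    /// are not downloaded here; they are accumulated into `ancestor_layers` so that
    /// each ancestor layer is downloaded exactly once, later.
    ///
    /// The `index_part.json` is written last, after the layer downloads have completed.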
    async fn download_timeline(
        &self,
        ttid: TenantShardTimelineId,
        index_part: Box<IndexPart>,
        index_part_generation: Generation,
        ancestor_layers: &mut HashMap<TenantShardTimelineId, HashMap<LayerName, LayerFileMetadata>>,
    ) -> anyhow::Result<()> {
        let index_bytes = serde_json::to_string(&index_part).unwrap();

        let layers = index_part
            .layer_metadata
            .into_iter()
            .filter_map(|(layer_name, layer_metadata)| {
                if layer_metadata.shard.shard_count != ttid.tenant_shard_id.shard_count {
                    // Accumulate ancestor layers for later download
                    let ancestor_ttid = TenantShardTimelineId::new(
                        TenantShardId {
                            tenant_id: ttid.tenant_shard_id.tenant_id,
                            shard_number: layer_metadata.shard.shard_number,
                            shard_count: layer_metadata.shard.shard_count,
                        },
                        ttid.timeline_id,
                    );
                    let ancestor_ttid_layers = ancestor_layers.entry(ancestor_ttid).or_default();
                    use std::collections::hash_map::Entry;
                    match ancestor_ttid_layers.entry(layer_name) {
                        Entry::Occupied(entry) => {
                            // Descendant shards that reference a layer from an ancestor should
                            // always have metadata matching their siblings', because it is read
                            // atomically during a shard split.
                            assert_eq!(entry.get(), &layer_metadata);
                        }
                        Entry::Vacant(entry) => {
                            entry.insert(layer_metadata);
                        }
                    }
                    None
                } else {
                    Some((layer_name, layer_metadata))
                }
            })
            .collect();

        let download_result = self.download_layers(ttid, layers).await;

        // Write the index last, once all the layers it references are downloaded
        let local_index_path = self.output_path.join(format!(
            "{}/timelines/{}/index_part.json{}",
            ttid.tenant_shard_id,
            ttid.timeline_id,
            index_part_generation.get_suffix()
        ));
        tokio::fs::write(&local_index_path, index_bytes)
            .await
            .context("writing index")?;

        download_result
    }

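    /// Download a whole tenant: enumerate its shards, and for each shard with the
    /// highest shard count, download every timeline's layers and index. Layers held
    /// by ancestor shards (from before a split) are collected during the per-timeline
    /// pass and downloaded once at the end.
    ///
    /// On completion the output directory mirrors the remote layout; sketching the
    /// expected shape (actual file names depend on the tenant's data):
    ///
    /// ```text
    /// <output_path>/<tenant_shard_id>/timelines/<timeline_id>/
    ///     <layer file name><generation suffix>
    ///     index_part.json<generation suffix>
    /// ```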
    pub async fn download(&self) -> anyhow::Result<()> {
        let (s3_client, target) = init_remote(self.bucket_config.clone(), NodeKind::Pageserver)?;

        // Generate a stream of TenantShardId
        let shards = stream_tenant_shards(&s3_client, &target, self.tenant_id).await?;
        let shards: Vec<TenantShardId> = shards.try_collect().await?;

        // Only read from shards that have the highest shard count: this avoids
        // redundantly downloading from ancestor shards.
        let Some(shard_count) = shards.iter().map(|s| s.shard_count).max() else {
            anyhow::bail!("No shards found");
        };

        // We will build a collection of layers in ancestor shards to download (this will
        // only happen if this tenant has been split at some point)
        let mut ancestor_layers: HashMap<
            TenantShardTimelineId,
            HashMap<LayerName, LayerFileMetadata>,
        > = Default::default();

        for shard in shards.into_iter().filter(|s| s.shard_count == shard_count) {
            // Generate a stream of TenantTimelineId
            let timelines = stream_tenant_timelines(&s3_client, &self.s3_root, shard).await?;

            // Generate a stream of S3TimelineBlobData
            async fn load_timeline_index(
                s3_client: &Client,
                target: &RootTarget,
                ttid: TenantShardTimelineId,
            ) -> anyhow::Result<(TenantShardTimelineId, S3TimelineBlobData)> {
                let data = list_timeline_blobs(s3_client, ttid, target).await?;
                Ok((ttid, data))
            }
            let timelines = timelines.map_ok(|ttid| load_timeline_index(&s3_client, &target, ttid));
            let mut timelines = std::pin::pin!(timelines.try_buffered(8));

            while let Some(i) = timelines.next().await {
                let (ttid, data) = i?;
                match data.blob_data {
                    BlobDataParseResult::Parsed {
                        index_part,
                        index_part_generation,
                        s3_layers: _,
                    } => {
                        self.download_timeline(
                            ttid,
                            index_part,
                            index_part_generation,
                            &mut ancestor_layers,
                        )
                        .await
                        .context("Downloading timeline")?;
                    }
                    BlobDataParseResult::Relic => {}
                    BlobDataParseResult::Incorrect(_) => {
                        tracing::error!("Bad metadata in timeline {ttid}");
                    }
                };
            }
        }

        for (ttid, layers) in ancestor_layers.into_iter() {
            tracing::info!(
                "Downloading {} layers from ancestor timeline {ttid}...",
                layers.len()
            );

            self.download_layers(ttid, layers.into_iter().collect())
                .await?;
        }

        Ok(())
    }
}