Line data Source code
1 : //! Helper functions to download files from remote storage via a RemoteStorage implementation.
2 : //!
3 : //! The functions in this module retry failed operations automatically, according
4 : //! to the FAILED_REMOTE_OP_RETRIES constant.
5 :
6 : use std::collections::HashSet;
7 : use std::future::Future;
8 : use std::str::FromStr;
9 : use std::time::SystemTime;
10 :
11 : use anyhow::{anyhow, Context};
12 : use camino::{Utf8Path, Utf8PathBuf};
13 : use pageserver_api::shard::TenantShardId;
14 : use tokio::fs::{self, File, OpenOptions};
15 : use tokio::io::{AsyncSeekExt, AsyncWriteExt};
16 : use tokio_util::io::StreamReader;
17 : use tokio_util::sync::CancellationToken;
18 : use tracing::warn;
19 : use utils::backoff;
20 :
21 : use crate::config::PageServerConf;
22 : use crate::context::RequestContext;
23 : use crate::span::{
24 : debug_assert_current_span_has_tenant_and_timeline_id, debug_assert_current_span_has_tenant_id,
25 : };
26 : use crate::tenant::remote_timeline_client::{remote_layer_path, remote_timelines_path};
27 : use crate::tenant::storage_layer::LayerName;
28 : use crate::tenant::Generation;
29 : use crate::virtual_file::{on_fatal_io_error, MaybeFatalIo, VirtualFile};
30 : use crate::TEMP_FILE_SUFFIX;
31 : use remote_storage::{
32 : DownloadError, DownloadKind, DownloadOpts, GenericRemoteStorage, ListingMode, RemotePath,
33 : };
34 : use utils::crashsafe::path_with_suffix_extension;
35 : use utils::id::{TenantId, TimelineId};
36 : use utils::pausable_failpoint;
37 :
38 : use super::index::{IndexPart, LayerFileMetadata};
39 : use super::manifest::TenantManifest;
40 : use super::{
41 : parse_remote_index_path, parse_remote_tenant_manifest_path, remote_index_path,
42 : remote_initdb_archive_path, remote_initdb_preserved_archive_path, remote_tenant_manifest_path,
43 : remote_tenant_manifest_prefix, remote_tenant_path, FAILED_DOWNLOAD_WARN_THRESHOLD,
44 : FAILED_REMOTE_OP_RETRIES, INITDB_PATH,
45 : };
46 :
47 : /// Download a layer file from remote storage into the local filesystem.
48 : /// The downloaded file's size is validated against the size recorded in
49 : /// `layer_metadata`. (In the future, we might do more cross-checks, like CRC validation.)
50 : ///
51 : /// Returns the size of the downloaded file.
52 : #[allow(clippy::too_many_arguments)]
53 6 : pub async fn download_layer_file<'a>(
54 6 : conf: &'static PageServerConf,
55 6 : storage: &'a GenericRemoteStorage,
56 6 : tenant_shard_id: TenantShardId,
57 6 : timeline_id: TimelineId,
58 6 : layer_file_name: &'a LayerName,
59 6 : layer_metadata: &'a LayerFileMetadata,
60 6 : local_path: &Utf8Path,
61 6 : gate: &utils::sync::gate::Gate,
62 6 : cancel: &CancellationToken,
63 6 : ctx: &RequestContext,
64 6 : ) -> Result<u64, DownloadError> {
65 6 : debug_assert_current_span_has_tenant_and_timeline_id();
66 6 :
67 6 : let timeline_path = conf.timeline_path(&tenant_shard_id, &timeline_id);
68 6 :
69 6 : let remote_path = remote_layer_path(
70 6 : &tenant_shard_id.tenant_id,
71 6 : &timeline_id,
72 6 : layer_metadata.shard,
73 6 : layer_file_name,
74 6 : layer_metadata.generation,
75 6 : );
76 6 :
77 6 : // Perform a rename inspired by durable_rename from file_utils.c.
78 6 : // The sequence:
79 6 : // write(tmp)
80 6 : // fsync(tmp)
81 6 : // rename(tmp, new)
82 6 : // fsync(new)
83 6 : // fsync(parent)
84 6 : // For more context about durable_rename check this email from postgres mailing list:
85 6 : // https://www.postgresql.org/message-id/56583BDD.9060302@2ndquadrant.com
86 6 : // If pageserver crashes the temp file will be deleted on startup and re-downloaded.
87 6 : let temp_file_path = path_with_suffix_extension(local_path, TEMP_DOWNLOAD_EXTENSION);
88 :
89 6 : let bytes_amount = download_retry(
90 6 : || async {
91 6 : download_object(storage, &remote_path, &temp_file_path, gate, cancel, ctx).await
92 12 : },
93 6 : &format!("download {remote_path:?}"),
94 6 : cancel,
95 6 : )
96 6 : .await?;
97 :
98 6 : let expected = layer_metadata.file_size;
99 6 : if expected != bytes_amount {
100 0 : return Err(DownloadError::Other(anyhow!(
101 0 : "According to layer file metadata should have downloaded {expected} bytes but downloaded {bytes_amount} bytes into file {temp_file_path:?}",
102 0 : )));
103 6 : }
104 6 :
105 6 : fail::fail_point!("remote-storage-download-pre-rename", |_| {
106 0 : Err(DownloadError::Other(anyhow!(
107 0 : "remote-storage-download-pre-rename failpoint triggered"
108 0 : )))
109 6 : });
110 :
111 6 : fs::rename(&temp_file_path, &local_path)
112 6 : .await
113 6 : .with_context(|| format!("rename download layer file to {local_path}"))
114 6 : .map_err(DownloadError::Other)?;
115 :
116 : // We use fatal_err() below because, after the rename above,
117 : // the in-memory state of the filesystem already has the layer file in its final place,
118 : // and subsequent pageserver code could think it's durable while it really isn't.
119 6 : let work = {
120 6 : let ctx = ctx.detached_child(ctx.task_kind(), ctx.download_behavior());
121 6 : async move {
122 6 : let timeline_dir = VirtualFile::open(&timeline_path, &ctx)
123 6 : .await
124 6 : .fatal_err("VirtualFile::open for timeline dir fsync");
125 6 : timeline_dir
126 6 : .sync_all()
127 6 : .await
128 6 : .fatal_err("VirtualFile::sync_all timeline dir");
129 6 : }
130 : };
131 6 : crate::virtual_file::io_engine::get()
132 6 : .spawn_blocking_and_block_on_if_std(work)
133 6 : .await;
134 :
135 6 : tracing::debug!("download complete: {local_path}");
136 :
137 6 : Ok(bytes_amount)
138 6 : }
139 :
140 : /// Download the object `src_path` in the remote `storage` to local path `dst_path`.
141 : ///
142 : /// If Ok() is returned, the download succeeded and the inode & data have been made durable.
143 : /// (Note that the directory entry for the inode is not made durable.)
144 : /// The file size in bytes is returned.
145 : ///
146 : /// If Err() is returned, there was some error. The file at `dst_path` has been unlinked.
147 : /// The unlinking has _not_ been made durable.
148 6 : async fn download_object(
149 6 : storage: &GenericRemoteStorage,
150 6 : src_path: &RemotePath,
151 6 : dst_path: &Utf8PathBuf,
152 6 : #[cfg_attr(target_os = "macos", allow(unused_variables))] gate: &utils::sync::gate::Gate,
153 6 : cancel: &CancellationToken,
154 6 : #[cfg_attr(target_os = "macos", allow(unused_variables))] ctx: &RequestContext,
155 6 : ) -> Result<u64, DownloadError> {
156 6 : let res = match crate::virtual_file::io_engine::get() {
157 0 : crate::virtual_file::io_engine::IoEngine::NotSet => panic!("unset"),
158 : crate::virtual_file::io_engine::IoEngine::StdFs => {
159 3 : async {
160 3 : let destination_file = tokio::fs::File::create(dst_path)
161 3 : .await
162 3 : .with_context(|| format!("create a destination file for layer '{dst_path}'"))
163 3 : .map_err(DownloadError::Other)?;
164 :
165 3 : let download = storage
166 3 : .download(src_path, &DownloadOpts::default(), cancel)
167 3 : .await?;
168 :
169 3 : pausable_failpoint!("before-downloading-layer-stream-pausable");
170 :
171 3 : let mut buf_writer =
172 3 : tokio::io::BufWriter::with_capacity(super::BUFFER_SIZE, destination_file);
173 3 :
174 3 : let mut reader = tokio_util::io::StreamReader::new(download.download_stream);
175 :
176 3 : let bytes_amount = tokio::io::copy_buf(&mut reader, &mut buf_writer).await?;
177 3 : buf_writer.flush().await?;
178 :
179 3 : let mut destination_file = buf_writer.into_inner();
180 3 :
181 3 : // Tokio doc here: https://docs.rs/tokio/1.17.0/tokio/fs/struct.File.html states that:
182 3 : // A file will not be closed immediately when it goes out of scope if there are any IO operations
183 3 : // that have not yet completed. To ensure that a file is closed immediately when it is dropped,
184 3 : // you should call flush before dropping it.
185 3 : //
186 3 : // From the tokio code I see that it waits for pending operations to complete. There shouldn't be any,
187 3 : // because we assume the `destination_file` is fully written, i.e. there are no pending `.write(...).await` operations.
188 3 : // But for additional safety, let's flush and wait for any pending operations.
189 3 : destination_file
190 3 : .flush()
191 3 : .await
192 3 : .maybe_fatal_err("download_object sync_all")
193 3 : .with_context(|| format!("flush source file at {dst_path}"))
194 3 : .map_err(DownloadError::Other)?;
195 :
196 : // Not using sync_data because it can lose the file size update.
197 3 : destination_file
198 3 : .sync_all()
199 3 : .await
200 3 : .maybe_fatal_err("download_object sync_all")
201 3 : .with_context(|| format!("failed to fsync source file at {dst_path}"))
202 3 : .map_err(DownloadError::Other)?;
203 :
204 3 : Ok(bytes_amount)
205 3 : }
206 3 : .await
207 : }
208 : #[cfg(target_os = "linux")]
209 : crate::virtual_file::io_engine::IoEngine::TokioEpollUring => {
210 : use crate::virtual_file::owned_buffers_io;
211 : use crate::virtual_file::IoBufferMut;
212 : use std::sync::Arc;
213 3 : async {
214 3 : let destination_file = Arc::new(
215 3 : VirtualFile::create(dst_path, ctx)
216 3 : .await
217 3 : .with_context(|| {
218 0 : format!("create a destination file for layer '{dst_path}'")
219 3 : })
220 3 : .map_err(DownloadError::Other)?,
221 : );
222 :
223 3 : let mut download = storage
224 3 : .download(src_path, &DownloadOpts::default(), cancel)
225 3 : .await?;
226 :
227 3 : pausable_failpoint!("before-downloading-layer-stream-pausable");
228 :
229 3 : let mut buffered = owned_buffers_io::write::BufferedWriter::<IoBufferMut, _>::new(
230 3 : destination_file,
231 6 : || IoBufferMut::with_capacity(super::BUFFER_SIZE),
232 3 : gate.enter().map_err(|_| DownloadError::Cancelled)?,
233 3 : ctx,
234 : );
235 :
236 : // TODO: use vectored write (writev) once supported by tokio-epoll-uring.
237 : // There's chunks_vectored() on the stream.
238 3 : let (bytes_amount, destination_file) = async {
239 18 : while let Some(res) =
240 21 : futures::StreamExt::next(&mut download.download_stream).await
241 : {
242 18 : let chunk = match res {
243 18 : Ok(chunk) => chunk,
244 0 : Err(e) => return Err(e),
245 : };
246 18 : buffered.write_buffered_borrowed(&chunk, ctx).await?;
247 : }
248 3 : let inner = buffered.flush_and_into_inner(ctx).await?;
249 3 : Ok(inner)
250 3 : }
251 3 : .await?;
252 :
253 : // Not using sync_data because it can lose the file size update.
254 3 : destination_file
255 3 : .sync_all()
256 3 : .await
257 3 : .maybe_fatal_err("download_object sync_all")
258 3 : .with_context(|| format!("failed to fsync source file at {dst_path}"))
259 3 : .map_err(DownloadError::Other)?;
260 :
261 3 : Ok(bytes_amount)
262 3 : }
263 3 : .await
264 : }
265 : };
266 :
267 : // in case the download failed, clean up
268 6 : match res {
269 6 : Ok(bytes_amount) => Ok(bytes_amount),
270 0 : Err(e) => {
271 0 : if let Err(e) = tokio::fs::remove_file(dst_path).await {
272 0 : if e.kind() != std::io::ErrorKind::NotFound {
273 0 : on_fatal_io_error(&e, &format!("Removing temporary file {dst_path}"));
274 0 : }
275 0 : }
276 0 : Err(e)
277 : }
278 : }
279 6 : }
280 :
281 : const TEMP_DOWNLOAD_EXTENSION: &str = "temp_download";
282 :
283 0 : pub(crate) fn is_temp_download_file(path: &Utf8Path) -> bool {
284 0 : let extension = path.extension();
285 0 : match extension {
286 0 : Some(TEMP_DOWNLOAD_EXTENSION) => true,
287 0 : Some(_) => false,
288 0 : None => false,
289 : }
290 0 : }
291 :
292 196 : async fn list_identifiers<T>(
293 196 : storage: &GenericRemoteStorage,
294 196 : prefix: RemotePath,
295 196 : cancel: CancellationToken,
296 196 : ) -> anyhow::Result<(HashSet<T>, HashSet<String>)>
297 196 : where
298 196 : T: FromStr + Eq + std::hash::Hash,
299 196 : {
300 196 : let listing = download_retry_forever(
301 196 : || storage.list(Some(&prefix), ListingMode::WithDelimiter, None, &cancel),
302 196 : &format!("list identifiers in prefix {prefix}"),
303 196 : &cancel,
304 196 : )
305 196 : .await?;
306 :
307 196 : let mut parsed_ids = HashSet::new();
308 196 : let mut other_prefixes = HashSet::new();
309 :
310 202 : for id_remote_storage_key in listing.prefixes {
311 6 : let object_name = id_remote_storage_key.object_name().ok_or_else(|| {
312 0 : anyhow::anyhow!("failed to get object name for key {id_remote_storage_key}")
313 6 : })?;
314 :
315 6 : match object_name.parse::<T>() {
316 6 : Ok(t) => parsed_ids.insert(t),
317 0 : Err(_) => other_prefixes.insert(object_name.to_string()),
318 : };
319 : }
320 :
321 196 : for object in listing.keys {
322 0 : let object_name = object
323 0 : .key
324 0 : .object_name()
325 0 : .ok_or_else(|| anyhow::anyhow!("object name for key {}", object.key))?;
326 0 : other_prefixes.insert(object_name.to_string());
327 : }
328 :
329 196 : Ok((parsed_ids, other_prefixes))
330 196 : }
331 :
332 : /// List shards of the given tenant in remote storage.
333 0 : pub(crate) async fn list_remote_tenant_shards(
334 0 : storage: &GenericRemoteStorage,
335 0 : tenant_id: TenantId,
336 0 : cancel: CancellationToken,
337 0 : ) -> anyhow::Result<(HashSet<TenantShardId>, HashSet<String>)> {
338 0 : let remote_path = remote_tenant_path(&TenantShardId::unsharded(tenant_id));
339 0 : list_identifiers::<TenantShardId>(storage, remote_path, cancel).await
340 0 : }
341 :
342 : /// List timelines of given tenant shard in remote storage
343 196 : pub async fn list_remote_timelines(
344 196 : storage: &GenericRemoteStorage,
345 196 : tenant_shard_id: TenantShardId,
346 196 : cancel: CancellationToken,
347 196 : ) -> anyhow::Result<(HashSet<TimelineId>, HashSet<String>)> {
348 196 : fail::fail_point!("storage-sync-list-remote-timelines", |_| {
349 0 : anyhow::bail!("storage-sync-list-remote-timelines");
350 196 : });
351 :
352 196 : let remote_path = remote_timelines_path(&tenant_shard_id).add_trailing_slash();
353 196 : list_identifiers::<TimelineId>(storage, remote_path, cancel).await
354 196 : }
355 :
356 622 : async fn do_download_remote_path_retry_forever(
357 622 : storage: &GenericRemoteStorage,
358 622 : remote_path: &RemotePath,
359 622 : download_opts: DownloadOpts,
360 622 : cancel: &CancellationToken,
361 622 : ) -> Result<(Vec<u8>, SystemTime), DownloadError> {
362 622 : download_retry_forever(
363 622 : || async {
364 622 : let download = storage
365 622 : .download(remote_path, &download_opts, cancel)
366 622 : .await?;
367 :
368 20 : let mut bytes = Vec::new();
369 20 :
370 20 : let stream = download.download_stream;
371 20 : let mut stream = StreamReader::new(stream);
372 20 :
373 20 : tokio::io::copy_buf(&mut stream, &mut bytes).await?;
374 :
375 20 : Ok((bytes, download.last_modified))
376 1244 : },
377 622 : &format!("download {remote_path:?}"),
378 622 : cancel,
379 622 : )
380 622 : .await
381 622 : }
382 :
383 588 : async fn do_download_tenant_manifest(
384 588 : storage: &GenericRemoteStorage,
385 588 : tenant_shard_id: &TenantShardId,
386 588 : _timeline_id: Option<&TimelineId>,
387 588 : generation: Generation,
388 588 : cancel: &CancellationToken,
389 588 : ) -> Result<(TenantManifest, Generation, SystemTime), DownloadError> {
390 588 : let remote_path = remote_tenant_manifest_path(tenant_shard_id, generation);
391 588 :
392 588 : let download_opts = DownloadOpts {
393 588 : kind: DownloadKind::Small,
394 588 : ..Default::default()
395 588 : };
396 :
397 0 : let (manifest_bytes, manifest_bytes_mtime) =
398 588 : do_download_remote_path_retry_forever(storage, &remote_path, download_opts, cancel).await?;
399 :
400 0 : let tenant_manifest = TenantManifest::from_json_bytes(&manifest_bytes)
401 0 : .with_context(|| format!("deserialize tenant manifest file at {remote_path:?}"))
402 0 : .map_err(DownloadError::Other)?;
403 :
404 0 : Ok((tenant_manifest, generation, manifest_bytes_mtime))
405 588 : }
406 :
407 34 : async fn do_download_index_part(
408 34 : storage: &GenericRemoteStorage,
409 34 : tenant_shard_id: &TenantShardId,
410 34 : timeline_id: Option<&TimelineId>,
411 34 : index_generation: Generation,
412 34 : cancel: &CancellationToken,
413 34 : ) -> Result<(IndexPart, Generation, SystemTime), DownloadError> {
414 34 : let timeline_id =
415 34 : timeline_id.expect("A timeline ID is always provided when downloading an index");
416 34 : let remote_path = remote_index_path(tenant_shard_id, timeline_id, index_generation);
417 34 :
418 34 : let download_opts = DownloadOpts {
419 34 : kind: DownloadKind::Small,
420 34 : ..Default::default()
421 34 : };
422 :
423 20 : let (index_part_bytes, index_part_mtime) =
424 34 : do_download_remote_path_retry_forever(storage, &remote_path, download_opts, cancel).await?;
425 :
426 20 : let index_part: IndexPart = serde_json::from_slice(&index_part_bytes)
427 20 : .with_context(|| format!("deserialize index part file at {remote_path:?}"))
428 20 : .map_err(DownloadError::Other)?;
429 :
430 20 : Ok((index_part, index_generation, index_part_mtime))
431 34 : }
432 :
433 : /// Metadata objects are "generationed", meaning that they include a generation suffix. This
434 : /// function downloads the object with the highest generation <= `my_generation`.
435 : ///
436 : /// Data objects (layer files) also include a generation in their path, but there is no equivalent
437 : /// search process, because their reference from an index includes the generation.
438 : ///
439 : /// An expensive object listing operation is only done if necessary: the typical fast path is to issue two
440 : /// GET operations, one to our own generation (stale attachment case), and one to the immediately preceding
441 : /// generation (normal case when migrating/restarting). Only if both of these return 404 do we fall back
442 : /// to listing objects.
443 : ///
444 : /// * `my_generation`: the value of `[crate::tenant::Tenant::generation]`
445 : /// * `what`: for logging, what object are we downloading
446 : /// * `prefix`: when listing objects, use this prefix (i.e. the part of the object path before the generation)
447 : /// * `do_download`: a GET of the object in a particular generation, which should **retry indefinitely** unless
448 : /// `cancel` has fired. This function does not do its own retries of GET operations, and relies
449 : /// on the function passed in to do so.
450 : /// * `parse_path`: parse a fully qualified remote storage path to get the generation of the object.
451 : #[allow(clippy::too_many_arguments)]
452 216 : #[tracing::instrument(skip_all, fields(generation=?my_generation))]
453 : pub(crate) async fn download_generation_object<'a, T, DF, DFF, PF>(
454 : storage: &'a GenericRemoteStorage,
455 : tenant_shard_id: &'a TenantShardId,
456 : timeline_id: Option<&'a TimelineId>,
457 : my_generation: Generation,
458 : what: &str,
459 : prefix: RemotePath,
460 : do_download: DF,
461 : parse_path: PF,
462 : cancel: &'a CancellationToken,
463 : ) -> Result<(T, Generation, SystemTime), DownloadError>
464 : where
465 : DF: Fn(
466 : &'a GenericRemoteStorage,
467 : &'a TenantShardId,
468 : Option<&'a TimelineId>,
469 : Generation,
470 : &'a CancellationToken,
471 : ) -> DFF,
472 : DFF: Future<Output = Result<(T, Generation, SystemTime), DownloadError>>,
473 : PF: Fn(RemotePath) -> Option<Generation>,
474 : T: 'static,
475 : {
476 : debug_assert_current_span_has_tenant_id();
477 :
478 : if my_generation.is_none() {
479 : // Operating without generations: just fetch the generation-less path
480 : return do_download(storage, tenant_shard_id, timeline_id, my_generation, cancel).await;
481 : }
482 :
483 : // Stale case: If we were intentionally attached in a stale generation, the remote object may already
484 : // exist in our generation.
485 : //
486 : // This is an optimization to avoid doing the listing for the general case below.
487 : let res = do_download(storage, tenant_shard_id, timeline_id, my_generation, cancel).await;
488 : match res {
489 : Ok(decoded) => {
490 : tracing::debug!("Found {what} from current generation (this is a stale attachment)");
491 : return Ok(decoded);
492 : }
493 : Err(DownloadError::NotFound) => {}
494 : Err(e) => return Err(e),
495 : };
496 :
497 : // Typical case: the previous generation of this tenant was running healthily, and had uploaded the object
498 : // we are seeking in that generation. We may safely start from this index without doing a listing, because:
499 : // - We checked for current generation case above
500 : // - generations > my_generation are to be ignored
501 : // - any other objects that exist would have an older generation than `previous_gen`, and
502 : // we want to find the most recent object from a previous generation.
503 : //
504 : // This is an optimization to avoid doing the listing for the general case below.
505 : let res = do_download(
506 : storage,
507 : tenant_shard_id,
508 : timeline_id,
509 : my_generation.previous(),
510 : cancel,
511 : )
512 : .await;
513 : match res {
514 : Ok(decoded) => {
515 : tracing::debug!("Found {what} from previous generation");
516 : return Ok(decoded);
517 : }
518 : Err(DownloadError::NotFound) => {
519 : tracing::debug!("No {what} found from previous generation, falling back to listing");
520 : }
521 : Err(e) => {
522 : return Err(e);
523 : }
524 : }
525 :
526 : // General case/fallback: if there is no object at my_generation or prev_generation, then list all
527 : // objects under the prefix, and select the highest one with a generation <= my_generation. Constructing the prefix is equivalent
528 : // to constructing a full index path with no generation, because the generation is a suffix.
529 : let paths = download_retry(
530 202 : || async {
531 202 : storage
532 202 : .list(Some(&prefix), ListingMode::NoDelimiter, None, cancel)
533 202 : .await
534 404 : },
535 : "list index_part files",
536 : cancel,
537 : )
538 : .await?
539 : .keys;
540 :
541 : // General case logic for which index to use: the latest index whose generation
542 : // is <= our own. See "Finding the remote indices for timelines" in docs/rfcs/025-generation-numbers.md
543 : let max_previous_generation = paths
544 : .into_iter()
545 18 : .filter_map(|o| parse_path(o.key))
546 12 : .filter(|g| g <= &my_generation)
547 : .max();
548 :
549 : match max_previous_generation {
550 : Some(g) => {
551 : tracing::debug!("Found {what} in generation {g:?}");
552 : do_download(storage, tenant_shard_id, timeline_id, g, cancel).await
553 : }
554 : None => {
555 : // Migration from legacy pre-generation state: we have a generation but no prior
556 : // attached pageservers did. Try to load from a no-generation path.
557 : tracing::debug!("No {what}* found");
558 : do_download(
559 : storage,
560 : tenant_shard_id,
561 : timeline_id,
562 : Generation::none(),
563 : cancel,
564 : )
565 : .await
566 : }
567 : }
568 : }
569 :
570 : /// index_part.json objects are suffixed with a generation number, so we cannot
571 : /// directly GET the latest index part without doing some probing.
572 : ///
573 : /// In this function we probe for the most recent index in a generation <= our current generation.
574 : /// See "Finding the remote indices for timelines" in docs/rfcs/025-generation-numbers.md
575 20 : pub(crate) async fn download_index_part(
576 20 : storage: &GenericRemoteStorage,
577 20 : tenant_shard_id: &TenantShardId,
578 20 : timeline_id: &TimelineId,
579 20 : my_generation: Generation,
580 20 : cancel: &CancellationToken,
581 20 : ) -> Result<(IndexPart, Generation, SystemTime), DownloadError> {
582 20 : debug_assert_current_span_has_tenant_and_timeline_id();
583 20 :
584 20 : let index_prefix = remote_index_path(tenant_shard_id, timeline_id, Generation::none());
585 20 : download_generation_object(
586 20 : storage,
587 20 : tenant_shard_id,
588 20 : Some(timeline_id),
589 20 : my_generation,
590 20 : "index_part",
591 20 : index_prefix,
592 20 : do_download_index_part,
593 20 : parse_remote_index_path,
594 20 : cancel,
595 20 : )
596 20 : .await
597 20 : }
598 :
599 196 : pub(crate) async fn download_tenant_manifest(
600 196 : storage: &GenericRemoteStorage,
601 196 : tenant_shard_id: &TenantShardId,
602 196 : my_generation: Generation,
603 196 : cancel: &CancellationToken,
604 196 : ) -> Result<(TenantManifest, Generation, SystemTime), DownloadError> {
605 196 : let manifest_prefix = remote_tenant_manifest_prefix(tenant_shard_id);
606 196 :
607 196 : download_generation_object(
608 196 : storage,
609 196 : tenant_shard_id,
610 196 : None,
611 196 : my_generation,
612 196 : "tenant-manifest",
613 196 : manifest_prefix,
614 196 : do_download_tenant_manifest,
615 196 : parse_remote_tenant_manifest_path,
616 196 : cancel,
617 196 : )
618 196 : .await
619 196 : }
620 :
621 2 : pub(crate) async fn download_initdb_tar_zst(
622 2 : conf: &'static PageServerConf,
623 2 : storage: &GenericRemoteStorage,
624 2 : tenant_shard_id: &TenantShardId,
625 2 : timeline_id: &TimelineId,
626 2 : cancel: &CancellationToken,
627 2 : ) -> Result<(Utf8PathBuf, File), DownloadError> {
628 2 : debug_assert_current_span_has_tenant_and_timeline_id();
629 2 :
630 2 : let remote_path = remote_initdb_archive_path(&tenant_shard_id.tenant_id, timeline_id);
631 2 :
632 2 : let remote_preserved_path =
633 2 : remote_initdb_preserved_archive_path(&tenant_shard_id.tenant_id, timeline_id);
634 2 :
635 2 : let timeline_path = conf.timelines_path(tenant_shard_id);
636 2 :
637 2 : if !timeline_path.exists() {
638 0 : tokio::fs::create_dir_all(&timeline_path)
639 0 : .await
640 0 : .with_context(|| format!("timeline dir creation {timeline_path}"))
641 0 : .map_err(DownloadError::Other)?;
642 2 : }
643 2 : let temp_path = timeline_path.join(format!(
644 2 : "{INITDB_PATH}.download-{timeline_id}.{TEMP_FILE_SUFFIX}"
645 2 : ));
646 :
647 2 : let file = download_retry(
648 2 : || async {
649 2 : let file = OpenOptions::new()
650 2 : .create(true)
651 2 : .truncate(true)
652 2 : .read(true)
653 2 : .write(true)
654 2 : .open(&temp_path)
655 2 : .await
656 2 : .with_context(|| format!("tempfile creation {temp_path}"))
657 2 : .map_err(DownloadError::Other)?;
658 :
659 2 : let download = match storage
660 2 : .download(&remote_path, &DownloadOpts::default(), cancel)
661 2 : .await
662 : {
663 2 : Ok(dl) => dl,
664 : Err(DownloadError::NotFound) => {
665 0 : storage
666 0 : .download(&remote_preserved_path, &DownloadOpts::default(), cancel)
667 0 : .await?
668 : }
669 0 : Err(other) => Err(other)?,
670 : };
671 2 : let mut download = tokio_util::io::StreamReader::new(download.download_stream);
672 2 : let mut writer = tokio::io::BufWriter::with_capacity(super::BUFFER_SIZE, file);
673 2 :
674 2 : tokio::io::copy_buf(&mut download, &mut writer).await?;
675 :
676 2 : let mut file = writer.into_inner();
677 2 :
678 2 : file.seek(std::io::SeekFrom::Start(0))
679 2 : .await
680 2 : .with_context(|| format!("rewinding initdb.tar.zst at: {remote_path:?}"))
681 2 : .map_err(DownloadError::Other)?;
682 :
683 2 : Ok(file)
684 4 : },
685 2 : &format!("download {remote_path}"),
686 2 : cancel,
687 2 : )
688 2 : .await
689 2 : .inspect_err(|_e| {
690 : // Do a best-effort attempt at deleting the temporary file upon encountering an error.
691 : // We don't have async here nor do we want to pile on any extra errors.
692 0 : if let Err(e) = std::fs::remove_file(&temp_path) {
693 0 : if e.kind() != std::io::ErrorKind::NotFound {
694 0 : warn!("error deleting temporary file {temp_path}: {e}");
695 0 : }
696 0 : }
697 2 : })?;
698 :
699 2 : Ok((temp_path, file))
700 2 : }
701 :
702 : /// Helper function to handle retries for a download operation.
703 : ///
704 : /// Remote operations can fail due to rate limits (S3), spurious network
705 : /// problems, or other external reasons. Retry FAILED_REMOTE_OP_RETRIES times,
706 : /// with backoff.
707 : ///
708 : /// (See similar logic for uploads in `perform_upload_task`)
709 210 : pub(super) async fn download_retry<T, O, F>(
710 210 : op: O,
711 210 : description: &str,
712 210 : cancel: &CancellationToken,
713 210 : ) -> Result<T, DownloadError>
714 210 : where
715 210 : O: FnMut() -> F,
716 210 : F: Future<Output = Result<T, DownloadError>>,
717 210 : {
718 210 : backoff::retry(
719 210 : op,
720 210 : DownloadError::is_permanent,
721 210 : FAILED_DOWNLOAD_WARN_THRESHOLD,
722 210 : FAILED_REMOTE_OP_RETRIES,
723 210 : description,
724 210 : cancel,
725 210 : )
726 210 : .await
727 210 : .ok_or_else(|| DownloadError::Cancelled)
728 210 : .and_then(|x| x)
729 210 : }
730 :
731 818 : pub(crate) async fn download_retry_forever<T, O, F>(
732 818 : op: O,
733 818 : description: &str,
734 818 : cancel: &CancellationToken,
735 818 : ) -> Result<T, DownloadError>
736 818 : where
737 818 : O: FnMut() -> F,
738 818 : F: Future<Output = Result<T, DownloadError>>,
739 818 : {
740 818 : backoff::retry(
741 818 : op,
742 818 : DownloadError::is_permanent,
743 818 : FAILED_DOWNLOAD_WARN_THRESHOLD,
744 818 : u32::MAX,
745 818 : description,
746 818 : cancel,
747 818 : )
748 818 : .await
749 818 : .ok_or_else(|| DownloadError::Cancelled)
750 818 : .and_then(|x| x)
751 818 : }