use std::{collections::HashSet, sync::Arc};

use super::{layer_manager::LayerManager, FlushLayerError, Timeline};
use crate::{
    context::{DownloadBehavior, RequestContext},
    task_mgr::TaskKind,
    tenant::{
        remote_timeline_client::index::GcBlockingReason::DetachAncestor,
        storage_layer::{
            layer::local_layer_path, AsLayerDesc as _, DeltaLayerWriter, Layer, ResidentLayer,
        },
        Tenant,
    },
    virtual_file::{MaybeFatalIo, VirtualFile},
};
use anyhow::Context;
use http_utils::error::ApiError;
use pageserver_api::{models::detach_ancestor::AncestorDetached, shard::ShardIdentity};
use tokio::sync::Semaphore;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use utils::{completion, generation::Generation, id::TimelineId, lsn::Lsn};

#[derive(Debug, thiserror::Error)]
pub(crate) enum Error {
    #[error("no ancestors")]
    NoAncestor,

    #[error("too many ancestors")]
    TooManyAncestors,

    #[error("shutting down, please retry later")]
    ShuttingDown,

    #[error("archived: {}", .0)]
    Archived(TimelineId),

    #[error(transparent)]
    NotFound(crate::tenant::GetTimelineError),

    #[error("failed to reparent all candidate timelines, please retry")]
    FailedToReparentAll,

    #[error("ancestor is already being detached by: {}", .0)]
    OtherTimelineDetachOngoing(TimelineId),

    #[error("preparing timeline ancestor detach failed")]
    Prepare(#[source] anyhow::Error),

    #[error("detaching and reparenting failed")]
    DetachReparent(#[source] anyhow::Error),

    #[error("completing ancestor detach failed")]
    Complete(#[source] anyhow::Error),

    #[error("failpoint: {}", .0)]
    Failpoint(&'static str),
}

impl Error {
    /// Try to catch cancellation from within the `anyhow::Error`; otherwise wrap the anyhow
    /// error into the variant built by the given `or_else`.
    fn launder<F>(e: anyhow::Error, or_else: F) -> Error
    where
        F: Fn(anyhow::Error) -> Error,
    {
        use crate::tenant::remote_timeline_client::WaitCompletionError;
        use crate::tenant::upload_queue::NotInitialized;
        use remote_storage::TimeoutOrCancel;

        if e.is::<NotInitialized>()
            || TimeoutOrCancel::caused_by_cancel(&e)
            || e.downcast_ref::<remote_storage::DownloadError>()
                .is_some_and(|e| e.is_cancelled())
            || e.is::<WaitCompletionError>()
        {
            Error::ShuttingDown
        } else {
            or_else(e)
        }
    }
}

impl From<Error> for ApiError {
    fn from(value: Error) -> Self {
        match value {
            Error::NoAncestor => ApiError::Conflict(value.to_string()),
            Error::TooManyAncestors => ApiError::BadRequest(anyhow::anyhow!("{value}")),
            Error::ShuttingDown => ApiError::ShuttingDown,
            Error::Archived(_) => ApiError::BadRequest(anyhow::anyhow!("{value}")),
            Error::OtherTimelineDetachOngoing(_) | Error::FailedToReparentAll => {
                ApiError::ResourceUnavailable(value.to_string().into())
            }
            Error::NotFound(e) => ApiError::from(e),
            // these variants should have no cancellation errors because of Error::launder
            Error::Prepare(_)
            | Error::DetachReparent(_)
            | Error::Complete(_)
            | Error::Failpoint(_) => ApiError::InternalServerError(value.into()),
        }
    }
}

impl From<crate::tenant::upload_queue::NotInitialized> for Error {
    fn from(_: crate::tenant::upload_queue::NotInitialized) -> Self {
        // treat all as shutting down signals, even though that is not entirely correct
        // (uninitialized state)
        Error::ShuttingDown
    }
}
impl From<super::layer_manager::Shutdown> for Error {
    fn from(_: super::layer_manager::Shutdown) -> Self {
        Error::ShuttingDown
    }
}

pub(crate) enum Progress {
    Prepared(Attempt, PreparedTimelineDetach),
    Done(AncestorDetached),
}

pub(crate) struct PreparedTimelineDetach {
    layers: Vec<Layer>,
}

/// TODO: this should be part of PageserverConf because we cannot easily modify cplane arguments.
#[derive(Debug)]
pub(crate) struct Options {
    pub(crate) rewrite_concurrency: std::num::NonZeroUsize,
    pub(crate) copy_concurrency: std::num::NonZeroUsize,
}

impl Default for Options {
    fn default() -> Self {
        Self {
            rewrite_concurrency: std::num::NonZeroUsize::new(2).unwrap(),
            copy_concurrency: std::num::NonZeroUsize::new(100).unwrap(),
        }
    }
}

/// Represents a single attempt to detach a timeline from its ancestor; the attempt stays
/// exclusive across tenant resets.
#[derive(Debug)]
pub(crate) struct Attempt {
    pub(crate) timeline_id: TimelineId,

    _guard: completion::Completion,
    gate_entered: Option<utils::sync::gate::GateGuard>,
}

impl Attempt {
    pub(crate) fn before_reset_tenant(&mut self) {
        let taken = self.gate_entered.take();
        assert!(taken.is_some());
    }

    pub(crate) fn new_barrier(&self) -> completion::Barrier {
        self._guard.barrier()
    }
}

/// See [`Timeline::prepare_to_detach_from_ancestor`]
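///
/// Copies the ancestor's layers up to the branch point into `detached`: delta layers straddling
/// the branch point get their lsn prefix rewritten, and the remaining historic layers are copied
/// within remote storage. Below is a simplified sketch of how the entry points of this module fit
/// together; the caller, its error handling, and the tenant reset step are assumptions about the
/// surrounding code, not part of this file:
///
/// ```ignore
/// let progress = prepare(&detached, &tenant, Options::default(), &ctx).await?;
/// match progress {
///     Progress::Prepared(attempt, prepared) => {
///         let outcome = detach_and_reparent(&detached, &tenant, prepared, &ctx).await?;
///         if outcome.reset_tenant_required() {
///             // the caller resets the tenant here, see `Attempt::before_reset_tenant`
///         }
///         complete(&detached, &tenant, attempt, &ctx).await?;
///     }
///     // a previous attempt already detached and reparented everything
///     Progress::Done(_already_detached) => {}
/// }
/// ```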
pub(super) async fn prepare(
    detached: &Arc<Timeline>,
    tenant: &Tenant,
    options: Options,
    ctx: &RequestContext,
) -> Result<Progress, Error> {
    use Error::*;

    let Some((ancestor, ancestor_lsn)) = detached
        .ancestor_timeline
        .as_ref()
        .map(|tl| (tl.clone(), detached.ancestor_lsn))
    else {
        let still_in_progress = {
            let accessor = detached.remote_client.initialized_upload_queue()?;

            // we are safe to inspect the latest uploaded index, because we can only witness
            // this after the restart is complete and the ancestor is no more.
            let latest = accessor.latest_uploaded_index_part();
            if latest.lineage.detached_previous_ancestor().is_none() {
                return Err(NoAncestor);
            };

            latest
                .gc_blocking
                .as_ref()
                .is_some_and(|b| b.blocked_by(DetachAncestor))
        };

        if still_in_progress {
            // gc is still blocked, we can still reparent and complete.
            // we are safe to reparent the remaining ones, because they were locked in at the
            // beginning.
            let attempt = continue_with_blocked_gc(detached, tenant).await?;

            // because the ancestor of detached is already set to none, we have published all
            // of the layers, so we are still "prepared."
            return Ok(Progress::Prepared(
                attempt,
                PreparedTimelineDetach { layers: Vec::new() },
            ));
        }

        let reparented_timelines = reparented_direct_children(detached, tenant)?;
        return Ok(Progress::Done(AncestorDetached {
            reparented_timelines,
        }));
    };

    if detached.is_archived() != Some(false) {
        return Err(Archived(detached.timeline_id));
    }

    if !ancestor_lsn.is_valid() {
        // rare case, probably wouldn't even load
        tracing::error!("ancestor is set, but ancestor_lsn is invalid, this timeline needs fixing");
        return Err(NoAncestor);
    }

    check_no_archived_children_of_ancestor(tenant, detached, &ancestor, ancestor_lsn)?;

    if ancestor.ancestor_timeline.is_some() {
        // non-technical requirement; we could flatten N ancestors just as easily but we chose
        // not to, at least initially
        return Err(TooManyAncestors);
    }

    let attempt = start_new_attempt(detached, tenant).await?;

    utils::pausable_failpoint!("timeline-detach-ancestor::before_starting_after_locking-pausable");

    fail::fail_point!(
        "timeline-detach-ancestor::before_starting_after_locking",
        |_| Err(Error::Failpoint(
            "timeline-detach-ancestor::before_starting_after_locking"
        ))
    );

    if ancestor_lsn >= ancestor.get_disk_consistent_lsn() {
        let span =
            tracing::info_span!("freeze_and_flush", ancestor_timeline_id=%ancestor.timeline_id);
        async {
            let started_at = std::time::Instant::now();
            let freeze_and_flush = ancestor.freeze_and_flush0();
            let mut freeze_and_flush = std::pin::pin!(freeze_and_flush);

            let res =
                tokio::time::timeout(std::time::Duration::from_secs(1), &mut freeze_and_flush)
                    .await;

            let res = match res {
                Ok(res) => res,
                Err(_elapsed) => {
                    tracing::info!("freezing and flushing ancestor is still ongoing");
                    freeze_and_flush.await
                }
            };

            res.map_err(|e| {
                use FlushLayerError::*;
                match e {
                    Cancelled | NotRunning(_) => {
                        // FIXME(#6424): technically statically unreachable right now, given how we never
                        // drop the sender
                        Error::ShuttingDown
                    }
                    CreateImageLayersError(_) | Other(_) => Error::Prepare(e.into()),
                }
            })?;

            // we do not need to wait for uploads to complete, but we do need the `struct Layer`;
            // copying a delta prefix is currently unsupported for `InMemoryLayer`.
            tracing::info!(
                elapsed_ms = started_at.elapsed().as_millis(),
                "froze and flushed the ancestor"
            );
            Ok::<_, Error>(())
        }
        .instrument(span)
        .await?;
    }

    let end_lsn = ancestor_lsn + 1;

    let (filtered_layers, straddling_branchpoint, rest_of_historic) = {
        // we do not need to start from our layers, because they can only be layers that come
        // *after* ancestor_lsn
        let layers = tokio::select! {
            guard = ancestor.layers.read() => guard,
            _ = detached.cancel.cancelled() => {
                return Err(ShuttingDown);
            }
            _ = ancestor.cancel.cancelled() => {
                return Err(ShuttingDown);
            }
        };

        // between retries, these can change if compaction or gc ran in between. this will mean
        // we have to redo work.
        partition_work(ancestor_lsn, &layers)?
    };

    // TODO: layers are already sorted by something: use that to determine how much of the remote
    // copies are already done -- gc is blocked, but a compaction could have happened on the
    // ancestor, which is something to keep in mind if copy skipping is implemented.
    tracing::info!(filtered=%filtered_layers, to_rewrite = straddling_branchpoint.len(), historic=%rest_of_historic.len(), "collected layers");

    // TODO: copying and lsn prefix copying could be done at the same time with a single fsync after
    let mut new_layers: Vec<Layer> =
        Vec::with_capacity(straddling_branchpoint.len() + rest_of_historic.len());

    {
        tracing::debug!(to_rewrite = %straddling_branchpoint.len(), "copying prefix of delta layers");

        let mut tasks = tokio::task::JoinSet::new();

        let mut wrote_any = false;

        let limiter = Arc::new(Semaphore::new(options.rewrite_concurrency.get()));

        for layer in straddling_branchpoint {
            let limiter = limiter.clone();
            let timeline = detached.clone();
            let ctx = ctx.detached_child(TaskKind::DetachAncestor, DownloadBehavior::Download);

            let span = tracing::info_span!("upload_rewritten_layer", %layer);
            tasks.spawn(
                async move {
                    let _permit = limiter.acquire().await;
                    let copied =
                        upload_rewritten_layer(end_lsn, &layer, &timeline, &timeline.cancel, &ctx)
                            .await?;
                    if let Some(copied) = copied.as_ref() {
                        tracing::info!(%copied, "rewrote and uploaded");
                    }
                    Ok(copied)
                }
                .instrument(span),
            );
        }

        while let Some(res) = tasks.join_next().await {
            match res {
                Ok(Ok(Some(copied))) => {
                    wrote_any = true;
                    new_layers.push(copied);
                }
                Ok(Ok(None)) => {}
                Ok(Err(e)) => return Err(e),
                Err(je) => return Err(Error::Prepare(je.into())),
            }
        }

        // FIXME: the fsync should be mandatory, after both rewrites and copies
        if wrote_any {
            fsync_timeline_dir(detached, ctx).await;
        }
    }

    let mut tasks = tokio::task::JoinSet::new();
    let limiter = Arc::new(Semaphore::new(options.copy_concurrency.get()));

    for adopted in rest_of_historic {
        let limiter = limiter.clone();
        let timeline = detached.clone();

        tasks.spawn(
            async move {
                let _permit = limiter.acquire().await;
                let (owned, did_hardlink) = remote_copy(
                    &adopted,
                    &timeline,
                    timeline.generation,
                    timeline.shard_identity,
                    &timeline.cancel,
                )
                .await?;
                tracing::info!(layer=%owned, did_hard_link=%did_hardlink, "remote copied");
                Ok((owned, did_hardlink))
            }
            .in_current_span(),
        );
    }

    let mut should_fsync = false;
    while let Some(res) = tasks.join_next().await {
        match res {
            Ok(Ok((owned, did_hardlink))) => {
                if did_hardlink {
                    should_fsync = true;
                }
                new_layers.push(owned);
            }
            Ok(Err(failed)) => {
                return Err(failed);
            }
            Err(je) => return Err(Error::Prepare(je.into())),
        }
    }

    // fsync directory again if we hardlinked something
    if should_fsync {
        fsync_timeline_dir(detached, ctx).await;
    }

    let prepared = PreparedTimelineDetach { layers: new_layers };

    Ok(Progress::Prepared(attempt, prepared))
}

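/// Starts a new detach attempt: takes the tenant-wide exclusive attempt slot and persists the
/// `DetachAncestor` gc blocking in `index_part.json` so gc cannot remove layers we still need.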
async fn start_new_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
    let attempt = obtain_exclusive_attempt(detached, tenant)?;

    // insert the block in the index_part.json, if not already there.
    let _dont_care = tenant
        .gc_block
        .insert(
            detached,
            crate::tenant::remote_timeline_client::index::GcBlockingReason::DetachAncestor,
        )
        .await
        .map_err(|e| Error::launder(e, Error::Prepare))?;

    Ok(attempt)
}

async fn continue_with_blocked_gc(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
    // FIXME: it would be nice to confirm that there is an in-memory version, since we've just
    // verified there is a persistent one?
    obtain_exclusive_attempt(detached, tenant)
}

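/// Ensures this is the only in-progress detach attempt for the tenant and that the detached
/// timeline's gate is still open, recording the attempt in `Tenant::ongoing_timeline_detach`.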
fn obtain_exclusive_attempt(detached: &Timeline, tenant: &Tenant) -> Result<Attempt, Error> {
    use Error::{OtherTimelineDetachOngoing, ShuttingDown};

    // ensure we are the only active attempt for this tenant
    let (guard, barrier) = completion::channel();
    {
        let mut guard = tenant.ongoing_timeline_detach.lock().unwrap();
        if let Some((tl, other)) = guard.as_ref() {
            if !other.is_ready() {
                return Err(OtherTimelineDetachOngoing(*tl));
            }
            // FIXME: no test enters here
        }
        *guard = Some((detached.timeline_id, barrier));
    }

    // ensure the gate is still open
    let _gate_entered = detached.gate.enter().map_err(|_| ShuttingDown)?;

    Ok(Attempt {
        timeline_id: detached.timeline_id,
        _guard: guard,
        gate_entered: Some(_gate_entered),
    })
}

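/// Collects the direct children of `detached` whose reparenting has also been persisted in their
/// latest uploaded `index_part.json`.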
fn reparented_direct_children(
    detached: &Arc<Timeline>,
    tenant: &Tenant,
) -> Result<HashSet<TimelineId>, Error> {
    let mut all_direct_children = tenant
        .timelines
        .lock()
        .unwrap()
        .values()
        .filter_map(|tl| {
            let is_direct_child = matches!(tl.ancestor_timeline.as_ref(), Some(ancestor) if Arc::ptr_eq(ancestor, detached));

            if is_direct_child {
                Some(tl.clone())
            } else {
                if let Some(timeline) = tl.ancestor_timeline.as_ref() {
                    assert_ne!(timeline.timeline_id, detached.timeline_id, "we cannot have two timelines with the same timeline_id live");
                }
                None
            }
        })
        // Collect to avoid a lock ordering problem between Tenant::timelines and
        // Timeline::remote_client
        .collect::<Vec<_>>();

    let mut any_shutdown = false;

    all_direct_children.retain(|tl| match tl.remote_client.initialized_upload_queue() {
        Ok(accessor) => accessor
            .latest_uploaded_index_part()
            .lineage
            .is_reparented(),
        Err(_shutdownalike) => {
            // not 100% a shutdown, but let's bail early so as not to give inconsistent results
            // in a sharded environment.
            any_shutdown = true;
            true
        }
    });

    if any_shutdown {
        // it could be one or many being deleted; have the client retry
        return Err(Error::ShuttingDown);
    }

    Ok(all_direct_children
        .into_iter()
        .map(|tl| tl.timeline_id)
        .collect())
}

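/// Partitions the ancestor's historic layers into: the number of layers wholly above the branch
/// point (ignored), delta layers straddling the branch point (need a rewritten lsn prefix), and
/// layers at or below the branch point (can be adopted via remote copy).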
fn partition_work(
    ancestor_lsn: Lsn,
    source: &LayerManager,
) -> Result<(usize, Vec<Layer>, Vec<Layer>), Error> {
    let mut straddling_branchpoint = vec![];
    let mut rest_of_historic = vec![];

    let mut later_by_lsn = 0;

    for desc in source.layer_map()?.iter_historic_layers() {
        // off-by-one caveats here:
        // - start is inclusive
        // - end is exclusive
        if desc.lsn_range.start > ancestor_lsn {
            later_by_lsn += 1;
            continue;
        }

        let target = if desc.lsn_range.start <= ancestor_lsn
            && desc.lsn_range.end > ancestor_lsn
            && desc.is_delta
        {
            // TODO: image layer at Lsn optimization
            &mut straddling_branchpoint
        } else {
            &mut rest_of_historic
        };

        target.push(source.get_from_desc(&desc));
    }

    Ok((later_by_lsn, straddling_branchpoint, rest_of_historic))
}

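/// Copies the lsn prefix of `layer` (see [`copy_lsn_prefix`]) and uploads the resulting layer
/// file to remote storage; returns `None` when there was nothing to copy.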
async fn upload_rewritten_layer(
    end_lsn: Lsn,
    layer: &Layer,
    target: &Arc<Timeline>,
    cancel: &CancellationToken,
    ctx: &RequestContext,
) -> Result<Option<Layer>, Error> {
    let copied = copy_lsn_prefix(end_lsn, layer, target, ctx).await?;

    let Some(copied) = copied else {
        return Ok(None);
    };

    target
        .remote_client
        .upload_layer_file(&copied, cancel)
        .await
        .map_err(|e| Error::launder(e, Error::Prepare))?;

    Ok(Some(copied.into()))
}

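/// Writes a new delta layer on `target_timeline` containing only the records of `layer` below
/// `end_lsn`; returns `None` if no records fall in that range.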
async fn copy_lsn_prefix(
    end_lsn: Lsn,
    layer: &Layer,
    target_timeline: &Arc<Timeline>,
    ctx: &RequestContext,
) -> Result<Option<ResidentLayer>, Error> {
    if target_timeline.cancel.is_cancelled() {
        return Err(Error::ShuttingDown);
    }

    tracing::debug!(%layer, %end_lsn, "copying lsn prefix");

    let mut writer = DeltaLayerWriter::new(
        target_timeline.conf,
        target_timeline.timeline_id,
        target_timeline.tenant_shard_id,
        layer.layer_desc().key_range.start,
        layer.layer_desc().lsn_range.start..end_lsn,
        ctx,
    )
    .await
    .with_context(|| format!("prepare to copy lsn prefix of ancestors {layer}"))
    .map_err(Error::Prepare)?;

    let resident = layer.download_and_keep_resident().await.map_err(|e| {
        if e.is_cancelled() {
            Error::ShuttingDown
        } else {
            Error::Prepare(e.into())
        }
    })?;

    let records = resident
        .copy_delta_prefix(&mut writer, end_lsn, ctx)
        .await
        .with_context(|| format!("copy lsn prefix of ancestors {layer}"))
        .map_err(Error::Prepare)?;

    drop(resident);

    tracing::debug!(%layer, records, "copied records");

    if records == 0 {
        drop(writer);
        // TODO: we might want to store an empty marker in remote storage for this
        // layer so that we will not needlessly walk `layer` on repeated attempts.
        Ok(None)
    } else {
        // reuse the original end key instead of the real highest key in the layer, which would
        // add more holes between layers.
        let reused_highest_key = layer.layer_desc().key_range.end;
        let (desc, path) = writer
            .finish(reused_highest_key, ctx)
            .await
            .map_err(Error::Prepare)?;
        let copied = Layer::finish_creating(target_timeline.conf, target_timeline, desc, &path)
            .map_err(Error::Prepare)?;

        tracing::debug!(%layer, %copied, "new layer produced");

        Ok(Some(copied))
    }
}

/// Creates a new Layer instance for the adopted layer, and ensures it is found in remote storage
/// on successful return, without the adopted layer being added to `index_part.json`.
/// Returns the new layer and whether a hardlink was made.
async fn remote_copy(
    adopted: &Layer,
    adoptee: &Arc<Timeline>,
    generation: Generation,
    shard_identity: ShardIdentity,
    cancel: &CancellationToken,
) -> Result<(Layer, bool), Error> {
    let mut metadata = adopted.metadata();
    debug_assert!(metadata.generation <= generation);
    metadata.generation = generation;
    metadata.shard = shard_identity.shard_index();

    let conf = adoptee.conf;
    let file_name = adopted.layer_desc().layer_name();

    // depending on whether Layer::keep_resident succeeds, do a hardlink
    let did_hardlink;
    let owned = if let Some(adopted_resident) = adopted.keep_resident().await {
        let adopted_path = adopted_resident.local_path();
        let adoptee_path = local_layer_path(
            conf,
            &adoptee.tenant_shard_id,
            &adoptee.timeline_id,
            &file_name,
            &metadata.generation,
        );
        std::fs::hard_link(adopted_path, &adoptee_path)
            .map_err(|e| Error::launder(e.into(), Error::Prepare))?;
        did_hardlink = true;
        Layer::for_resident(conf, adoptee, adoptee_path, file_name, metadata).drop_eviction_guard()
    } else {
        did_hardlink = false;
        Layer::for_evicted(conf, adoptee, file_name, metadata)
    };

    let layer = adoptee
        .remote_client
        .copy_timeline_layer(adopted, &owned, cancel)
        .await
        .map(move |()| owned)
        .map_err(|e| Error::launder(e, Error::Prepare))?;

    Ok((layer, did_hardlink))
}

pub(crate) enum DetachingAndReparenting {
    /// All of the following timeline ids were reparented and the timeline ancestor detach must be
    /// marked as completed.
    Reparented(HashSet<TimelineId>),

    /// Some of the reparentings failed. The timeline ancestor detach must **not** be marked as
    /// completed.
    ///
    /// Nested `must_reset_tenant` is set to true when any restart-requiring changes were made.
    SomeReparentingFailed { must_reset_tenant: bool },

    /// Detaching and reparentings were completed in a previous attempt. Timeline ancestor detach
    /// must be marked as completed.
    AlreadyDone(HashSet<TimelineId>),
}

impl DetachingAndReparenting {
    pub(crate) fn reset_tenant_required(&self) -> bool {
        use DetachingAndReparenting::*;
        match self {
            Reparented(_) => true,
            SomeReparentingFailed { must_reset_tenant } => *must_reset_tenant,
            AlreadyDone(_) => false,
        }
    }

    pub(crate) fn completed(self) -> Option<HashSet<TimelineId>> {
        use DetachingAndReparenting::*;
        match self {
            Reparented(x) | AlreadyDone(x) => Some(x),
            SomeReparentingFailed { .. } => None,
        }
    }
}

/// See [`Timeline::detach_from_ancestor_and_reparent`].
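///
/// Publishes the prepared layers and the detach in the `index_part.json` of `detached`, then
/// reparents every eligible child of the old ancestor to `detached`. Failed reparentings are
/// reported via [`DetachingAndReparenting::SomeReparentingFailed`] and can be retried.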
pub(super) async fn detach_and_reparent(
    detached: &Arc<Timeline>,
    tenant: &Tenant,
    prepared: PreparedTimelineDetach,
    _ctx: &RequestContext,
) -> Result<DetachingAndReparenting, Error> {
    let PreparedTimelineDetach { layers } = prepared;

    #[derive(Debug)]
    enum Ancestor {
        NotDetached(Arc<Timeline>, Lsn),
        Detached(Arc<Timeline>, Lsn),
    }

    let (recorded_branchpoint, still_ongoing) = {
        let access = detached.remote_client.initialized_upload_queue()?;
        let latest = access.latest_uploaded_index_part();

        (
            latest.lineage.detached_previous_ancestor(),
            latest
                .gc_blocking
                .as_ref()
                .is_some_and(|b| b.blocked_by(DetachAncestor)),
        )
    };
    assert!(
        still_ongoing,
        "cannot (detach? reparent)? complete if the operation is not still ongoing"
    );

    let ancestor = match (detached.ancestor_timeline.as_ref(), recorded_branchpoint) {
        (Some(ancestor), None) => {
            assert!(
                !layers.is_empty(),
                "there should always be at least one layer to inherit"
            );
            Ancestor::NotDetached(ancestor.clone(), detached.ancestor_lsn)
        }
        (Some(_), Some(_)) => {
            panic!(
                "it should be impossible to get to here without having gone through the tenant reset; if the tenant was reset, then the ancestor_timeline would be None"
            );
        }
        (None, Some((ancestor_id, ancestor_lsn))) => {
            // it has been either:
            // - detached but still exists => we can try reparenting
            // - detached and deleted
            //
            // either way, we must complete
            assert!(
                layers.is_empty(),
                "no layers should have been copied as detach is done"
            );

            let existing = tenant.timelines.lock().unwrap().get(&ancestor_id).cloned();

            if let Some(ancestor) = existing {
                Ancestor::Detached(ancestor, ancestor_lsn)
            } else {
                let direct_children = reparented_direct_children(detached, tenant)?;
                return Ok(DetachingAndReparenting::AlreadyDone(direct_children));
            }
        }
        (None, None) => {
            // TODO: make sure there are no `?` returns between the tenant_reset and reaching
            // here.
            panic!(
                "bug: detach_and_reparent called on a timeline which has not been detached or which has no live ancestor"
            );
        }
    };

    // publish the prepared layers before we reparent any of the timelines, so that on restart
    // reparented timelines find layers. also do the actual detaching.
    //
    // if we crash after this operation, a retry will allow reparenting the remaining timelines as
    // gc is blocked.

    let (ancestor, ancestor_lsn, was_detached) = match ancestor {
        Ancestor::NotDetached(ancestor, ancestor_lsn) => {
            // this has to complete before any reparentings because otherwise they would not have
            // layers on the new parent.
            detached
                .remote_client
                .schedule_adding_existing_layers_to_index_detach_and_wait(
                    &layers,
                    (ancestor.timeline_id, ancestor_lsn),
                )
                .await
                .context("publish layers and detach ancestor")
                .map_err(|e| Error::launder(e, Error::DetachReparent))?;

            tracing::info!(
                ancestor=%ancestor.timeline_id,
                %ancestor_lsn,
                inherited_layers=%layers.len(),
                "detached from ancestor"
            );
            (ancestor, ancestor_lsn, true)
        }
        Ancestor::Detached(ancestor, ancestor_lsn) => (ancestor, ancestor_lsn, false),
    };

    let mut tasks = tokio::task::JoinSet::new();

    // Returns a single permit semaphore which will be used to make one reparenting succeed;
    // the others will fail as if those timelines had been stopped for whatever reason.
    #[cfg(feature = "testing")]
    let failpoint_sem = || -> Option<Arc<Semaphore>> {
        fail::fail_point!("timeline-detach-ancestor::allow_one_reparented", |_| Some(
            Arc::new(Semaphore::new(1))
        ));
        None
    }();

    // because we are now keeping the slot in progress, it is unlikely that there will be any
    // timeline deletions during this time. if we raced one, then we'll just ignore it.
    {
        let g = tenant.timelines.lock().unwrap();
        reparentable_timelines(g.values(), detached, &ancestor, ancestor_lsn)
            .cloned()
            .for_each(|timeline| {
                // important in this scope: we are holding the Tenant::timelines lock
                let span = tracing::info_span!("reparent", reparented=%timeline.timeline_id);
                let new_parent = detached.timeline_id;
                #[cfg(feature = "testing")]
                let failpoint_sem = failpoint_sem.clone();

                tasks.spawn(
                    async move {
                        let res = async {
                            #[cfg(feature = "testing")]
                            if let Some(failpoint_sem) = failpoint_sem {
                                let _permit = failpoint_sem.acquire().await.map_err(|_| {
                                    anyhow::anyhow!(
                                        "failpoint: timeline-detach-ancestor::allow_one_reparented",
                                    )
                                })?;
                                failpoint_sem.close();
                            }

                            timeline
                                .remote_client
                                .schedule_reparenting_and_wait(&new_parent)
                                .await
                        }
                        .await;

                        match res {
                            Ok(()) => {
                                tracing::info!("reparented");
                                Some(timeline)
                            }
                            Err(e) => {
                                // with the tenant slot in use, a raced timeline deletion is the
                                // most likely reason.
                                tracing::warn!("reparenting failed: {e:#}");
                                None
                            }
                        }
                    }
                    .instrument(span),
                );
            });
    }

    let reparenting_candidates = tasks.len();
    let mut reparented = HashSet::with_capacity(tasks.len());

    while let Some(res) = tasks.join_next().await {
        match res {
            Ok(Some(timeline)) => {
                assert!(
                    reparented.insert(timeline.timeline_id),
                    "duplicate reparenting? timeline_id={}",
                    timeline.timeline_id
                );
            }
            Err(je) if je.is_cancelled() => unreachable!("not used"),
            // just ignore failures now, we can retry
            Ok(None) => {}
            Err(je) if je.is_panic() => {}
            Err(je) => tracing::error!("unexpected join error: {je:?}"),
        }
    }

    let reparented_all = reparenting_candidates == reparented.len();

    if reparented_all {
        Ok(DetachingAndReparenting::Reparented(reparented))
    } else {
        tracing::info!(
            reparented = reparented.len(),
            candidates = reparenting_candidates,
            "failed to reparent all candidates; they can be retried after the tenant_reset",
        );

        let must_reset_tenant = !reparented.is_empty() || was_detached;
        Ok(DetachingAndReparenting::SomeReparentingFailed { must_reset_tenant })
    }
}

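/// Completes the detach by removing the `DetachAncestor` gc blocking from the tenant,
/// re-entering the timeline gate first if the tenant was reset in between.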
pub(super) async fn complete(
    detached: &Arc<Timeline>,
    tenant: &Tenant,
    mut attempt: Attempt,
    _ctx: &RequestContext,
) -> Result<(), Error> {
    assert_eq!(detached.timeline_id, attempt.timeline_id);

    if attempt.gate_entered.is_none() {
        let entered = detached.gate.enter().map_err(|_| Error::ShuttingDown)?;
        attempt.gate_entered = Some(entered);
    } else {
        // Some(gate_entered) means the tenant was not restarted, which is fine, as a restart is
        // not always required
    }

    assert!(detached.ancestor_timeline.is_none());

    // this should be a 503 at least...?
    fail::fail_point!(
        "timeline-detach-ancestor::complete_before_uploading",
        |_| Err(Error::Failpoint(
            "timeline-detach-ancestor::complete_before_uploading"
        ))
    );

    tenant
        .gc_block
        .remove(
            detached,
            crate::tenant::remote_timeline_client::index::GcBlockingReason::DetachAncestor,
        )
        .await
        .map_err(|e| Error::launder(e, Error::Complete))?;

    Ok(())
}

/// Query against a locked `Tenant::timelines`.
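///
/// Yields the timelines which branch off `ancestor` at or before `ancestor_lsn`, excluding
/// `detached` itself and any timeline whose deletion has already started.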
fn reparentable_timelines<'a, I>(
    timelines: I,
    detached: &'a Arc<Timeline>,
    ancestor: &'a Arc<Timeline>,
    ancestor_lsn: Lsn,
) -> impl Iterator<Item = &'a Arc<Timeline>> + 'a
where
    I: Iterator<Item = &'a Arc<Timeline>> + 'a,
{
    timelines.filter_map(move |tl| {
        if Arc::ptr_eq(tl, detached) {
            return None;
        }

        let tl_ancestor = tl.ancestor_timeline.as_ref()?;
        let is_same = Arc::ptr_eq(ancestor, tl_ancestor);
        let is_earlier = tl.get_ancestor_lsn() <= ancestor_lsn;

        let is_deleting = tl
            .delete_progress
            .try_lock()
            .map(|flow| !flow.is_not_started())
            .unwrap_or(true);

        if is_same && is_earlier && !is_deleting {
            Some(tl)
        } else {
            None
        }
    })
}

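/// Fails with [`Error::Archived`] if any live or offloaded child of `ancestor` that would be
/// affected by the detach is archived.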
fn check_no_archived_children_of_ancestor(
    tenant: &Tenant,
    detached: &Arc<Timeline>,
    ancestor: &Arc<Timeline>,
    ancestor_lsn: Lsn,
) -> Result<(), Error> {
    let timelines = tenant.timelines.lock().unwrap();
    let timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
    for timeline in reparentable_timelines(timelines.values(), detached, ancestor, ancestor_lsn) {
        if timeline.is_archived() == Some(true) {
            return Err(Error::Archived(timeline.timeline_id));
        }
    }
    for timeline_offloaded in timelines_offloaded.values() {
        if timeline_offloaded.ancestor_timeline_id != Some(ancestor.timeline_id) {
            continue;
        }
        // This forbids the detach ancestor feature if flattened timelines are present,
        // even if the ancestor_lsn is from after the branchpoint of the detached timeline.
        // But as per current design, we don't record the ancestor_lsn of flattened timelines.
        // This is a bit unfortunate, but as of writing this we don't support flattening
        // anyway. Maybe we can evolve the data model in the future.
        if let Some(retain_lsn) = timeline_offloaded.ancestor_retain_lsn {
            let is_earlier = retain_lsn <= ancestor_lsn;
            if !is_earlier {
                continue;
            }
        }
        return Err(Error::Archived(timeline_offloaded.timeline_id));
    }
    Ok(())
}

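/// Fsyncs the timeline directory so that newly written and hardlinked layer files are durable.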
async fn fsync_timeline_dir(timeline: &Timeline, ctx: &RequestContext) {
    let path = &timeline
        .conf
        .timeline_path(&timeline.tenant_shard_id, &timeline.timeline_id);
    let timeline_dir = VirtualFile::open(&path, ctx)
        .await
        .fatal_err("VirtualFile::open for timeline dir fsync");
    timeline_dir
        .sync_all()
        .await
        .fatal_err("VirtualFile::sync_all timeline dir");
}
1032 0 : }
|