Line data Source code
1 : mod deleter;
2 : mod list_writer;
3 : mod validator;
4 :
5 : use std::collections::HashMap;
6 : use std::sync::Arc;
7 : use std::time::Duration;
8 :
9 : use crate::control_plane_client::ControlPlaneGenerationsApi;
10 : use crate::metrics;
11 : use crate::tenant::remote_timeline_client::remote_layer_path;
12 : use crate::tenant::remote_timeline_client::remote_timeline_path;
13 : use crate::tenant::remote_timeline_client::LayerFileMetadata;
14 : use crate::virtual_file::MaybeFatalIo;
15 : use crate::virtual_file::VirtualFile;
16 : use anyhow::Context;
17 : use camino::Utf8PathBuf;
18 : use pageserver_api::shard::TenantShardId;
19 : use remote_storage::{GenericRemoteStorage, RemotePath};
20 : use serde::Deserialize;
21 : use serde::Serialize;
22 : use thiserror::Error;
23 : use tokio_util::sync::CancellationToken;
24 : use tracing::Instrument;
25 : use tracing::{debug, error};
26 : use utils::crashsafe::path_with_suffix_extension;
27 : use utils::generation::Generation;
28 : use utils::id::TimelineId;
29 : use utils::lsn::AtomicLsn;
30 : use utils::lsn::Lsn;
31 :
32 : use self::deleter::Deleter;
33 : use self::list_writer::DeletionOp;
34 : use self::list_writer::ListWriter;
35 : use self::list_writer::RecoverOp;
36 : use self::validator::Validator;
37 : use deleter::DeleterMessage;
38 : use list_writer::ListWriterQueueMessage;
39 : use validator::ValidatorQueueMessage;
40 :
41 : use crate::{config::PageServerConf, tenant::storage_layer::LayerName};
42 :
43 : // TODO: make the delay before executing deletions configurable
44 :
45 : /// We aggregate object deletions from many tenants in one place, for several reasons:
46 : /// - Coalesce deletions into fewer DeleteObjects calls
47 : /// - Enable Tenant/Timeline lifetimes to be shorter than the time it takes
48 : /// to flush any outstanding deletions.
49 : /// - Globally control throughput of deletions, as these are a low priority task: do
50 : /// not compete with the same S3 clients/connections used for higher priority uploads.
51 : /// - Enable gating deletions on validation of a tenant's generation number, to make
52 : /// it safe to multi-attach tenants (see docs/rfcs/025-generation-numbers.md)
53 : ///
54 : /// There are two kinds of deletion: deferred and immediate. A deferred deletion
55 : /// may be intentionally delayed to protect passive readers of S3 data, and is
56 : /// subject to a generation number validation step. An immediate deletion is
57 : /// ready to execute immediately, and is only queued up so that it can be coalesced
58 : /// with other deletions in flight.
59 : ///
60 : /// Deferred deletions pass through three steps:
61 : /// - ListWriter: accumulate deletion requests from Timelines, and batch them up into
62 : /// DeletionLists, which are persisted to disk.
63 : /// - Validator: accumulate deletion lists, and validate them en-masse prior to passing
64 : /// the keys in the list onward for actual deletion. Also validate remote_consistent_lsn
65 : /// updates for running timelines.
66 : /// - Deleter: accumulate object keys that the validator has validated, and execute them in
67 : /// batches of 1000 keys via DeleteObjects.
68 : ///
69 : /// Immediate deletions, such as during timeline deletion, bypass the first
70 : /// two stages and are passed straight into the Deleter.
71 : ///
72 : /// Internally, each stage is joined by a channel to the next. On disk, there is only
73 : /// one queue (of DeletionLists), which is written by the frontend and consumed
74 : /// by the backend.
75 : #[derive(Clone)]
76 : pub struct DeletionQueue {
77 : client: DeletionQueueClient,
78 :
79 : // Parent cancellation token for the tokens passed into background workers
80 : cancel: CancellationToken,
81 : }
82 :
83 : /// Opaque wrapper around individual worker tasks, to avoid making the
84 : /// worker objects themselves public
85 : pub struct DeletionQueueWorkers<C>
86 : where
87 : C: ControlPlaneGenerationsApi + Send + Sync,
88 : {
89 : frontend: ListWriter,
90 : backend: Validator<C>,
91 : executor: Deleter,
92 : }
93 :
94 : impl<C> DeletionQueueWorkers<C>
95 : where
96 : C: ControlPlaneGenerationsApi + Send + Sync + 'static,
97 : {
98 8 : pub fn spawn_with(mut self, runtime: &tokio::runtime::Handle) -> tokio::task::JoinHandle<()> {
99 8 : let jh_frontend = runtime.spawn(async move {
100 8 : self.frontend
101 8 : .background()
102 8 : .instrument(tracing::info_span!(parent:None, "deletion frontend"))
103 52 : .await
104 8 : });
105 8 : let jh_backend = runtime.spawn(async move {
106 8 : self.backend
107 8 : .background()
108 8 : .instrument(tracing::info_span!(parent:None, "deletion backend"))
109 58 : .await
110 8 : });
111 8 : let jh_executor = runtime.spawn(async move {
112 8 : self.executor
113 8 : .background()
114 8 : .instrument(tracing::info_span!(parent:None, "deletion executor"))
115 28 : .await
116 8 : });
117 8 :
118 8 : runtime.spawn({
119 8 : async move {
120 8 : jh_frontend.await.expect("error joining frontend worker");
121 2 : jh_backend.await.expect("error joining backend worker");
122 2 : drop(jh_executor.await.expect("error joining executor worker"));
123 8 : }
124 8 : })
125 8 : }
126 : }
127 :
128 : /// A FlushOp is just a oneshot channel, where we send the transmit side down
129 : /// another channel, and the receive side will receive a message when the channel
130 : /// we're flushing has reached the FlushOp we sent into it.
131 : ///
132 : /// The only extra behavior beyond the channel is that the notify() method does not
133 : /// return an error when the receive side has been dropped, because in this use case
134 : /// it is harmless (the code that initiated the flush no longer cares about the result).
135 : #[derive(Debug)]
136 : struct FlushOp {
137 : tx: tokio::sync::oneshot::Sender<()>,
138 : }
139 :
140 : impl FlushOp {
141 42 : fn new() -> (Self, tokio::sync::oneshot::Receiver<()>) {
142 42 : let (tx, rx) = tokio::sync::oneshot::channel::<()>();
143 42 : (Self { tx }, rx)
144 42 : }
145 :
146 42 : fn notify(self) {
147 42 : if self.tx.send(()).is_err() {
148 : // oneshot channel closed. This is legal: a client could be destroyed while waiting for a flush.
149 0 : debug!("deletion queue flush from dropped client");
150 42 : };
151 42 : }
152 : }
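
// Illustrative, self-contained sketch of the flush pattern described above
// (a hypothetical test, not part of the real queue): a FlushOp is threaded
// through an ordinary work channel, and awaiting its receiver proves that
// everything queued ahead of it has been consumed.
#[cfg(test)]
#[tokio::test]
async fn flush_op_pattern_demo() {
    enum Msg {
        Work(u32),
        Flush(FlushOp),
    }
    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<Msg>();

    // Consumer: handle messages in order, acknowledging flushes as they arrive.
    let consumer = tokio::spawn(async move {
        while let Some(msg) = rx.recv().await {
            match msg {
                Msg::Work(_n) => { /* do the work */ }
                Msg::Flush(op) => op.notify(),
            }
        }
    });

    tx.send(Msg::Work(1)).unwrap();
    let (flush_op, flush_rx) = FlushOp::new();
    tx.send(Msg::Flush(flush_op)).unwrap();
    // When this resolves, Work(1) is guaranteed to have been handled.
    flush_rx.await.unwrap();

    drop(tx);
    consumer.await.unwrap();
}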
153 :
154 : #[derive(Clone, Debug)]
155 : pub struct DeletionQueueClient {
156 : tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
157 : executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,
158 :
159 : lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
160 : }
161 :
162 18 : #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
163 : struct TenantDeletionList {
164 : /// For each Timeline, a list of key fragments to append to the timeline remote path
165 : /// when reconstructing a full key
166 : timelines: HashMap<TimelineId, Vec<String>>,
167 :
168 : /// The generation in which this deletion was emitted: note that this may not be the
169 : /// same as the generation of any layers being deleted. The generation of the layer
170 : /// has already been absorbed into the keys in `timelines`
171 : generation: Generation,
172 : }
173 :
174 : impl TenantDeletionList {
175 10 : pub(crate) fn len(&self) -> usize {
176 10 : self.timelines.values().map(|v| v.len()).sum()
177 10 : }
178 : }
179 :
180 : /// Files ending with this suffix will be ignored and erased
181 : /// during recovery at startup.
182 : const TEMP_SUFFIX: &str = "tmp";
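
// A small sketch of how the temp suffix is used by the save() methods below,
// assuming `path_with_suffix_extension` appends the suffix as an extra
// extension: files are first written under a ".tmp"-suffixed name and then
// renamed into place, so a crash can only ever leave behind a temp file,
// which recovery ignores and erases.
#[cfg(test)]
#[test]
fn temp_suffix_naming() {
    let header_path = Utf8PathBuf::from("/some/deletion/header-01");
    let temp_path = path_with_suffix_extension(&header_path, TEMP_SUFFIX);
    assert_eq!(temp_path, Utf8PathBuf::from("/some/deletion/header-01.tmp"));
}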
183 :
184 30 : #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
185 : struct DeletionList {
186 : /// Serialization version, for future use
187 : version: u8,
188 :
189 : /// Used for constructing a unique key for each deletion list we write out.
190 : sequence: u64,
191 :
192 : /// To avoid repeating tenant/timeline IDs in every key, we store keys in
193 : /// nested HashMaps keyed by tenant shard and timeline ID. Each tenant only appears once
194 : /// with one unique generation ID: if someone tries to push a second generation
195 : /// ID for the same tenant, we will start a new DeletionList.
196 : tenants: HashMap<TenantShardId, TenantDeletionList>,
197 :
198 : /// Avoid having to walk `tenants` to calculate the number of keys in
199 : /// the nested deletion lists
200 : size: usize,
201 :
202 : /// Set to true when the list has undergone validation with the control
203 : /// plane and the remaining contents of `tenants` are valid. A list may
204 : /// also be implicitly marked valid by DeletionHeader.validated_sequence
205 : /// advancing to >= DeletionList.sequence
206 : #[serde(default)]
207 : #[serde(skip_serializing_if = "std::ops::Not::not")]
208 : validated: bool,
209 : }
210 :
211 0 : #[derive(Debug, Serialize, Deserialize)]
212 : struct DeletionHeader {
213 : /// Serialization version, for future use
214 : version: u8,
215 :
216 : /// The highest sequence number (inclusive) that has been validated. All deletion
217 : /// lists on disk with a sequence <= this value are safe to execute.
218 : validated_sequence: u64,
219 : }
220 :
221 : impl DeletionHeader {
222 : const VERSION_LATEST: u8 = 1;
223 :
224 8 : fn new(validated_sequence: u64) -> Self {
225 8 : Self {
226 8 : version: Self::VERSION_LATEST,
227 8 : validated_sequence,
228 8 : }
229 8 : }
230 :
231 8 : async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
232 8 : debug!("Saving deletion list header {:?}", self);
233 8 : let header_bytes = serde_json::to_vec(self).context("serialize deletion header")?;
234 8 : let header_path = conf.deletion_header_path();
235 8 : let temp_path = path_with_suffix_extension(&header_path, TEMP_SUFFIX);
236 8 : VirtualFile::crashsafe_overwrite(header_path, temp_path, header_bytes)
237 8 : .await
238 8 : .maybe_fatal_err("save deletion header")?;
239 :
240 8 : Ok(())
241 8 : }
242 : }
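
// Sketch of the implicit-validation rule described on `validated_sequence`
// above (a hypothetical helper, not used by the real recovery code): a list
// counts as validated either via its own flag or by being covered by the
// header's validated sequence.
#[cfg(test)]
#[allow(dead_code)]
fn is_list_validated(header: &DeletionHeader, list: &DeletionList) -> bool {
    list.validated || list.sequence <= header.validated_sequence
}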
243 :
244 : impl DeletionList {
245 : const VERSION_LATEST: u8 = 1;
246 20 : fn new(sequence: u64) -> Self {
247 20 : Self {
248 20 : version: Self::VERSION_LATEST,
249 20 : sequence,
250 20 : tenants: HashMap::new(),
251 20 : size: 0,
252 20 : validated: false,
253 20 : }
254 20 : }
255 :
256 28 : fn is_empty(&self) -> bool {
257 28 : self.tenants.is_empty()
258 28 : }
259 :
260 60 : fn len(&self) -> usize {
261 60 : self.size
262 60 : }
263 :
264 : /// Returns true if the push was accepted, false if the caller must start a new
265 : /// deletion list.
266 14 : fn push(
267 14 : &mut self,
268 14 : tenant: &TenantShardId,
269 14 : timeline: &TimelineId,
270 14 : generation: Generation,
271 14 : objects: &mut Vec<RemotePath>,
272 14 : ) -> bool {
273 14 : if objects.is_empty() {
274 : // Avoid inserting an empty TenantDeletionList: this preserves the property
275 : // that if we have no keys, then self.tenants is empty (used in Self::is_empty)
276 0 : return true;
277 14 : }
278 14 :
279 14 : let tenant_entry = self
280 14 : .tenants
281 14 : .entry(*tenant)
282 14 : .or_insert_with(|| TenantDeletionList {
283 12 : timelines: HashMap::new(),
284 12 : generation,
285 14 : });
286 14 :
287 14 : if tenant_entry.generation != generation {
288 : // Only one generation per tenant per list: signal to
289 : // caller to start a new list.
290 2 : return false;
291 12 : }
292 12 :
293 12 : let timeline_entry = tenant_entry.timelines.entry(*timeline).or_default();
294 12 :
295 12 : let timeline_remote_path = remote_timeline_path(tenant, timeline);
296 12 :
297 12 : self.size += objects.len();
298 12 : timeline_entry.extend(objects.drain(..).map(|p| {
299 12 : p.strip_prefix(&timeline_remote_path)
300 12 : .expect("Timeline paths always start with the timeline prefix")
301 12 : .to_string()
302 12 : }));
303 12 : true
304 14 : }
305 :
306 10 : fn into_remote_paths(self) -> Vec<RemotePath> {
307 10 : let mut result = Vec::new();
308 10 : for (tenant, tenant_deletions) in self.tenants.into_iter() {
309 6 : for (timeline, timeline_layers) in tenant_deletions.timelines.into_iter() {
310 6 : let timeline_remote_path = remote_timeline_path(&tenant, &timeline);
311 6 : result.extend(
312 6 : timeline_layers
313 6 : .into_iter()
314 6 : .map(|l| timeline_remote_path.join(&Utf8PathBuf::from(l))),
315 6 : );
316 6 : }
317 : }
318 :
319 10 : result
320 10 : }
321 :
322 14 : async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
323 14 : let path = conf.deletion_list_path(self.sequence);
324 14 : let temp_path = path_with_suffix_extension(&path, TEMP_SUFFIX);
325 14 :
326 14 : let bytes = serde_json::to_vec(self).expect("Failed to serialize deletion list");
327 14 :
328 14 : VirtualFile::crashsafe_overwrite(path, temp_path, bytes)
329 14 : .await
330 14 : .maybe_fatal_err("save deletion list")
331 14 : .map_err(Into::into)
332 14 : }
333 : }
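
// Sketch of the push() contract described above: a DeletionList holds exactly
// one generation per tenant, so pushing a second generation for the same
// tenant is refused and the caller must seal this list and start a new one.
// The constructors mirror the serialization test at the bottom of this file;
// the test name is illustrative.
#[cfg(test)]
#[test]
fn deletion_list_one_generation_per_tenant() -> anyhow::Result<()> {
    let tenant: TenantShardId = "ad6c1a56f5680419d3a16ff55d97ec3c".parse()?;
    let timeline: TimelineId = "be322c834ed9e709e63b5c9698691910".parse()?;
    let layer = |name: &str| {
        RemotePath::from_string(&format!("tenants/{tenant}/timelines/{timeline}/{name}"))
    };

    let mut list = DeletionList::new(1);

    // First push for this tenant: accepted, and the objects are drained into the list.
    let mut objects = vec![layer("layer-a")?];
    assert!(list.push(&tenant, &timeline, Generation::new(1), &mut objects));
    assert!(objects.is_empty());
    assert_eq!(list.len(), 1);

    // Same tenant, different generation: refused, and the objects are left untouched.
    let mut objects = vec![layer("layer-b")?];
    assert!(!list.push(&tenant, &timeline, Generation::new(2), &mut objects));
    assert_eq!(objects.len(), 1);

    Ok(())
}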
334 :
335 : impl std::fmt::Display for DeletionList {
336 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
337 0 : write!(
338 0 : f,
339 0 : "DeletionList<seq={}, tenants={}, keys={}>",
340 0 : self.sequence,
341 0 : self.tenants.len(),
342 0 : self.size
343 0 : )
344 0 : }
345 : }
346 :
347 : struct PendingLsn {
348 : projected: Lsn,
349 : result_slot: Arc<AtomicLsn>,
350 : }
351 :
352 : struct TenantLsnState {
353 : timelines: HashMap<TimelineId, PendingLsn>,
354 :
355 : // In what generation was the most recent update proposed?
356 : generation: Generation,
357 : }
358 :
359 : #[derive(Default)]
360 : struct VisibleLsnUpdates {
361 : tenants: HashMap<TenantShardId, TenantLsnState>,
362 : }
363 :
364 : impl VisibleLsnUpdates {
365 126 : fn new() -> Self {
366 126 : Self {
367 126 : tenants: HashMap::new(),
368 126 : }
369 126 : }
370 : }
371 :
372 : impl std::fmt::Debug for VisibleLsnUpdates {
373 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
374 0 : write!(f, "VisibleLsnUpdates({} tenants)", self.tenants.len())
375 0 : }
376 : }
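
// Sketch of the consumer side of the lsn_table protocol (see
// update_remote_consistent_lsn below): once a tenant's generation has been
// validated, its projected LSNs can be promoted into the shared result slots
// that Timelines read from. This is a hypothetical helper assuming AtomicLsn
// exposes a store() method; the real logic lives in the Validator worker.
#[cfg(test)]
#[allow(dead_code)]
fn promote_validated_lsns(tenant_state: &mut TenantLsnState) {
    for (_timeline, pending) in tenant_state.timelines.drain() {
        // Store into the AtomicLsn slot so readers see the validated value
        // without having to take the lsn_table lock.
        pending.result_slot.store(pending.projected);
    }
}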
377 :
378 0 : #[derive(Error, Debug)]
379 : pub enum DeletionQueueError {
380 : #[error("Deletion queue unavailable during shutdown")]
381 : ShuttingDown,
382 : }
383 :
384 : impl DeletionQueueClient {
385 0 : pub(crate) fn broken() -> Self {
386 0 : // Channels whose receivers are immediately dropped.
387 0 : let (tx, _rx) = tokio::sync::mpsc::unbounded_channel();
388 0 : let (executor_tx, _executor_rx) = tokio::sync::mpsc::channel(1);
389 0 : Self {
390 0 : tx,
391 0 : executor_tx,
392 0 : lsn_table: Arc::default(),
393 0 : }
394 0 : }
395 :
396 : /// This is a synchronous send, so there is no cancellation hazard: the message is
397 : /// either pushed or it is not. In the context of the deletion queue this hardly matters:
398 : /// once we decide to do a deletion, the decision is always final.
399 212 : fn do_push<T>(
400 212 : &self,
401 212 : queue: &tokio::sync::mpsc::UnboundedSender<T>,
402 212 : msg: T,
403 212 : ) -> Result<(), DeletionQueueError> {
404 212 : match queue.send(msg) {
405 212 : Ok(_) => Ok(()),
406 0 : Err(e) => {
407 0 : // This shouldn't happen: we should shut down all tenants before
408 0 : // we shut down the global delete queue. If we encounter a bug like this,
409 0 : // we may leak objects as deletions won't be processed.
410 0 : error!("Deletion queue closed while pushing, shutting down? ({e})");
411 0 : Err(DeletionQueueError::ShuttingDown)
412 : }
413 : }
414 212 : }
415 :
416 8 : pub(crate) fn recover(
417 8 : &self,
418 8 : attached_tenants: HashMap<TenantShardId, Generation>,
419 8 : ) -> Result<(), DeletionQueueError> {
420 8 : self.do_push(
421 8 : &self.tx,
422 8 : ListWriterQueueMessage::Recover(RecoverOp { attached_tenants }),
423 8 : )
424 8 : }
425 :
426 : /// When a Timeline wishes to update the remote_consistent_lsn that it exposes to the outside
427 : /// world, it must validate its generation number before doing so. Rather than do this synchronously,
428 : /// we allow the timeline to publish updates at will via this API, and then read back what LSN was most
429 : /// recently validated separately.
430 : ///
431 : /// In this function we publish the LSN to the `projected` field of the timeline's entry in the VisibleLsnUpdates. The
432 : /// backend will later wake up and notice that the tenant's generation requires validation.
433 1144 : pub(crate) async fn update_remote_consistent_lsn(
434 1144 : &self,
435 1144 : tenant_shard_id: TenantShardId,
436 1144 : timeline_id: TimelineId,
437 1144 : current_generation: Generation,
438 1144 : lsn: Lsn,
439 1144 : result_slot: Arc<AtomicLsn>,
440 1144 : ) {
441 1144 : let mut locked = self
442 1144 : .lsn_table
443 1144 : .write()
444 1144 : .expect("Lock should never be poisoned");
445 1144 :
446 1144 : let tenant_entry = locked
447 1144 : .tenants
448 1144 : .entry(tenant_shard_id)
449 1144 : .or_insert(TenantLsnState {
450 1144 : timelines: HashMap::new(),
451 1144 : generation: current_generation,
452 1144 : });
453 1144 :
454 1144 : if tenant_entry.generation != current_generation {
455 0 : // Generation might have changed if we were detached and then re-attached: in this case,
456 0 : // state from the previous generation cannot be trusted.
457 0 : tenant_entry.timelines.clear();
458 0 : tenant_entry.generation = current_generation;
459 1144 : }
460 :
461 1144 : tenant_entry.timelines.insert(
462 1144 : timeline_id,
463 1144 : PendingLsn {
464 1144 : projected: lsn,
465 1144 : result_slot,
466 1144 : },
467 1144 : );
468 1144 : }
469 :
470 : /// Submit a list of layers for deletion: this function will return before the deletion is
471 : /// persistent, but it may be executed at any time after this function is called: do not push
472 : /// layers until you're sure they can be deleted safely (i.e. remote metadata no longer
473 : /// references them).
474 : ///
475 : /// The `current_generation` is the generation of this pageserver's current attachment. The
476 : /// generations in `layers` are the generations in which those layers were written.
477 180 : pub(crate) async fn push_layers(
478 180 : &self,
479 180 : tenant_shard_id: TenantShardId,
480 180 : timeline_id: TimelineId,
481 180 : current_generation: Generation,
482 180 : layers: Vec<(LayerName, LayerFileMetadata)>,
483 180 : ) -> Result<(), DeletionQueueError> {
484 180 : if current_generation.is_none() {
485 0 : debug!("Enqueuing deletions in legacy mode, skipping queue");
486 :
487 0 : let mut layer_paths = Vec::new();
488 0 : for (layer, meta) in layers {
489 0 : layer_paths.push(remote_layer_path(
490 0 : &tenant_shard_id.tenant_id,
491 0 : &timeline_id,
492 0 : meta.shard,
493 0 : &layer,
494 0 : meta.generation,
495 0 : ));
496 0 : }
497 0 : self.push_immediate(layer_paths).await?;
498 0 : return self.flush_immediate().await;
499 180 : }
500 180 :
501 180 : self.push_layers_sync(tenant_shard_id, timeline_id, current_generation, layers)
502 180 : }
503 :
504 : /// When a Tenant has a generation, push_layers is always synchronous because
505 : /// the ListWriter channel is an unbounded channel.
506 : ///
507 : /// This can be merged into push_layers when we remove the Generation-less mode
508 : /// support (`<https://github.com/neondatabase/neon/issues/5395>`)
509 180 : pub(crate) fn push_layers_sync(
510 180 : &self,
511 180 : tenant_shard_id: TenantShardId,
512 180 : timeline_id: TimelineId,
513 180 : current_generation: Generation,
514 180 : layers: Vec<(LayerName, LayerFileMetadata)>,
515 180 : ) -> Result<(), DeletionQueueError> {
516 180 : metrics::DELETION_QUEUE
517 180 : .keys_submitted
518 180 : .inc_by(layers.len() as u64);
519 180 : self.do_push(
520 180 : &self.tx,
521 180 : ListWriterQueueMessage::Delete(DeletionOp {
522 180 : tenant_shard_id,
523 180 : timeline_id,
524 180 : layers,
525 180 : generation: current_generation,
526 180 : objects: Vec::new(),
527 180 : }),
528 180 : )
529 180 : }
530 :
531 : /// This is cancel-safe. If you drop the future the flush may still happen in the background.
532 24 : async fn do_flush<T>(
533 24 : &self,
534 24 : queue: &tokio::sync::mpsc::UnboundedSender<T>,
535 24 : msg: T,
536 24 : rx: tokio::sync::oneshot::Receiver<()>,
537 24 : ) -> Result<(), DeletionQueueError> {
538 24 : self.do_push(queue, msg)?;
539 24 : if rx.await.is_err() {
540 : // This shouldn't happen if tenants are shut down before deletion queue. If we
541 : // encounter a bug like this, then a flusher will incorrectly believe it has flushed
542 : // when it hasn't, possibly leading to leaking objects.
543 0 : error!("Deletion queue dropped flush op while client was still waiting");
544 0 : Err(DeletionQueueError::ShuttingDown)
545 : } else {
546 24 : Ok(())
547 : }
548 24 : }
549 :
550 : /// Wait until all previous deletions are persistent (either executed, or written to a DeletionList)
551 : ///
552 : /// This is cancel-safe. If you drop the future the flush may still happen in the background.
553 14 : pub async fn flush(&self) -> Result<(), DeletionQueueError> {
554 14 : let (flush_op, rx) = FlushOp::new();
555 14 : self.do_flush(&self.tx, ListWriterQueueMessage::Flush(flush_op), rx)
556 14 : .await
557 14 : }
558 :
559 : /// Issue a flush without waiting for it to complete. This is useful on advisory flushes where
560 : /// the caller wants to avoid the risk of waiting for lots of enqueued work, such as on tenant
561 : /// detach where flushing is nice but not necessary.
562 : ///
563 : /// This function provides no guarantees of work being done.
564 0 : pub fn flush_advisory(&self) {
565 0 : let (flush_op, _) = FlushOp::new();
566 0 :
567 0 : // Transmit the flush message, ignoring any result (such as a closed channel during shutdown).
568 0 : drop(self.tx.send(ListWriterQueueMessage::FlushExecute(flush_op)));
569 0 : }
570 :
571 : // Wait until all previous deletions are executed
572 10 : pub(crate) async fn flush_execute(&self) -> Result<(), DeletionQueueError> {
573 10 : debug!("flush_execute: flushing to deletion lists...");
574 : // Flush any buffered work to deletion lists
575 10 : self.flush().await?;
576 :
577 : // Flush the backend into the executor of deletion lists
578 10 : let (flush_op, rx) = FlushOp::new();
579 10 : debug!("flush_execute: flushing backend...");
580 10 : self.do_flush(&self.tx, ListWriterQueueMessage::FlushExecute(flush_op), rx)
581 10 : .await?;
582 10 : debug!("flush_execute: finished flushing backend...");
583 :
584 : // Flush any immediate-mode deletions (the above backend flush will only flush
585 : // the executor if deletions had flowed through the backend)
586 10 : debug!("flush_execute: flushing execution...");
587 10 : self.flush_immediate().await?;
588 10 : debug!("flush_execute: finished flushing execution...");
589 10 : Ok(())
590 10 : }
591 :
592 : /// This interface bypasses the persistent deletion queue, and any validation
593 : /// that this pageserver is still eligible to execute the deletions. It is for
594 : /// use in timeline deletions, where the control plane is telling us we may
595 : /// delete everything in the timeline.
596 : ///
597 : /// DO NOT USE THIS FROM GC OR COMPACTION CODE. Use the regular `push_layers`.
598 0 : pub(crate) async fn push_immediate(
599 0 : &self,
600 0 : objects: Vec<RemotePath>,
601 0 : ) -> Result<(), DeletionQueueError> {
602 0 : metrics::DELETION_QUEUE
603 0 : .keys_submitted
604 0 : .inc_by(objects.len() as u64);
605 0 : self.executor_tx
606 0 : .send(DeleterMessage::Delete(objects))
607 0 : .await
608 0 : .map_err(|_| DeletionQueueError::ShuttingDown)
609 0 : }
610 :
611 : /// Companion to push_immediate. When this returns Ok, all prior objects sent
612 : /// into push_immediate have been deleted from remote storage.
613 10 : pub(crate) async fn flush_immediate(&self) -> Result<(), DeletionQueueError> {
614 10 : let (flush_op, rx) = FlushOp::new();
615 10 : self.executor_tx
616 10 : .send(DeleterMessage::Flush(flush_op))
617 0 : .await
618 10 : .map_err(|_| DeletionQueueError::ShuttingDown)?;
619 :
620 10 : rx.await.map_err(|_| DeletionQueueError::ShuttingDown)
621 10 : }
622 : }
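
// Sketch of the immediate-deletion path described on push_immediate() above
// (a hypothetical helper, not a real call site): objects whose deletion has
// already been authorized, e.g. during timeline deletion, skip the persistent
// queue and generation validation entirely.
#[cfg(test)]
#[allow(dead_code)]
async fn delete_now(
    client: &DeletionQueueClient,
    paths: Vec<RemotePath>,
) -> Result<(), DeletionQueueError> {
    client.push_immediate(paths).await?;
    // Companion flush: when this returns Ok, all objects sent above are gone
    // from remote storage.
    client.flush_immediate().await
}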
623 :
624 : impl DeletionQueue {
625 8 : pub fn new_client(&self) -> DeletionQueueClient {
626 8 : self.client.clone()
627 8 : }
628 :
629 : /// Caller may use the returned object to construct clients with new_client.
630 : /// Caller should tokio::spawn the background() members of the returned worker object:
631 : /// we don't spawn those inside new() so that the caller can use their runtime/spans of choice.
632 : ///
633 : /// If remote_storage is None, then the returned workers will also be None.
634 8 : pub fn new<C>(
635 8 : remote_storage: Option<GenericRemoteStorage>,
636 8 : control_plane_client: Option<C>,
637 8 : conf: &'static PageServerConf,
638 8 : ) -> (Self, Option<DeletionQueueWorkers<C>>)
639 8 : where
640 8 : C: ControlPlaneGenerationsApi + Send + Sync,
641 8 : {
642 8 : // Unbounded channel: enables non-async functions to submit deletions. The actual length is
643 8 : // constrained by how promptly the ListWriter wakes up and drains it, which should be frequent
644 8 : // enough to avoid this taking a pathologically large amount of memory.
645 8 : let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
646 8 :
647 8 : // Shallow channel: it carries DeletionLists which each contain up to thousands of deletions
648 8 : let (backend_tx, backend_rx) = tokio::sync::mpsc::channel(16);
649 8 :
650 8 : // Shallow channel: it carries lists of paths, and we expect the main queueing to
651 8 : // happen in the backend (persistent), not in this queue.
652 8 : let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16);
653 8 :
654 8 : let lsn_table = Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new()));
655 8 :
656 8 : // The deletion queue has a cancellation token independent of the
657 8 : // general pageserver shutdown token, because it stays alive a bit
658 8 : // longer to flush after Tenants have all been torn down.
659 8 : let cancel = CancellationToken::new();
660 :
661 8 : let remote_storage = match remote_storage {
662 : None => {
663 0 : return (
664 0 : Self {
665 0 : client: DeletionQueueClient {
666 0 : tx,
667 0 : executor_tx,
668 0 : lsn_table: lsn_table.clone(),
669 0 : },
670 0 : cancel,
671 0 : },
672 0 : None,
673 0 : )
674 : }
675 8 : Some(r) => r,
676 8 : };
677 8 :
678 8 : (
679 8 : Self {
680 8 : client: DeletionQueueClient {
681 8 : tx,
682 8 : executor_tx: executor_tx.clone(),
683 8 : lsn_table: lsn_table.clone(),
684 8 : },
685 8 : cancel: cancel.clone(),
686 8 : },
687 8 : Some(DeletionQueueWorkers {
688 8 : frontend: ListWriter::new(conf, rx, backend_tx, cancel.clone()),
689 8 : backend: Validator::new(
690 8 : conf,
691 8 : backend_rx,
692 8 : executor_tx,
693 8 : control_plane_client,
694 8 : lsn_table.clone(),
695 8 : cancel.clone(),
696 8 : ),
697 8 : executor: Deleter::new(remote_storage, executor_rx, cancel.clone()),
698 8 : }),
699 8 : )
700 8 : }
701 :
702 0 : pub async fn shutdown(&mut self, timeout: Duration) {
703 0 : match tokio::time::timeout(timeout, self.client.flush()).await {
704 : Ok(Ok(())) => {
705 0 : tracing::info!("Deletion queue flushed successfully on shutdown")
706 : }
707 : Ok(Err(DeletionQueueError::ShuttingDown)) => {
708 : // This is not harmful for correctness, but is unexpected: the deletion
709 : // queue's workers should stay alive as long as there are any client handles instantiated.
710 0 : tracing::warn!("Deletion queue stopped prematurely");
711 : }
712 0 : Err(_timeout) => {
713 0 : tracing::warn!("Timed out flushing deletion queue on shutdown")
714 : }
715 : }
716 :
717 : // We only cancel _after_ flushing: otherwise we would be shutting down the
718 : // components that do the flush.
719 0 : self.cancel.cancel();
720 0 : }
721 : }
722 :
723 : #[cfg(test)]
724 : mod test {
725 : use camino::Utf8Path;
726 : use hex_literal::hex;
727 : use pageserver_api::{shard::ShardIndex, upcall_api::ReAttachResponseTenant};
728 : use std::{io::ErrorKind, time::Duration};
729 : use tracing::info;
730 :
731 : use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
732 : use tokio::task::JoinHandle;
733 :
734 : use crate::{
735 : control_plane_client::RetryForeverError,
736 : repository::Key,
737 : tenant::{harness::TenantHarness, storage_layer::DeltaLayerName},
738 : };
739 :
740 : use super::*;
741 : pub const TIMELINE_ID: TimelineId =
742 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
743 :
744 : pub const EXAMPLE_LAYER_NAME: LayerName = LayerName::Delta(DeltaLayerName {
745 : key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
746 : lsn_range: Lsn(0x00000000016B59D8)..Lsn(0x00000000016B5A51),
747 : });
748 :
749 : // When you need a second layer in a test.
750 : pub const EXAMPLE_LAYER_NAME_ALT: LayerName = LayerName::Delta(DeltaLayerName {
751 : key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
752 : lsn_range: Lsn(0x00000000016B5A51)..Lsn(0x00000000016B5A61),
753 : });
754 :
755 : struct TestSetup {
756 : harness: TenantHarness,
757 : remote_fs_dir: Utf8PathBuf,
758 : storage: GenericRemoteStorage,
759 : mock_control_plane: MockControlPlane,
760 : deletion_queue: DeletionQueue,
761 : worker_join: JoinHandle<()>,
762 : }
763 :
764 : impl TestSetup {
765 : /// Simulate a pageserver restart by destroying and recreating the deletion queue
766 2 : async fn restart(&mut self) {
767 2 : let (deletion_queue, workers) = DeletionQueue::new(
768 2 : Some(self.storage.clone()),
769 2 : Some(self.mock_control_plane.clone()),
770 2 : self.harness.conf,
771 2 : );
772 2 :
773 2 : tracing::debug!("Spawning worker for new queue queue");
774 2 : let worker_join = workers
775 2 : .unwrap()
776 2 : .spawn_with(&tokio::runtime::Handle::current());
777 2 :
778 2 : let old_worker_join = std::mem::replace(&mut self.worker_join, worker_join);
779 2 : let old_deletion_queue = std::mem::replace(&mut self.deletion_queue, deletion_queue);
780 2 :
781 2 : tracing::debug!("Joining worker from previous queue");
782 2 : old_deletion_queue.cancel.cancel();
783 2 : old_worker_join
784 2 : .await
785 2 : .expect("Failed to join workers for previous deletion queue");
786 2 : }
787 :
788 6 : fn set_latest_generation(&self, gen: Generation) {
789 6 : let tenant_shard_id = self.harness.tenant_shard_id;
790 6 : self.mock_control_plane
791 6 : .latest_generation
792 6 : .lock()
793 6 : .unwrap()
794 6 : .insert(tenant_shard_id, gen);
795 6 : }
796 :
797 : /// Returns remote layer file name, suitable for use in assert_remote_files
798 6 : fn write_remote_layer(
799 6 : &self,
800 6 : file_name: LayerName,
801 6 : gen: Generation,
802 6 : ) -> anyhow::Result<String> {
803 6 : let tenant_shard_id = self.harness.tenant_shard_id;
804 6 : let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
805 6 : let remote_timeline_path = self.remote_fs_dir.join(relative_remote_path.get_path());
806 6 : std::fs::create_dir_all(&remote_timeline_path)?;
807 6 : let remote_layer_file_name = format!("{}{}", file_name, gen.get_suffix());
808 6 :
809 6 : let content: Vec<u8> = format!("placeholder contents of {file_name}").into();
810 6 :
811 6 : std::fs::write(
812 6 : remote_timeline_path.join(remote_layer_file_name.clone()),
813 6 : content,
814 6 : )?;
815 :
816 6 : Ok(remote_layer_file_name)
817 6 : }
818 : }
819 :
820 : #[derive(Debug, Clone)]
821 : struct MockControlPlane {
822 : pub latest_generation: std::sync::Arc<std::sync::Mutex<HashMap<TenantShardId, Generation>>>,
823 : }
824 :
825 : impl MockControlPlane {
826 6 : fn new() -> Self {
827 6 : Self {
828 6 : latest_generation: Arc::default(),
829 6 : }
830 6 : }
831 : }
832 :
833 : impl ControlPlaneGenerationsApi for MockControlPlane {
834 0 : async fn re_attach(
835 0 : &self,
836 0 : _conf: &PageServerConf,
837 0 : ) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> {
838 0 : unimplemented!()
839 : }
840 :
841 8 : async fn validate(
842 8 : &self,
843 8 : tenants: Vec<(TenantShardId, Generation)>,
844 8 : ) -> Result<HashMap<TenantShardId, bool>, RetryForeverError> {
845 8 : let mut result = HashMap::new();
846 8 :
847 8 : let latest_generation = self.latest_generation.lock().unwrap();
848 :
849 16 : for (tenant_shard_id, generation) in tenants {
850 8 : if let Some(latest) = latest_generation.get(&tenant_shard_id) {
851 8 : result.insert(tenant_shard_id, *latest == generation);
852 8 : }
853 : }
854 :
855 8 : Ok(result)
856 8 : }
857 : }
858 :
859 6 : fn setup(test_name: &str) -> anyhow::Result<TestSetup> {
860 6 : let test_name = Box::leak(Box::new(format!("deletion_queue__{test_name}")));
861 6 : let harness = TenantHarness::create(test_name)?;
862 :
863 : // We do not load() the harness: we only need its config and remote_storage
864 :
865 : // Set up a GenericRemoteStorage targeting a directory
866 6 : let remote_fs_dir = harness.conf.workdir.join("remote_fs");
867 6 : std::fs::create_dir_all(remote_fs_dir)?;
868 6 : let remote_fs_dir = harness.conf.workdir.join("remote_fs").canonicalize_utf8()?;
869 6 : let storage_config = RemoteStorageConfig {
870 6 : storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()),
871 6 : timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
872 6 : };
873 6 : let storage = GenericRemoteStorage::from_config(&storage_config).unwrap();
874 6 :
875 6 : let mock_control_plane = MockControlPlane::new();
876 6 :
877 6 : let (deletion_queue, worker) = DeletionQueue::new(
878 6 : Some(storage.clone()),
879 6 : Some(mock_control_plane.clone()),
880 6 : harness.conf,
881 6 : );
882 6 :
883 6 : let worker = worker.unwrap();
884 6 : let worker_join = worker.spawn_with(&tokio::runtime::Handle::current());
885 6 :
886 6 : Ok(TestSetup {
887 6 : harness,
888 6 : remote_fs_dir,
889 6 : storage,
890 6 : mock_control_plane,
891 6 : deletion_queue,
892 6 : worker_join,
893 6 : })
894 6 : }
895 :
896 : // TODO: put this in a common location so that we can share with remote_timeline_client's tests
897 18 : fn assert_remote_files(expected: &[&str], remote_path: &Utf8Path) {
898 18 : let mut expected: Vec<String> = expected.iter().map(|x| String::from(*x)).collect();
899 18 : expected.sort();
900 18 :
901 18 : let mut found: Vec<String> = Vec::new();
902 18 : let dir = match std::fs::read_dir(remote_path) {
903 18 : Ok(d) => d,
904 0 : Err(e) => {
905 0 : if e.kind() == ErrorKind::NotFound {
906 0 : if expected.is_empty() {
907 : // We are asserting prefix is empty: it is expected that the dir is missing
908 0 : return;
909 : } else {
910 0 : assert_eq!(expected, Vec::<String>::new());
911 0 : unreachable!();
912 : }
913 : } else {
914 0 : panic!("Unexpected error listing {remote_path}: {e}");
915 : }
916 : }
917 : };
918 :
919 18 : for entry in dir.flatten() {
920 16 : let entry_name = entry.file_name();
921 16 : let fname = entry_name.to_str().unwrap();
922 16 : found.push(String::from(fname));
923 16 : }
924 18 : found.sort();
925 18 :
926 18 : assert_eq!(expected, found);
927 18 : }
928 :
929 10 : fn assert_local_files(expected: &[&str], directory: &Utf8Path) {
930 10 : let dir = match std::fs::read_dir(directory) {
931 8 : Ok(d) => d,
932 : Err(_) => {
933 2 : assert_eq!(expected, &Vec::<String>::new());
934 2 : return;
935 : }
936 : };
937 8 : let mut found = Vec::new();
938 18 : for dentry in dir {
939 10 : let dentry = dentry.unwrap();
940 10 : let file_name = dentry.file_name();
941 10 : let file_name_str = file_name.to_string_lossy();
942 10 : found.push(file_name_str.to_string());
943 10 : }
944 8 : found.sort();
945 8 : assert_eq!(expected, found);
946 10 : }
947 :
948 : #[tokio::test]
949 2 : async fn deletion_queue_smoke() -> anyhow::Result<()> {
950 2 : // Basic test that the deletion queue processes the deletions we pass into it
951 2 : let ctx = setup("deletion_queue_smoke").expect("Failed test setup");
952 2 : let client = ctx.deletion_queue.new_client();
953 2 : client.recover(HashMap::new())?;
954 2 :
955 2 : let layer_file_name_1: LayerName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
956 2 : let tenant_shard_id = ctx.harness.tenant_shard_id;
957 2 :
958 2 : let content: Vec<u8> = "victim1 contents".into();
959 2 : let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
960 2 : let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
961 2 : let deletion_prefix = ctx.harness.conf.deletion_prefix();
962 2 :
963 2 : // Exercise the distinction between the generation of the layers
964 2 : // we delete, and the generation of the running Tenant.
965 2 : let layer_generation = Generation::new(0xdeadbeef);
966 2 : let now_generation = Generation::new(0xfeedbeef);
967 2 : let layer_metadata =
968 2 : LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());
969 2 :
970 2 : let remote_layer_file_name_1 =
971 2 : format!("{}{}", layer_file_name_1, layer_generation.get_suffix());
972 2 :
973 2 : // Set mock control plane state to valid for our generation
974 2 : ctx.set_latest_generation(now_generation);
975 2 :
976 2 : // Inject a victim file to remote storage
977 2 : info!("Writing");
978 2 : std::fs::create_dir_all(&remote_timeline_path)?;
979 2 : std::fs::write(
980 2 : remote_timeline_path.join(remote_layer_file_name_1.clone()),
981 2 : content,
982 2 : )?;
983 2 : assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);
984 2 :
985 2 : // File should still be there after we push it to the queue (we haven't pushed enough to flush anything)
986 2 : info!("Pushing");
987 2 : client
988 2 : .push_layers(
989 2 : tenant_shard_id,
990 2 : TIMELINE_ID,
991 2 : now_generation,
992 2 : [(layer_file_name_1.clone(), layer_metadata)].to_vec(),
993 2 : )
994 2 : .await?;
995 2 : assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);
996 2 :
997 2 : assert_local_files(&[], &deletion_prefix);
998 2 :
999 2 : // File should still be there after we write a deletion list (we haven't pushed enough to execute anything)
1000 2 : info!("Flushing");
1001 2 : client.flush().await?;
1002 2 : assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);
1003 2 : assert_local_files(&["0000000000000001-01.list"], &deletion_prefix);
1004 2 :
1005 2 : // File should go away when we execute
1006 2 : info!("Flush-executing");
1007 6 : client.flush_execute().await?;
1008 2 : assert_remote_files(&[], &remote_timeline_path);
1009 2 : assert_local_files(&["header-01"], &deletion_prefix);
1010 2 :
1011 2 : // Flushing on an empty queue should succeed immediately, and not write any lists
1012 2 : info!("Flush-executing on empty");
1013 6 : client.flush_execute().await?;
1014 2 : assert_local_files(&["header-01"], &deletion_prefix);
1015 2 :
1016 2 : Ok(())
1017 2 : }
1018 :
1019 : #[tokio::test]
1020 2 : async fn deletion_queue_validation() -> anyhow::Result<()> {
1021 2 : let ctx = setup("deletion_queue_validation").expect("Failed test setup");
1022 2 : let client = ctx.deletion_queue.new_client();
1023 2 : client.recover(HashMap::new())?;
1024 2 :
1025 2 : // Generation that the control plane thinks is current
1026 2 : let latest_generation = Generation::new(0xdeadbeef);
1027 2 : // Generation that our DeletionQueue thinks the tenant is running with
1028 2 : let stale_generation = latest_generation.previous();
1029 2 : // Generation that our example layer file was written with
1030 2 : let layer_generation = stale_generation.previous();
1031 2 : let layer_metadata =
1032 2 : LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());
1033 2 :
1034 2 : ctx.set_latest_generation(latest_generation);
1035 2 :
1036 2 : let tenant_shard_id = ctx.harness.tenant_shard_id;
1037 2 : let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
1038 2 : let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
1039 2 :
1040 2 : // Initial state: a remote layer exists
1041 2 : let remote_layer_name = ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
1042 2 : assert_remote_files(&[&remote_layer_name], &remote_timeline_path);
1043 2 :
1044 2 : tracing::debug!("Pushing...");
1045 2 : client
1046 2 : .push_layers(
1047 2 : tenant_shard_id,
1048 2 : TIMELINE_ID,
1049 2 : stale_generation,
1050 2 : [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
1051 2 : )
1052 2 : .await?;
1053 2 :
1054 2 : // We enqueued the operation in a stale generation: it should have failed validation
1055 2 : tracing::debug!("Flushing...");
1056 6 : tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
1057 2 : assert_remote_files(&[&remote_layer_name], &remote_timeline_path);
1058 2 :
1059 2 : tracing::debug!("Pushing...");
1060 2 : client
1061 2 : .push_layers(
1062 2 : tenant_shard_id,
1063 2 : TIMELINE_ID,
1064 2 : latest_generation,
1065 2 : [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
1066 2 : )
1067 2 : .await?;
1068 2 :
1069 2 : // We enqueued the operation in a fresh generation: it should have passed validation
1070 2 : tracing::debug!("Flushing...");
1071 6 : tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
1072 2 : assert_remote_files(&[], &remote_timeline_path);
1073 2 :
1074 2 : Ok(())
1075 2 : }
1076 :
1077 : #[tokio::test]
1078 2 : async fn deletion_queue_recovery() -> anyhow::Result<()> {
1079 2 : // Test that deletion lists persisted before a restart are recovered and applied correctly
1080 2 : let mut ctx = setup("deletion_queue_recovery").expect("Failed test setup");
1081 2 : let client = ctx.deletion_queue.new_client();
1082 2 : client.recover(HashMap::new())?;
1083 2 :
1084 2 : let tenant_shard_id = ctx.harness.tenant_shard_id;
1085 2 :
1086 2 : let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
1087 2 : let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
1088 2 : let deletion_prefix = ctx.harness.conf.deletion_prefix();
1089 2 :
1090 2 : let layer_generation = Generation::new(0xdeadbeef);
1091 2 : let now_generation = Generation::new(0xfeedbeef);
1092 2 : let layer_metadata =
1093 2 : LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());
1094 2 :
1095 2 : // Inject a deletion in the generation before generation_now: after restart,
1096 2 : // this deletion should _not_ get executed (only the immediately previous
1097 2 : // generation gets that treatment)
1098 2 : let remote_layer_file_name_historical =
1099 2 : ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
1100 2 : client
1101 2 : .push_layers(
1102 2 : tenant_shard_id,
1103 2 : TIMELINE_ID,
1104 2 : now_generation.previous(),
1105 2 : [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
1106 2 : )
1107 2 : .await?;
1108 2 :
1109 2 : // Inject a deletion in generation_now: after restart,
1110 2 : // this deletion should get executed, because we execute deletions in the
1111 2 : // immediately previous generation on the same node.
1112 2 : let remote_layer_file_name_previous =
1113 2 : ctx.write_remote_layer(EXAMPLE_LAYER_NAME_ALT, layer_generation)?;
1114 2 : client
1115 2 : .push_layers(
1116 2 : tenant_shard_id,
1117 2 : TIMELINE_ID,
1118 2 : now_generation,
1119 2 : [(EXAMPLE_LAYER_NAME_ALT.clone(), layer_metadata.clone())].to_vec(),
1120 2 : )
1121 2 : .await?;
1122 2 :
1123 2 : client.flush().await?;
1124 2 : assert_remote_files(
1125 2 : &[
1126 2 : &remote_layer_file_name_historical,
1127 2 : &remote_layer_file_name_previous,
1128 2 : ],
1129 2 : &remote_timeline_path,
1130 2 : );
1131 2 :
1132 2 : // Different generations for the same tenant will cause two separate
1133 2 : // deletion lists to be emitted.
1134 2 : assert_local_files(
1135 2 : &["0000000000000001-01.list", "0000000000000002-01.list"],
1136 2 : &deletion_prefix,
1137 2 : );
1138 2 :
1139 2 : // Simulate a node restart: the latest generation advances
1140 2 : let now_generation = now_generation.next();
1141 2 : ctx.set_latest_generation(now_generation);
1142 2 :
1143 2 : // Restart the deletion queue
1144 2 : drop(client);
1145 2 : ctx.restart().await;
1146 2 : let client = ctx.deletion_queue.new_client();
1147 2 : client.recover(HashMap::from([(tenant_shard_id, now_generation)]))?;
1148 2 :
1149 2 : info!("Flush-executing");
1150 6 : client.flush_execute().await?;
1151 2 : // The deletion from immediately prior generation was executed, the one from
1152 2 : // an older generation was not.
1153 2 : assert_remote_files(&[&remote_layer_file_name_historical], &remote_timeline_path);
1154 2 : Ok(())
1155 2 : }
1156 : }
1157 :
1158 : /// A lightweight queue which can issue ordinary DeletionQueueClient objects, but doesn't do any persistence
1159 : /// or coalescing, and doesn't actually execute any deletions unless you call pump() to kick it.
1160 : #[cfg(test)]
1161 : pub(crate) mod mock {
1162 : use tracing::info;
1163 :
1164 : use super::*;
1165 : use std::sync::atomic::{AtomicUsize, Ordering};
1166 :
1167 : pub struct ConsumerState {
1168 : rx: tokio::sync::mpsc::UnboundedReceiver<ListWriterQueueMessage>,
1169 : executor_rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
1170 : cancel: CancellationToken,
1171 : }
1172 :
1173 : impl ConsumerState {
1174 2 : async fn consume(&mut self, remote_storage: &GenericRemoteStorage) -> usize {
1175 2 : let mut executed = 0;
1176 2 :
1177 2 : info!("Executing all pending deletions");
1178 :
1179 : // Transform all executor messages to generic frontend messages
1180 2 : while let Ok(msg) = self.executor_rx.try_recv() {
1181 0 : match msg {
1182 0 : DeleterMessage::Delete(objects) => {
1183 0 : for path in objects {
1184 0 : match remote_storage.delete(&path, &self.cancel).await {
1185 : Ok(_) => {
1186 0 : debug!("Deleted {path}");
1187 : }
1188 0 : Err(e) => {
1189 0 : error!("Failed to delete {path}, leaking object! ({e})");
1190 : }
1191 : }
1192 0 : executed += 1;
1193 : }
1194 : }
1195 0 : DeleterMessage::Flush(flush_op) => {
1196 0 : flush_op.notify();
1197 0 : }
1198 : }
1199 : }
1200 :
1201 4 : while let Ok(msg) = self.rx.try_recv() {
1202 2 : match msg {
1203 2 : ListWriterQueueMessage::Delete(op) => {
1204 2 : let mut objects = op.objects;
1205 4 : for (layer, meta) in op.layers {
1206 2 : objects.push(remote_layer_path(
1207 2 : &op.tenant_shard_id.tenant_id,
1208 2 : &op.timeline_id,
1209 2 : meta.shard,
1210 2 : &layer,
1211 2 : meta.generation,
1212 2 : ));
1213 2 : }
1214 :
1215 4 : for path in objects {
1216 2 : info!("Executing deletion {path}");
1217 2 : match remote_storage.delete(&path, &self.cancel).await {
1218 : Ok(_) => {
1219 2 : debug!("Deleted {path}");
1220 : }
1221 0 : Err(e) => {
1222 0 : error!("Failed to delete {path}, leaking object! ({e})");
1223 : }
1224 : }
1225 2 : executed += 1;
1226 : }
1227 : }
1228 0 : ListWriterQueueMessage::Flush(op) => {
1229 0 : op.notify();
1230 0 : }
1231 0 : ListWriterQueueMessage::FlushExecute(op) => {
1232 0 : // We have already executed all prior deletions because mock does them inline
1233 0 : op.notify();
1234 0 : }
1235 0 : ListWriterQueueMessage::Recover(_) => {
1236 0 : // no-op in mock
1237 0 : }
1238 : }
1239 2 : info!("All pending deletions have been executed");
1240 : }
1241 :
1242 2 : executed
1243 2 : }
1244 : }
1245 :
1246 : pub struct MockDeletionQueue {
1247 : tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
1248 : executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,
1249 : executed: Arc<AtomicUsize>,
1250 : remote_storage: Option<GenericRemoteStorage>,
1251 : consumer: std::sync::Mutex<ConsumerState>,
1252 : lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
1253 : }
1254 :
1255 : impl MockDeletionQueue {
1256 118 : pub fn new(remote_storage: Option<GenericRemoteStorage>) -> Self {
1257 118 : let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
1258 118 : let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16384);
1259 118 :
1260 118 : let executed = Arc::new(AtomicUsize::new(0));
1261 118 :
1262 118 : Self {
1263 118 : tx,
1264 118 : executor_tx,
1265 118 : executed,
1266 118 : remote_storage,
1267 118 : consumer: std::sync::Mutex::new(ConsumerState {
1268 118 : rx,
1269 118 : executor_rx,
1270 118 : cancel: CancellationToken::new(),
1271 118 : }),
1272 118 : lsn_table: Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new())),
1273 118 : }
1274 118 : }
1275 :
1276 : #[allow(clippy::await_holding_lock)]
1277 2 : pub async fn pump(&self) {
1278 2 : if let Some(remote_storage) = &self.remote_storage {
1279 : // Permit holding mutex across await, because this is only ever
1280 : // called once at a time in tests.
1281 2 : let mut locked = self.consumer.lock().unwrap();
1282 2 : let count = locked.consume(remote_storage).await;
1283 2 : self.executed.fetch_add(count, Ordering::Relaxed);
1284 0 : }
1285 2 : }
1286 :
1287 128 : pub(crate) fn new_client(&self) -> DeletionQueueClient {
1288 128 : DeletionQueueClient {
1289 128 : tx: self.tx.clone(),
1290 128 : executor_tx: self.executor_tx.clone(),
1291 128 : lsn_table: self.lsn_table.clone(),
1292 128 : }
1293 128 : }
1294 : }
1295 :
1296 : /// Test round-trip serialization/deserialization, and test stability of the format
1297 : /// vs. a static expected string for the serialized version.
1298 : #[test]
1299 2 : fn deletion_list_serialization() -> anyhow::Result<()> {
1300 2 : let tenant_id = "ad6c1a56f5680419d3a16ff55d97ec3c"
1301 2 : .to_string()
1302 2 : .parse::<TenantShardId>()?;
1303 2 : let timeline_id = "be322c834ed9e709e63b5c9698691910"
1304 2 : .to_string()
1305 2 : .parse::<TimelineId>()?;
1306 2 : let generation = Generation::new(123);
1307 :
1308 2 : let object =
1309 2 : RemotePath::from_string(&format!("tenants/{tenant_id}/timelines/{timeline_id}/foo"))?;
1310 2 : let mut objects = [object].to_vec();
1311 2 :
1312 2 : let mut example = DeletionList::new(1);
1313 2 : example.push(&tenant_id, &timeline_id, generation, &mut objects);
1314 :
1315 2 : let encoded = serde_json::to_string(&example)?;
1316 :
1317 2 : let expected = "{\"version\":1,\"sequence\":1,\"tenants\":{\"ad6c1a56f5680419d3a16ff55d97ec3c\":{\"timelines\":{\"be322c834ed9e709e63b5c9698691910\":[\"foo\"]},\"generation\":123}},\"size\":1}".to_string();
1318 2 : assert_eq!(encoded, expected);
1319 :
1320 2 : let decoded = serde_json::from_str::<DeletionList>(&encoded)?;
1321 2 : assert_eq!(example, decoded);
1322 :
1323 2 : Ok(())
1324 2 : }
1325 : }