mod deleter;
mod list_writer;
mod validator;

use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use crate::control_plane_client::ControlPlaneGenerationsApi;
use crate::metrics;
use crate::tenant::remote_timeline_client::remote_layer_path;
use crate::tenant::remote_timeline_client::remote_timeline_path;
use crate::tenant::remote_timeline_client::LayerFileMetadata;
use crate::virtual_file::MaybeFatalIo;
use crate::virtual_file::VirtualFile;
use anyhow::Context;
use camino::Utf8PathBuf;
use pageserver_api::shard::TenantShardId;
use remote_storage::{GenericRemoteStorage, RemotePath};
use serde::Deserialize;
use serde::Serialize;
use thiserror::Error;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use tracing::{debug, error};
use utils::crashsafe::path_with_suffix_extension;
use utils::generation::Generation;
use utils::id::TimelineId;
use utils::lsn::AtomicLsn;
use utils::lsn::Lsn;

use self::deleter::Deleter;
use self::list_writer::DeletionOp;
use self::list_writer::ListWriter;
use self::list_writer::RecoverOp;
use self::validator::Validator;
use deleter::DeleterMessage;
use list_writer::ListWriterQueueMessage;
use validator::ValidatorQueueMessage;

use crate::{config::PageServerConf, tenant::storage_layer::LayerName};

// TODO: configurable for how long to wait before executing deletions

/// We aggregate object deletions from many tenants in one place, for several reasons:
/// - Coalesce deletions into fewer DeleteObjects calls
/// - Enable Tenant/Timeline lifetimes to be shorter than the time it takes
///   to flush any outstanding deletions.
/// - Globally control throughput of deletions, as these are a low-priority task: do
///   not compete with the same S3 clients/connections used for higher-priority uploads.
/// - Enable gating deletions on validation of a tenant's generation number, to make
///   it safe to multi-attach tenants (see docs/rfcs/025-generation-numbers.md)
///
/// There are two kinds of deletion: deferred and immediate. A deferred deletion
/// may be intentionally delayed to protect passive readers of S3 data, and is
/// subject to a generation number validation step. An immediate deletion is
/// ready to execute immediately, and is only queued up so that it can be coalesced
/// with other deletions in flight.
///
/// Deferred deletions pass through three steps:
/// - ListWriter: accumulate deletion requests from Timelines, and batch them up into
///   DeletionLists, which are persisted to disk.
/// - Validator: accumulate deletion lists, and validate them en masse prior to passing
///   the keys in the list onward for actual deletion. Also validate remote_consistent_lsn
///   updates for running timelines.
/// - Deleter: accumulate object keys that the validator has validated, and execute them in
///   batches of 1000 keys via DeleteObjects.
///
/// Non-deferred deletions, such as during timeline deletion, bypass the first
/// two stages and are passed straight into the Deleter.
///
/// Internally, each stage is joined by a channel to the next. On disk, there is only
/// one queue (of DeletionLists), which is written by the frontend and consumed
/// by the backend.
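///
/// A minimal usage sketch (hedged: `storage`, `control_plane`, `conf`, and the
/// tenant/timeline/generation/layer values are placeholders the caller supplies;
/// error handling elided):
/// ```ignore
/// let (queue, workers) = DeletionQueue::new(storage, Some(control_plane), conf);
/// let _join = workers.unwrap().spawn_with(&tokio::runtime::Handle::current());
/// let client = queue.new_client();
/// client.push_layers_sync(tenant_shard_id, timeline_id, generation, layers)?;
/// client.flush_execute().await?; // wait until the deletions have hit remote storage
/// ```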
#[derive(Clone)]
pub struct DeletionQueue {
    client: DeletionQueueClient,

    // Parent cancellation token for the tokens passed into background workers
    cancel: CancellationToken,
}

/// Opaque wrapper around individual worker tasks, to avoid making the
/// worker objects themselves public
pub struct DeletionQueueWorkers<C>
where
    C: ControlPlaneGenerationsApi + Send + Sync,
{
    frontend: ListWriter,
    backend: Validator<C>,
    executor: Deleter,
}

impl<C> DeletionQueueWorkers<C>
where
    C: ControlPlaneGenerationsApi + Send + Sync + 'static,
{
    pub fn spawn_with(mut self, runtime: &tokio::runtime::Handle) -> tokio::task::JoinHandle<()> {
        let jh_frontend = runtime.spawn(async move {
            self.frontend
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion frontend"))
                .await
        });
        let jh_backend = runtime.spawn(async move {
            self.backend
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion backend"))
                .await
        });
        let jh_executor = runtime.spawn(async move {
            self.executor
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion executor"))
                .await
        });

        runtime.spawn({
            async move {
                jh_frontend.await.expect("error joining frontend worker");
                jh_backend.await.expect("error joining backend worker");
                drop(jh_executor.await.expect("error joining executor worker"));
            }
        })
    }
}

/// A FlushOp is just a oneshot channel, where we send the transmit side down
/// another channel, and the receive side will receive a message when the channel
/// we're flushing has reached the FlushOp we sent into it.
///
/// The only extra behavior beyond the channel is that the notify() method does not
/// return an error when the receive side has been dropped, because in this use case
/// it is harmless (the code that initiated the flush no longer cares about the result).
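///
/// A sketch of the pattern (hypothetical `worker_tx` and `Message` wrapper; the real
/// queues wrap FlushOp in their own message enums):
/// ```ignore
/// let (flush_op, rx) = FlushOp::new();
/// worker_tx.send(Message::Flush(flush_op))?; // worker calls flush_op.notify() on arrival
/// rx.await.ok(); // resolves once everything queued before the FlushOp is processed
/// ```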
#[derive(Debug)]
struct FlushOp {
    tx: tokio::sync::oneshot::Sender<()>,
}

impl FlushOp {
    fn new() -> (Self, tokio::sync::oneshot::Receiver<()>) {
        let (tx, rx) = tokio::sync::oneshot::channel::<()>();
        (Self { tx }, rx)
    }

    fn notify(self) {
        if self.tx.send(()).is_err() {
            // oneshot channel closed. This is legal: a client could be destroyed while waiting for a flush.
            debug!("deletion queue flush from dropped client");
        };
    }
}

#[derive(Clone, Debug)]
pub struct DeletionQueueClient {
    tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
    executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,

    lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct TenantDeletionList {
    /// For each Timeline, a list of key fragments to append to the timeline remote path
    /// when reconstructing a full key
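    /// (e.g. a fragment `"foo"` for timeline `T` of tenant `t` reconstructs to
    /// `tenants/t/timelines/T/foo`, as exercised by `deletion_list_serialization` below)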
    timelines: HashMap<TimelineId, Vec<String>>,

    /// The generation in which this deletion was emitted: note that this may not be the
    /// same as the generation of any layers being deleted. The generation of the layer
    /// has already been absorbed into the keys in `objects`
    generation: Generation,
}

impl TenantDeletionList {
    pub(crate) fn len(&self) -> usize {
        self.timelines.values().map(|v| v.len()).sum()
    }
}

/// Files ending with this suffix will be ignored and erased
/// during recovery at startup.
const TEMP_SUFFIX: &str = "tmp";
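
// The `VirtualFile::crashsafe_overwrite` calls below persist deletion state with a
// write-to-temp-then-rename pattern. A minimal std-only sketch of the idea, assuming
// the temp path is on the same filesystem and ignoring the fsyncs a production
// implementation also needs:
//
//     fn overwrite_sketch(path: &std::path::Path, tmp: &std::path::Path, bytes: &[u8]) -> std::io::Result<()> {
//         std::fs::write(tmp, bytes)?; // a crash here leaves only a TEMP_SUFFIX file behind
//         std::fs::rename(tmp, path)   // atomic replace: readers see old or new, never partial
//     }
//
// This is why TEMP_SUFFIX files can be unconditionally erased during startup recovery.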

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct DeletionList {
    /// Serialization version, for future use
    version: u8,

    /// Used for constructing a unique key for each deletion list we write out.
    sequence: u64,

    /// To avoid repeating tenant/timeline IDs in every key, we store keys in
    /// nested HashMaps by TenantTimelineID. Each Tenant only appears once
    /// with one unique generation ID: if someone tries to push a second generation
    /// ID for the same tenant, we will start a new DeletionList.
    tenants: HashMap<TenantShardId, TenantDeletionList>,

    /// Avoid having to walk `tenants` to calculate the number of keys in
    /// the nested deletion lists
    size: usize,

    /// Set to true when the list has undergone validation with the control
    /// plane and the remaining contents of `tenants` are valid. A list may
    /// also be implicitly marked valid by DeletionHeader.validated_sequence
    /// advancing to >= DeletionList.sequence
    #[serde(default)]
    #[serde(skip_serializing_if = "std::ops::Not::not")]
    validated: bool,
}

#[derive(Debug, Serialize, Deserialize)]
struct DeletionHeader {
    /// Serialization version, for future use
    version: u8,

    /// The highest sequence number (inclusive) that has been validated. All deletion
    /// lists on disk with a sequence <= this value are safe to execute.
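    /// (e.g. with `validated_sequence: 5`, recovery may execute on-disk lists `1..=5`
    /// even if those lists' own `validated` flags were never rewritten)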
    validated_sequence: u64,
}

impl DeletionHeader {
    const VERSION_LATEST: u8 = 1;

    fn new(validated_sequence: u64) -> Self {
        Self {
            version: Self::VERSION_LATEST,
            validated_sequence,
        }
    }

    async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
        debug!("Saving deletion list header {:?}", self);
        let header_bytes = serde_json::to_vec(self).context("serialize deletion header")?;
        let header_path = conf.deletion_header_path();
        let temp_path = path_with_suffix_extension(&header_path, TEMP_SUFFIX);
        VirtualFile::crashsafe_overwrite(header_path, temp_path, header_bytes)
            .await
            .maybe_fatal_err("save deletion header")?;

        Ok(())
    }
}

impl DeletionList {
    const VERSION_LATEST: u8 = 1;
    fn new(sequence: u64) -> Self {
        Self {
            version: Self::VERSION_LATEST,
            sequence,
            tenants: HashMap::new(),
            size: 0,
            validated: false,
        }
    }

    fn is_empty(&self) -> bool {
        self.tenants.is_empty()
    }

    fn len(&self) -> usize {
        self.size
    }

    /// Returns true if the push was accepted, false if the caller must start a new
    /// deletion list.
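    ///
    /// A hypothetical caller loop showing the rollover contract (`pending` and
    /// `next_sequence` are placeholders):
    /// ```ignore
    /// if !list.push(&tenant, &timeline, generation, &mut objects) {
    ///     // Same tenant seen with a different generation: seal this list, start the next.
    ///     pending.push(std::mem::replace(&mut list, DeletionList::new(next_sequence)));
    ///     assert!(list.push(&tenant, &timeline, generation, &mut objects));
    /// }
    /// ```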
    fn push(
        &mut self,
        tenant: &TenantShardId,
        timeline: &TimelineId,
        generation: Generation,
        objects: &mut Vec<RemotePath>,
    ) -> bool {
        if objects.is_empty() {
            // Avoid inserting an empty TimelineDeletionList: this preserves the property
            // that if we have no keys, then self.objects is empty (used in Self::is_empty)
            return true;
        }

        let tenant_entry = self
            .tenants
            .entry(*tenant)
            .or_insert_with(|| TenantDeletionList {
                timelines: HashMap::new(),
                generation,
            });

        if tenant_entry.generation != generation {
            // Only one generation per tenant per list: signal to
            // caller to start a new list.
            return false;
        }

        let timeline_entry = tenant_entry.timelines.entry(*timeline).or_default();

        let timeline_remote_path = remote_timeline_path(tenant, timeline);

        self.size += objects.len();
        timeline_entry.extend(objects.drain(..).map(|p| {
            p.strip_prefix(&timeline_remote_path)
                .expect("Timeline paths always start with the timeline prefix")
                .to_string()
        }));
        true
    }

    fn into_remote_paths(self) -> Vec<RemotePath> {
        let mut result = Vec::new();
        for (tenant, tenant_deletions) in self.tenants.into_iter() {
            for (timeline, timeline_layers) in tenant_deletions.timelines.into_iter() {
                let timeline_remote_path = remote_timeline_path(&tenant, &timeline);
                result.extend(
                    timeline_layers
                        .into_iter()
                        .map(|l| timeline_remote_path.join(Utf8PathBuf::from(l))),
                );
            }
        }

        result
    }

    async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
        let path = conf.deletion_list_path(self.sequence);
        let temp_path = path_with_suffix_extension(&path, TEMP_SUFFIX);

        let bytes = serde_json::to_vec(self).expect("Failed to serialize deletion list");

        VirtualFile::crashsafe_overwrite(path, temp_path, bytes)
            .await
            .maybe_fatal_err("save deletion list")
            .map_err(Into::into)
    }
}

impl std::fmt::Display for DeletionList {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "DeletionList<seq={}, tenants={}, keys={}>",
            self.sequence,
            self.tenants.len(),
            self.size
        )
    }
}

struct PendingLsn {
    projected: Lsn,
    result_slot: Arc<AtomicLsn>,
}

struct TenantLsnState {
    timelines: HashMap<TimelineId, PendingLsn>,

    // In what generation was the most recent update proposed?
    generation: Generation,
}

#[derive(Default)]
struct VisibleLsnUpdates {
    tenants: HashMap<TenantShardId, TenantLsnState>,
}

impl VisibleLsnUpdates {
    fn new() -> Self {
        Self {
            tenants: HashMap::new(),
        }
    }
}

impl std::fmt::Debug for VisibleLsnUpdates {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "VisibleLsnUpdates({} tenants)", self.tenants.len())
    }
}

#[derive(Error, Debug)]
pub enum DeletionQueueError {
    #[error("Deletion queue unavailable during shutdown")]
    ShuttingDown,
}

impl DeletionQueueClient {
    pub(crate) fn broken() -> Self {
        // Channels whose receivers are immediately dropped.
        let (tx, _rx) = tokio::sync::mpsc::unbounded_channel();
        let (executor_tx, _executor_rx) = tokio::sync::mpsc::channel(1);
        Self {
            tx,
            executor_tx,
            lsn_table: Arc::default(),
        }
    }

    /// This is cancel-safe. If you drop the future before it completes, the message
    /// is not pushed, although in the context of the deletion queue it doesn't matter: once
    /// we decide to do a deletion the decision is always final.
    fn do_push<T>(
        &self,
        queue: &tokio::sync::mpsc::UnboundedSender<T>,
        msg: T,
    ) -> Result<(), DeletionQueueError> {
        match queue.send(msg) {
            Ok(_) => Ok(()),
            Err(e) => {
                // This shouldn't happen, we should shut down all tenants before
                // we shut down the global delete queue. If we encounter a bug like this,
                // we may leak objects as deletions won't be processed.
                error!("Deletion queue closed while pushing, shutting down? ({e})");
                Err(DeletionQueueError::ShuttingDown)
            }
        }
    }

    pub(crate) fn recover(
        &self,
        attached_tenants: HashMap<TenantShardId, Generation>,
    ) -> Result<(), DeletionQueueError> {
        self.do_push(
            &self.tx,
            ListWriterQueueMessage::Recover(RecoverOp { attached_tenants }),
        )
    }

    /// When a Timeline wishes to update the remote_consistent_lsn that it exposes to the outside
    /// world, it must validate its generation number before doing so. Rather than do this synchronously,
    /// we allow the timeline to publish updates at will via this API, and then read back what LSN was most
    /// recently validated separately.
    ///
    /// In this function we publish the LSN to the `projected` field of the timeline's entry in the VisibleLsnUpdates. The
    /// backend will later wake up and notice that the tenant's generation requires validation.
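    ///
    /// A sketch of the intended call pattern (names hypothetical):
    /// ```ignore
    /// let slot = Arc::new(AtomicLsn::new(0));
    /// client.update_remote_consistent_lsn(tenant, timeline, generation, lsn, slot.clone()).await;
    /// // ... later: `slot.load()` returns whatever LSN has passed validation so far,
    /// // which may lag the `lsn` we just projected.
    /// ```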
    pub(crate) async fn update_remote_consistent_lsn(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        current_generation: Generation,
        lsn: Lsn,
        result_slot: Arc<AtomicLsn>,
    ) {
        let mut locked = self
            .lsn_table
            .write()
            .expect("Lock should never be poisoned");

        let tenant_entry = locked
            .tenants
            .entry(tenant_shard_id)
            .or_insert(TenantLsnState {
                timelines: HashMap::new(),
                generation: current_generation,
            });

        if tenant_entry.generation != current_generation {
            // Generation might have changed if we were detached and then re-attached: in this case,
            // state from the previous generation cannot be trusted.
            tenant_entry.timelines.clear();
            tenant_entry.generation = current_generation;
        }

        tenant_entry.timelines.insert(
            timeline_id,
            PendingLsn {
                projected: lsn,
                result_slot,
            },
        );
    }

    /// Submit a list of layers for deletion: this function will return before the deletion is
    /// persistent, but it may be executed at any time after this function enters: do not push
    /// layers until you're sure they can be deleted safely (i.e. remote metadata no longer
    /// references them).
    ///
    /// The `current_generation` is the generation of this pageserver's current attachment. The
    /// generations in `layers` are the generations in which those layers were written.
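    ///
    /// A hedged usage sketch (all values supplied by the caller):
    /// ```ignore
    /// client.push_layers(tenant_shard_id, timeline_id, current_generation, layers).await?;
    /// client.flush().await?; // now the deletions are at least durably listed on disk
    /// ```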
    pub(crate) async fn push_layers(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        current_generation: Generation,
        layers: Vec<(LayerName, LayerFileMetadata)>,
    ) -> Result<(), DeletionQueueError> {
        if current_generation.is_none() {
            debug!("Enqueuing deletions in legacy mode, skipping queue");

            let mut layer_paths = Vec::new();
            for (layer, meta) in layers {
                layer_paths.push(remote_layer_path(
                    &tenant_shard_id.tenant_id,
                    &timeline_id,
                    meta.shard,
                    &layer,
                    meta.generation,
                ));
            }
            self.push_immediate(layer_paths).await?;
            return self.flush_immediate().await;
        }

        self.push_layers_sync(tenant_shard_id, timeline_id, current_generation, layers)
    }

    /// When a Tenant has a generation, push_layers is always synchronous because
    /// the ListValidator channel is an unbounded channel.
    ///
    /// This can be merged into push_layers when we remove the Generation-less mode
    /// support (`<https://github.com/neondatabase/neon/issues/5395>`)
    pub(crate) fn push_layers_sync(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        current_generation: Generation,
        layers: Vec<(LayerName, LayerFileMetadata)>,
    ) -> Result<(), DeletionQueueError> {
        metrics::DELETION_QUEUE
            .keys_submitted
            .inc_by(layers.len() as u64);
        self.do_push(
            &self.tx,
            ListWriterQueueMessage::Delete(DeletionOp {
                tenant_shard_id,
                timeline_id,
                layers,
                generation: current_generation,
                objects: Vec::new(),
            }),
        )
    }

    /// This is cancel-safe. If you drop the future the flush may still happen in the background.
    async fn do_flush<T>(
        &self,
        queue: &tokio::sync::mpsc::UnboundedSender<T>,
        msg: T,
        rx: tokio::sync::oneshot::Receiver<()>,
    ) -> Result<(), DeletionQueueError> {
        self.do_push(queue, msg)?;
        if rx.await.is_err() {
            // This shouldn't happen if tenants are shut down before the deletion queue. If we
            // encounter a bug like this, then a flusher will incorrectly believe it has flushed
            // when it hasn't, possibly leading to leaking objects.
            error!("Deletion queue dropped flush op while client was still waiting");
            Err(DeletionQueueError::ShuttingDown)
        } else {
            Ok(())
        }
    }

    /// Wait until all previous deletions are persistent (either executed, or written to a DeletionList)
    ///
    /// This is cancel-safe. If you drop the future the flush may still happen in the background.
    pub async fn flush(&self) -> Result<(), DeletionQueueError> {
        let (flush_op, rx) = FlushOp::new();
        self.do_flush(&self.tx, ListWriterQueueMessage::Flush(flush_op), rx)
            .await
    }

    /// Issue a flush without waiting for it to complete. This is useful on advisory flushes where
    /// the caller wants to avoid the risk of waiting for lots of enqueued work, such as on tenant
    /// detach, where flushing is nice but not necessary.
    ///
    /// This function provides no guarantees of work being done.
    pub fn flush_advisory(&self) {
        let (flush_op, _) = FlushOp::new();

        // Transmit the flush message, ignoring any result (such as a closed channel during shutdown).
        drop(self.tx.send(ListWriterQueueMessage::FlushExecute(flush_op)));
    }

    // Wait until all previous deletions are executed
    pub(crate) async fn flush_execute(&self) -> Result<(), DeletionQueueError> {
        debug!("flush_execute: flushing to deletion lists...");
        // Flush any buffered work to deletion lists
        self.flush().await?;

        // Flush the backend into the executor of deletion lists
        let (flush_op, rx) = FlushOp::new();
        debug!("flush_execute: flushing backend...");
        self.do_flush(&self.tx, ListWriterQueueMessage::FlushExecute(flush_op), rx)
            .await?;
        debug!("flush_execute: finished flushing backend...");

        // Flush any immediate-mode deletions (the above backend flush will only flush
        // the executor if deletions had flowed through the backend)
        debug!("flush_execute: flushing execution...");
        self.flush_immediate().await?;
        debug!("flush_execute: finished flushing execution...");
        Ok(())
    }

    /// This interface bypasses the persistent deletion queue, and any validation
    /// that this pageserver is still eligible to execute the deletions. It is for
    /// use in timeline deletions, where the control plane is telling us we may
    /// delete everything in the timeline.
    ///
    /// DO NOT USE THIS FROM GC OR COMPACTION CODE. Use the regular `push_layers`.
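    ///
    /// A hedged sketch of the companion-call pattern:
    /// ```ignore
    /// client.push_immediate(objects).await?;
    /// client.flush_immediate().await?; // all prior immediate deletions have executed
    /// ```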
    pub(crate) async fn push_immediate(
        &self,
        objects: Vec<RemotePath>,
    ) -> Result<(), DeletionQueueError> {
        metrics::DELETION_QUEUE
            .keys_submitted
            .inc_by(objects.len() as u64);
        self.executor_tx
            .send(DeleterMessage::Delete(objects))
            .await
            .map_err(|_| DeletionQueueError::ShuttingDown)
    }

    /// Companion to push_immediate. When this returns Ok, all prior objects sent
    /// into push_immediate have been deleted from remote storage.
    pub(crate) async fn flush_immediate(&self) -> Result<(), DeletionQueueError> {
        let (flush_op, rx) = FlushOp::new();
        self.executor_tx
            .send(DeleterMessage::Flush(flush_op))
            .await
            .map_err(|_| DeletionQueueError::ShuttingDown)?;

        rx.await.map_err(|_| DeletionQueueError::ShuttingDown)
    }
}

impl DeletionQueue {
    pub fn new_client(&self) -> DeletionQueueClient {
        self.client.clone()
    }

    /// Caller may use the returned object to construct clients with new_client.
    /// Caller should spawn the background() members of the returned worker object,
    /// e.g. via spawn_with: we don't spawn those inside new() so that the caller
    /// can use their runtime/spans of choice.
    pub fn new<C>(
        remote_storage: GenericRemoteStorage,
        control_plane_client: Option<C>,
        conf: &'static PageServerConf,
    ) -> (Self, Option<DeletionQueueWorkers<C>>)
    where
        C: ControlPlaneGenerationsApi + Send + Sync,
    {
        // Unbounded channel: enables non-async functions to submit deletions. The actual length is
        // constrained by how promptly the ListWriter wakes up and drains it, which should be frequent
        // enough to avoid this taking a pathologically large amount of memory.
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();

        // Shallow channel: it carries DeletionLists which each contain up to thousands of deletions
        let (backend_tx, backend_rx) = tokio::sync::mpsc::channel(16);

        // Shallow channel: it carries lists of paths, and we expect the main queueing to
        // happen in the backend (persistent), not in this queue.
        let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16);

        let lsn_table = Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new()));

        // The deletion queue has a cancellation token independent of the general
        // pageserver shutdown token, because it stays alive a bit longer, to flush
        // after Tenants have all been torn down.
        let cancel = CancellationToken::new();

        (
            Self {
                client: DeletionQueueClient {
                    tx,
                    executor_tx: executor_tx.clone(),
                    lsn_table: lsn_table.clone(),
                },
                cancel: cancel.clone(),
            },
            Some(DeletionQueueWorkers {
                frontend: ListWriter::new(conf, rx, backend_tx, cancel.clone()),
                backend: Validator::new(
                    conf,
                    backend_rx,
                    executor_tx,
                    control_plane_client,
                    lsn_table.clone(),
                    cancel.clone(),
                ),
                executor: Deleter::new(remote_storage, executor_rx, cancel.clone()),
            }),
        )
    }

    pub async fn shutdown(&mut self, timeout: Duration) {
        match tokio::time::timeout(timeout, self.client.flush()).await {
            Ok(Ok(())) => {
                tracing::info!("Deletion queue flushed successfully on shutdown")
            }
            Ok(Err(DeletionQueueError::ShuttingDown)) => {
                // This is not harmful for correctness, but is unexpected: the deletion
                // queue's workers should stay alive as long as there are any client handles instantiated.
                tracing::warn!("Deletion queue stopped prematurely");
            }
            Err(_timeout) => {
                tracing::warn!("Timed out flushing deletion queue on shutdown")
            }
        }

        // We only cancel _after_ flushing: otherwise we would be shutting down the
        // components that do the flush.
        self.cancel.cancel();
    }
}

#[cfg(test)]
mod test {
    use camino::Utf8Path;
    use hex_literal::hex;
    use pageserver_api::{shard::ShardIndex, upcall_api::ReAttachResponseTenant};
    use std::{io::ErrorKind, time::Duration};
    use tracing::info;

    use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
    use tokio::task::JoinHandle;

    use crate::{
        control_plane_client::RetryForeverError,
        repository::Key,
        tenant::{harness::TenantHarness, storage_layer::DeltaLayerName},
    };

    use super::*;
    pub const TIMELINE_ID: TimelineId =
        TimelineId::from_array(hex!("11223344556677881122334455667788"));

    pub const EXAMPLE_LAYER_NAME: LayerName = LayerName::Delta(DeltaLayerName {
        key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
        lsn_range: Lsn(0x00000000016B59D8)..Lsn(0x00000000016B5A51),
    });

    // When you need a second layer in a test.
    pub const EXAMPLE_LAYER_NAME_ALT: LayerName = LayerName::Delta(DeltaLayerName {
        key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
        lsn_range: Lsn(0x00000000016B5A51)..Lsn(0x00000000016B5A61),
    });

    struct TestSetup {
        harness: TenantHarness,
        remote_fs_dir: Utf8PathBuf,
        storage: GenericRemoteStorage,
        mock_control_plane: MockControlPlane,
        deletion_queue: DeletionQueue,
        worker_join: JoinHandle<()>,
    }

    impl TestSetup {
        /// Simulate a pageserver restart by destroying and recreating the deletion queue
        async fn restart(&mut self) {
            let (deletion_queue, workers) = DeletionQueue::new(
                self.storage.clone(),
                Some(self.mock_control_plane.clone()),
                self.harness.conf,
            );

            tracing::debug!("Spawning worker for new deletion queue");
            let worker_join = workers
                .unwrap()
                .spawn_with(&tokio::runtime::Handle::current());

            let old_worker_join = std::mem::replace(&mut self.worker_join, worker_join);
            let old_deletion_queue = std::mem::replace(&mut self.deletion_queue, deletion_queue);

            tracing::debug!("Joining worker from previous queue");
            old_deletion_queue.cancel.cancel();
            old_worker_join
                .await
                .expect("Failed to join workers for previous deletion queue");
        }

        fn set_latest_generation(&self, gen: Generation) {
            let tenant_shard_id = self.harness.tenant_shard_id;
            self.mock_control_plane
                .latest_generation
                .lock()
                .unwrap()
                .insert(tenant_shard_id, gen);
        }

        /// Returns the remote layer file name, suitable for use in assert_remote_files
        fn write_remote_layer(
            &self,
            file_name: LayerName,
            gen: Generation,
        ) -> anyhow::Result<String> {
            let tenant_shard_id = self.harness.tenant_shard_id;
            let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
            let remote_timeline_path = self.remote_fs_dir.join(relative_remote_path.get_path());
            std::fs::create_dir_all(&remote_timeline_path)?;
            let remote_layer_file_name = format!("{}{}", file_name, gen.get_suffix());

            let content: Vec<u8> = format!("placeholder contents of {file_name}").into();

            std::fs::write(
                remote_timeline_path.join(remote_layer_file_name.clone()),
                content,
            )?;

            Ok(remote_layer_file_name)
        }
    }

    #[derive(Debug, Clone)]
    struct MockControlPlane {
        pub latest_generation: std::sync::Arc<std::sync::Mutex<HashMap<TenantShardId, Generation>>>,
    }

    impl MockControlPlane {
        fn new() -> Self {
            Self {
                latest_generation: Arc::default(),
            }
        }
    }

    impl ControlPlaneGenerationsApi for MockControlPlane {
        async fn re_attach(
            &self,
            _conf: &PageServerConf,
        ) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> {
            unimplemented!()
        }

        async fn validate(
            &self,
            tenants: Vec<(TenantShardId, Generation)>,
        ) -> Result<HashMap<TenantShardId, bool>, RetryForeverError> {
            let mut result = HashMap::new();

            let latest_generation = self.latest_generation.lock().unwrap();

            for (tenant_shard_id, generation) in tenants {
                if let Some(latest) = latest_generation.get(&tenant_shard_id) {
                    result.insert(tenant_shard_id, *latest == generation);
                }
            }

            Ok(result)
        }
    }

    fn setup(test_name: &str) -> anyhow::Result<TestSetup> {
        let test_name = Box::leak(Box::new(format!("deletion_queue__{test_name}")));
        let harness = TenantHarness::create(test_name)?;

        // We do not load() the harness: we only need its config and remote_storage

        // Set up a GenericRemoteStorage targeting a directory
        let remote_fs_dir = harness.conf.workdir.join("remote_fs");
        std::fs::create_dir_all(remote_fs_dir)?;
        let remote_fs_dir = harness.conf.workdir.join("remote_fs").canonicalize_utf8()?;
        let storage_config = RemoteStorageConfig {
            storage: RemoteStorageKind::LocalFs(remote_fs_dir.clone()),
            timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
        };
        let storage = GenericRemoteStorage::from_config(&storage_config).unwrap();

        let mock_control_plane = MockControlPlane::new();

        let (deletion_queue, worker) = DeletionQueue::new(
            storage.clone(),
            Some(mock_control_plane.clone()),
            harness.conf,
        );

        let worker = worker.unwrap();
        let worker_join = worker.spawn_with(&tokio::runtime::Handle::current());

        Ok(TestSetup {
            harness,
            remote_fs_dir,
            storage,
            mock_control_plane,
            deletion_queue,
            worker_join,
        })
    }

    // TODO: put this in a common location so that we can share with remote_timeline_client's tests
    fn assert_remote_files(expected: &[&str], remote_path: &Utf8Path) {
        let mut expected: Vec<String> = expected.iter().map(|x| String::from(*x)).collect();
        expected.sort();

        let mut found: Vec<String> = Vec::new();
        let dir = match std::fs::read_dir(remote_path) {
            Ok(d) => d,
            Err(e) => {
                if e.kind() == ErrorKind::NotFound {
                    if expected.is_empty() {
                        // We are asserting prefix is empty: it is expected that the dir is missing
                        return;
                    } else {
                        assert_eq!(expected, Vec::<String>::new());
                        unreachable!();
                    }
                } else {
                    panic!("Unexpected error listing {remote_path}: {e}");
                }
            }
        };

        for entry in dir.flatten() {
            let entry_name = entry.file_name();
            let fname = entry_name.to_str().unwrap();
            found.push(String::from(fname));
        }
        found.sort();

        assert_eq!(expected, found);
    }

    fn assert_local_files(expected: &[&str], directory: &Utf8Path) {
        let dir = match std::fs::read_dir(directory) {
            Ok(d) => d,
            Err(_) => {
                assert_eq!(expected, &Vec::<String>::new());
                return;
            }
        };
        let mut found = Vec::new();
        for dentry in dir {
            let dentry = dentry.unwrap();
            let file_name = dentry.file_name();
            let file_name_str = file_name.to_string_lossy();
            found.push(file_name_str.to_string());
        }
        found.sort();
        assert_eq!(expected, found);
    }

    #[tokio::test]
    async fn deletion_queue_smoke() -> anyhow::Result<()> {
        // Basic test that the deletion queue processes the deletions we pass into it
        let ctx = setup("deletion_queue_smoke").expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        let layer_file_name_1: LayerName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
        let tenant_shard_id = ctx.harness.tenant_shard_id;

        let content: Vec<u8> = "victim1 contents".into();
        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
        let deletion_prefix = ctx.harness.conf.deletion_prefix();

        // Exercise the distinction between the generation of the layers
        // we delete, and the generation of the running Tenant.
        let layer_generation = Generation::new(0xdeadbeef);
        let now_generation = Generation::new(0xfeedbeef);
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        let remote_layer_file_name_1 =
            format!("{}{}", layer_file_name_1, layer_generation.get_suffix());

        // Set mock control plane state to valid for our generation
        ctx.set_latest_generation(now_generation);

        // Inject a victim file to remote storage
        info!("Writing");
        std::fs::create_dir_all(&remote_timeline_path)?;
        std::fs::write(
            remote_timeline_path.join(remote_layer_file_name_1.clone()),
            content,
        )?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);

        // File should still be there after we push it to the queue (we haven't pushed enough to flush anything)
        info!("Pushing");
        client
            .push_layers(
                tenant_shard_id,
                TIMELINE_ID,
                now_generation,
                [(layer_file_name_1.clone(), layer_metadata)].to_vec(),
            )
            .await?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);

        assert_local_files(&[], &deletion_prefix);

        // File should still be there after we write a deletion list (we haven't pushed enough to execute anything)
        info!("Flushing");
        client.flush().await?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);
        assert_local_files(&["0000000000000001-01.list"], &deletion_prefix);

        // File should go away when we execute
        info!("Flush-executing");
        client.flush_execute().await?;
        assert_remote_files(&[], &remote_timeline_path);
        assert_local_files(&["header-01"], &deletion_prefix);

        // Flushing on an empty queue should succeed immediately, and not write any lists
        info!("Flush-executing on empty");
        client.flush_execute().await?;
        assert_local_files(&["header-01"], &deletion_prefix);

        Ok(())
    }

    #[tokio::test]
    async fn deletion_queue_validation() -> anyhow::Result<()> {
        let ctx = setup("deletion_queue_validation").expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        // Generation that the control plane thinks is current
        let latest_generation = Generation::new(0xdeadbeef);
        // Generation that our DeletionQueue thinks the tenant is running with
        let stale_generation = latest_generation.previous();
        // Generation that our example layer file was written with
        let layer_generation = stale_generation.previous();
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        ctx.set_latest_generation(latest_generation);

        let tenant_shard_id = ctx.harness.tenant_shard_id;
        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());

        // Initial state: a remote layer exists
        let remote_layer_name = ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
        assert_remote_files(&[&remote_layer_name], &remote_timeline_path);

        tracing::debug!("Pushing...");
        client
            .push_layers(
                tenant_shard_id,
                TIMELINE_ID,
                stale_generation,
                [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
            )
            .await?;

        // We enqueued the operation in a stale generation: it should have failed validation
        tracing::debug!("Flushing...");
        tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
        assert_remote_files(&[&remote_layer_name], &remote_timeline_path);

        tracing::debug!("Pushing...");
        client
            .push_layers(
                tenant_shard_id,
                TIMELINE_ID,
                latest_generation,
                [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
            )
            .await?;

        // We enqueued the operation in a fresh generation: it should have passed validation
        tracing::debug!("Flushing...");
        tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
        assert_remote_files(&[], &remote_timeline_path);

        Ok(())
    }

    #[tokio::test]
    async fn deletion_queue_recovery() -> anyhow::Result<()> {
        // Basic test that deletion lists written before a restart are recovered, and
        // executed or dropped according to their generation
        let mut ctx = setup("deletion_queue_recovery").expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        let tenant_shard_id = ctx.harness.tenant_shard_id;

        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
        let deletion_prefix = ctx.harness.conf.deletion_prefix();

        let layer_generation = Generation::new(0xdeadbeef);
        let now_generation = Generation::new(0xfeedbeef);
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        // Inject a deletion in the generation before generation_now: after restart,
        // this deletion should _not_ get executed (only the immediately previous
        // generation gets that treatment)
        let remote_layer_file_name_historical =
            ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
        client
            .push_layers(
                tenant_shard_id,
                TIMELINE_ID,
                now_generation.previous(),
                [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
            )
            .await?;

        // Inject a deletion in generation_now: after restart, this deletion should
        // get executed, because we execute deletions in the immediately previous
        // generation on the same node.
        let remote_layer_file_name_previous =
            ctx.write_remote_layer(EXAMPLE_LAYER_NAME_ALT, layer_generation)?;
        client
            .push_layers(
                tenant_shard_id,
                TIMELINE_ID,
                now_generation,
                [(EXAMPLE_LAYER_NAME_ALT.clone(), layer_metadata.clone())].to_vec(),
            )
            .await?;

        client.flush().await?;
        assert_remote_files(
            &[
                &remote_layer_file_name_historical,
                &remote_layer_file_name_previous,
            ],
            &remote_timeline_path,
        );

        // Different generations for the same tenant will cause two separate
        // deletion lists to be emitted.
        assert_local_files(
            &["0000000000000001-01.list", "0000000000000002-01.list"],
            &deletion_prefix,
        );

        // Simulate a node restart: the latest generation advances
        let now_generation = now_generation.next();
        ctx.set_latest_generation(now_generation);

        // Restart the deletion queue
        drop(client);
        ctx.restart().await;
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::from([(tenant_shard_id, now_generation)]))?;

        info!("Flush-executing");
        client.flush_execute().await?;
        // The deletion from the immediately prior generation was executed; the one from
        // an older generation was not.
        assert_remote_files(&[&remote_layer_file_name_historical], &remote_timeline_path);
        Ok(())
    }
}

/// A lightweight queue which can issue ordinary DeletionQueueClient objects, but doesn't do any persistence
/// or coalescing, and doesn't actually execute any deletions unless you call pump() to kick it.
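///
/// A sketch of the intended test usage (hedged: `storage` and the tenant/timeline/
/// generation/layer values are placeholders the test supplies):
/// ```ignore
/// let mock = mock::MockDeletionQueue::new(Some(storage));
/// let client = mock.new_client();
/// client.push_layers_sync(tenant_shard_id, timeline_id, generation, layers)?;
/// mock.pump().await; // nothing is actually deleted until the test pumps the queue
/// ```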
#[cfg(test)]
pub(crate) mod mock {
    use tracing::info;

    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    pub struct ConsumerState {
        rx: tokio::sync::mpsc::UnboundedReceiver<ListWriterQueueMessage>,
        executor_rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
        cancel: CancellationToken,
    }

    impl ConsumerState {
        async fn consume(&mut self, remote_storage: &GenericRemoteStorage) -> usize {
            let mut executed = 0;

            info!("Executing all pending deletions");

            // Drain any messages that were sent directly to the executor stage
            while let Ok(msg) = self.executor_rx.try_recv() {
                match msg {
                    DeleterMessage::Delete(objects) => {
                        for path in objects {
                            match remote_storage.delete(&path, &self.cancel).await {
                                Ok(_) => {
                                    debug!("Deleted {path}");
                                }
                                Err(e) => {
                                    error!("Failed to delete {path}, leaking object! ({e})");
                                }
                            }
                            executed += 1;
                        }
                    }
                    DeleterMessage::Flush(flush_op) => {
                        flush_op.notify();
                    }
                }
            }

            while let Ok(msg) = self.rx.try_recv() {
                match msg {
                    ListWriterQueueMessage::Delete(op) => {
                        let mut objects = op.objects;
                        for (layer, meta) in op.layers {
                            objects.push(remote_layer_path(
                                &op.tenant_shard_id.tenant_id,
                                &op.timeline_id,
                                meta.shard,
                                &layer,
                                meta.generation,
                            ));
                        }

                        for path in objects {
                            info!("Executing deletion {path}");
                            match remote_storage.delete(&path, &self.cancel).await {
                                Ok(_) => {
                                    debug!("Deleted {path}");
                                }
                                Err(e) => {
                                    error!("Failed to delete {path}, leaking object! ({e})");
                                }
                            }
                            executed += 1;
                        }
                    }
                    ListWriterQueueMessage::Flush(op) => {
                        op.notify();
                    }
                    ListWriterQueueMessage::FlushExecute(op) => {
                        // We have already executed all prior deletions because the mock does them inline
                        op.notify();
                    }
                    ListWriterQueueMessage::Recover(_) => {
                        // no-op in mock
                    }
                }
                info!("All pending deletions have been executed");
            }

            executed
        }
    }

    pub struct MockDeletionQueue {
        tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
        executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,
        executed: Arc<AtomicUsize>,
        remote_storage: Option<GenericRemoteStorage>,
        consumer: std::sync::Mutex<ConsumerState>,
        lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
    }

    impl MockDeletionQueue {
        pub fn new(remote_storage: Option<GenericRemoteStorage>) -> Self {
            let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
            let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16384);

            let executed = Arc::new(AtomicUsize::new(0));

            Self {
                tx,
                executor_tx,
                executed,
                remote_storage,
                consumer: std::sync::Mutex::new(ConsumerState {
                    rx,
                    executor_rx,
                    cancel: CancellationToken::new(),
                }),
                lsn_table: Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new())),
            }
        }

        #[allow(clippy::await_holding_lock)]
        pub async fn pump(&self) {
            if let Some(remote_storage) = &self.remote_storage {
                // Permit holding the mutex across await, because this is only ever
                // called once at a time in tests.
                let mut locked = self.consumer.lock().unwrap();
                let count = locked.consume(remote_storage).await;
                self.executed.fetch_add(count, Ordering::Relaxed);
            }
        }

        pub(crate) fn new_client(&self) -> DeletionQueueClient {
            DeletionQueueClient {
                tx: self.tx.clone(),
                executor_tx: self.executor_tx.clone(),
                lsn_table: self.lsn_table.clone(),
            }
        }
    }

    /// Test round-trip serialization/deserialization, and test stability of the format
    /// vs. a static expected string for the serialized version.
    #[test]
    fn deletion_list_serialization() -> anyhow::Result<()> {
        let tenant_id = "ad6c1a56f5680419d3a16ff55d97ec3c"
            .to_string()
            .parse::<TenantShardId>()?;
        let timeline_id = "be322c834ed9e709e63b5c9698691910"
            .to_string()
            .parse::<TimelineId>()?;
        let generation = Generation::new(123);

        let object =
            RemotePath::from_string(&format!("tenants/{tenant_id}/timelines/{timeline_id}/foo"))?;
        let mut objects = [object].to_vec();

        let mut example = DeletionList::new(1);
        example.push(&tenant_id, &timeline_id, generation, &mut objects);

        let encoded = serde_json::to_string(&example)?;

        let expected = "{\"version\":1,\"sequence\":1,\"tenants\":{\"ad6c1a56f5680419d3a16ff55d97ec3c\":{\"timelines\":{\"be322c834ed9e709e63b5c9698691910\":[\"foo\"]},\"generation\":123}},\"size\":1}".to_string();
        assert_eq!(encoded, expected);

        let decoded = serde_json::from_str::<DeletionList>(&encoded)?;
        assert_eq!(example, decoded);

        Ok(())
    }
}