mod deleter;
mod list_writer;
mod validator;

use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context;
use camino::Utf8PathBuf;
use deleter::DeleterMessage;
use list_writer::ListWriterQueueMessage;
use pageserver_api::shard::TenantShardId;
use remote_storage::{GenericRemoteStorage, RemotePath};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tokio_util::sync::CancellationToken;
use tracing::{Instrument, debug, error};
use utils::crashsafe::path_with_suffix_extension;
use utils::generation::Generation;
use utils::id::TimelineId;
use utils::lsn::{AtomicLsn, Lsn};
use validator::ValidatorQueueMessage;

use self::deleter::Deleter;
use self::list_writer::{DeletionOp, ListWriter, RecoverOp};
use self::validator::Validator;
use crate::config::PageServerConf;
use crate::controller_upcall_client::ControlPlaneGenerationsApi;
use crate::metrics;
use crate::tenant::remote_timeline_client::{LayerFileMetadata, remote_timeline_path};
use crate::tenant::storage_layer::LayerName;
use crate::virtual_file::{MaybeFatalIo, VirtualFile};

// TODO: make the wait before executing deletions configurable

/// We aggregate object deletions from many tenants in one place, for several reasons:
/// - Coalesce deletions into fewer DeleteObjects calls
/// - Enable Tenant/Timeline lifetimes to be shorter than the time it takes
///   to flush any outstanding deletions.
/// - Globally control throughput of deletions, as these are a low priority task: do
///   not compete with the same S3 clients/connections used for higher priority uploads.
/// - Enable gating deletions on validation of a tenant's generation number, to make
///   it safe to multi-attach tenants (see docs/rfcs/025-generation-numbers.md)
///
/// There are two kinds of deletion: deferred and immediate. A deferred deletion
/// may be intentionally delayed to protect passive readers of S3 data, and is
/// subject to a generation number validation step. An immediate deletion is
/// ready to execute immediately, and is only queued up so that it can be coalesced
/// with other deletions in flight.
///
/// Deferred deletions pass through three steps:
/// - ListWriter: accumulate deletion requests from Timelines, and batch them up into
///   DeletionLists, which are persisted to disk.
/// - Validator: accumulate deletion lists, and validate them en masse prior to passing
///   the keys in the list onward for actual deletion. Also validate remote_consistent_lsn
///   updates for running timelines.
/// - Deleter: accumulate object keys that the validator has validated, and execute them in
///   batches of 1000 keys via DeleteObjects.
///
/// Non-deferred deletions, such as during timeline deletion, bypass the first
/// two stages and are passed straight into the Deleter.
///
/// Internally, each stage is joined by a channel to the next. On disk, there is only
/// one queue (of DeletionLists), which is written by the frontend and consumed
/// by the backend.
#[derive(Clone)]
pub struct DeletionQueue {
    client: DeletionQueueClient,

    // Parent cancellation token for the tokens passed into background workers
    cancel: CancellationToken,
}
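
// Schematic of the deferred-deletion pipeline described above:
//
//   DeletionQueueClient --> ListWriter --> Validator --> Deleter
//                               |                           |
//                          DeletionLists              DeleteObjects
//                          (local disk)             (remote storage)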

/// Opaque wrapper around individual worker tasks, to avoid making the
/// worker objects themselves public
pub struct DeletionQueueWorkers<C>
where
    C: ControlPlaneGenerationsApi + Send + Sync,
{
    frontend: ListWriter,
    backend: Validator<C>,
    executor: Deleter,
}

impl<C> DeletionQueueWorkers<C>
where
    C: ControlPlaneGenerationsApi + Send + Sync + 'static,
{
    pub fn spawn_with(mut self, runtime: &tokio::runtime::Handle) -> tokio::task::JoinHandle<()> {
        let jh_frontend = runtime.spawn(async move {
            self.frontend
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion frontend"))
                .await
        });
        let jh_backend = runtime.spawn(async move {
            self.backend
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion backend"))
                .await
        });
        let jh_executor = runtime.spawn(async move {
            self.executor
                .background()
                .instrument(tracing::info_span!(parent: None, "deletion executor"))
                .await
        });

        runtime.spawn({
            async move {
                jh_frontend.await.expect("error joining frontend worker");
                jh_backend.await.expect("error joining backend worker");
                drop(jh_executor.await.expect("error joining executor worker"));
            }
        })
    }
}

/// A FlushOp is just a oneshot channel, where we send the transmit side down
/// another channel, and the receive side will receive a message when the channel
/// we're flushing has reached the FlushOp we sent into it.
///
/// The only extra behavior beyond the channel is that the notify() method does not
/// return an error when the receive side has been dropped, because in this use case
/// it is harmless (the code that initiated the flush no longer cares about the result).
#[derive(Debug)]
struct FlushOp {
    tx: tokio::sync::oneshot::Sender<()>,
}

impl FlushOp {
    fn new() -> (Self, tokio::sync::oneshot::Receiver<()>) {
        let (tx, rx) = tokio::sync::oneshot::channel::<()>();
        (Self { tx }, rx)
    }

    fn notify(self) {
        if self.tx.send(()).is_err() {
            // oneshot channel closed. This is legal: a client could be destroyed while waiting for a flush.
            debug!("deletion queue flush from dropped client");
        };
    }
}
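
// A minimal sketch of the FlushOp pattern in isolation (`queue_tx` and the
// `Message` enum here are hypothetical placeholders, not part of this module):
//
//     let (flush_op, rx) = FlushOp::new();
//     queue_tx.send(Message::Flush(flush_op))?;
//     rx.await.ok(); // resolves once the worker calls flush_op.notify()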

#[derive(Clone, Debug)]
pub struct DeletionQueueClient {
    tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
    executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,

    lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
}

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct TenantDeletionList {
    /// For each Timeline, a list of key fragments to append to the timeline remote path
    /// when reconstructing a full key
    timelines: HashMap<TimelineId, Vec<String>>,

    /// The generation in which this deletion was emitted: note that this may not be the
    /// same as the generation of any layers being deleted. The generation of the layer
    /// has already been absorbed into the key fragments stored in `timelines`.
    generation: Generation,
}

impl TenantDeletionList {
    pub(crate) fn len(&self) -> usize {
        self.timelines.values().map(|v| v.len()).sum()
    }
}

/// Files ending with this suffix will be ignored and erased
/// during recovery at startup.
const TEMP_SUFFIX: &str = "tmp";

#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
struct DeletionList {
    /// Serialization version, for future use
    version: u8,

    /// Used for constructing a unique key for each deletion list we write out.
    sequence: u64,

    /// To avoid repeating tenant/timeline IDs in every key, we store keys in
    /// HashMaps nested by tenant shard ID, then timeline ID. Each tenant appears
    /// only once, with one unique generation ID: if someone tries to push a second
    /// generation ID for the same tenant, we will start a new DeletionList.
    tenants: HashMap<TenantShardId, TenantDeletionList>,

    /// Avoid having to walk `tenants` to calculate the number of keys in
    /// the nested deletion lists
    size: usize,

    /// Set to true when the list has undergone validation with the control
    /// plane and the remaining contents of `tenants` are valid. A list may
    /// also be implicitly marked valid by DeletionHeader.validated_sequence
    /// advancing to >= DeletionList.sequence
    #[serde(default)]
    #[serde(skip_serializing_if = "std::ops::Not::not")]
    validated: bool,
}

#[derive(Debug, Serialize, Deserialize)]
struct DeletionHeader {
    /// Serialization version, for future use
    version: u8,

    /// The highest sequence number (inclusive) that has been validated. All deletion
    /// lists on disk with a sequence <= this value are safe to execute.
    validated_sequence: u64,
}

impl DeletionHeader {
    const VERSION_LATEST: u8 = 1;

    fn new(validated_sequence: u64) -> Self {
        Self {
            version: Self::VERSION_LATEST,
            validated_sequence,
        }
    }

    async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
        debug!("Saving deletion list header {:?}", self);
        let header_bytes = serde_json::to_vec(self).context("serialize deletion header")?;
        let header_path = conf.deletion_header_path();
        let temp_path = path_with_suffix_extension(&header_path, TEMP_SUFFIX);
        VirtualFile::crashsafe_overwrite(header_path, temp_path, header_bytes)
            .await
            .maybe_fatal_err("save deletion header")?;

        Ok(())
    }
}
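
// For reference, a saved DeletionHeader is a tiny JSON document; with an
// illustrative validated_sequence of 7, `save` would write:
//
//     {"version":1,"validated_sequence":7}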

impl DeletionList {
    const VERSION_LATEST: u8 = 1;
    fn new(sequence: u64) -> Self {
        Self {
            version: Self::VERSION_LATEST,
            sequence,
            tenants: HashMap::new(),
            size: 0,
            validated: false,
        }
    }

    fn is_empty(&self) -> bool {
        self.tenants.is_empty()
    }

    fn len(&self) -> usize {
        self.size
    }

    /// Returns true if the push was accepted, false if the caller must start a new
    /// deletion list.
    fn push(
        &mut self,
        tenant: &TenantShardId,
        timeline: &TimelineId,
        generation: Generation,
        objects: &mut Vec<RemotePath>,
    ) -> bool {
        if objects.is_empty() {
            // Avoid inserting an empty TenantDeletionList: this preserves the property
            // that if we have no keys, then self.tenants is empty (used in Self::is_empty)
            return true;
        }

        let tenant_entry = self
            .tenants
            .entry(*tenant)
            .or_insert_with(|| TenantDeletionList {
                timelines: HashMap::new(),
                generation,
            });

        if tenant_entry.generation != generation {
            // Only one generation per tenant per list: signal to
            // caller to start a new list.
            return false;
        }

        let timeline_entry = tenant_entry.timelines.entry(*timeline).or_default();

        let timeline_remote_path = remote_timeline_path(tenant, timeline);

        self.size += objects.len();
        timeline_entry.extend(objects.drain(..).map(|p| {
            p.strip_prefix(&timeline_remote_path)
                .expect("Timeline paths always start with the timeline prefix")
                .to_string()
        }));
        true
    }

    fn into_remote_paths(self) -> Vec<RemotePath> {
        let mut result = Vec::new();
        for (tenant, tenant_deletions) in self.tenants.into_iter() {
            for (timeline, timeline_layers) in tenant_deletions.timelines.into_iter() {
                let timeline_remote_path = remote_timeline_path(&tenant, &timeline);
                result.extend(
                    timeline_layers
                        .into_iter()
                        .map(|l| timeline_remote_path.join(Utf8PathBuf::from(l))),
                );
            }
        }

        result
    }

    async fn save(&self, conf: &'static PageServerConf) -> anyhow::Result<()> {
        let path = conf.deletion_list_path(self.sequence);
        let temp_path = path_with_suffix_extension(&path, TEMP_SUFFIX);

        let bytes = serde_json::to_vec(self).expect("Failed to serialize deletion list");

        VirtualFile::crashsafe_overwrite(path, temp_path, bytes)
            .await
            .maybe_fatal_err("save deletion list")
            .map_err(Into::into)
    }
}
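
// A minimal sketch of the push contract (hypothetical bindings; see
// `deletion_list_serialization` in the tests below for a concrete round-trip):
//
//     let mut list = DeletionList::new(1);
//     assert!(list.push(&tenant, &timeline, Generation::new(2), &mut objects));
//     // A second generation for the same tenant is refused: the caller seals
//     // this list and starts DeletionList::new(2) for the new generation.
//     assert!(!list.push(&tenant, &timeline, Generation::new(3), &mut more_objects));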

impl std::fmt::Display for DeletionList {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "DeletionList<seq={}, tenants={}, keys={}>",
            self.sequence,
            self.tenants.len(),
            self.size
        )
    }
}

struct PendingLsn {
    projected: Lsn,
    result_slot: Arc<AtomicLsn>,
}

struct TenantLsnState {
    timelines: HashMap<TimelineId, PendingLsn>,

    // In what generation was the most recent update proposed?
    generation: Generation,
}

#[derive(Default)]
struct VisibleLsnUpdates {
    tenants: HashMap<TenantShardId, TenantLsnState>,
}

impl VisibleLsnUpdates {
    fn new() -> Self {
        Self {
            tenants: HashMap::new(),
        }
    }
}

impl std::fmt::Debug for VisibleLsnUpdates {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "VisibleLsnUpdates({} tenants)", self.tenants.len())
    }
}

#[derive(Error, Debug)]
pub enum DeletionQueueError {
    #[error("Deletion queue unavailable during shutdown")]
    ShuttingDown,
}

impl DeletionQueueClient {
    /// This is cancel-safe. If you drop the future before it completes, the message
    /// is not pushed, although in the context of the deletion queue it doesn't matter: once
    /// we decide to do a deletion the decision is always final.
    fn do_push<T>(
        &self,
        queue: &tokio::sync::mpsc::UnboundedSender<T>,
        msg: T,
    ) -> Result<(), DeletionQueueError> {
        match queue.send(msg) {
            Ok(_) => Ok(()),
            Err(e) => {
                // This shouldn't happen, we should shut down all tenants before
                // we shut down the global delete queue. If we encounter a bug like this,
                // we may leak objects as deletions won't be processed.
                error!("Deletion queue closed while pushing, shutting down? ({e})");
                Err(DeletionQueueError::ShuttingDown)
            }
        }
    }

    pub(crate) fn recover(
        &self,
        attached_tenants: HashMap<TenantShardId, Generation>,
    ) -> Result<(), DeletionQueueError> {
        self.do_push(
            &self.tx,
            ListWriterQueueMessage::Recover(RecoverOp { attached_tenants }),
        )
    }

    /// When a Timeline wishes to update the remote_consistent_lsn that it exposes to the outside
    /// world, it must validate its generation number before doing so. Rather than do this synchronously,
    /// we allow the timeline to publish updates at will via this API, and then read back what LSN was most
    /// recently validated separately.
    ///
    /// In this function we publish the LSN to the `projected` field of the timeline's entry in the VisibleLsnUpdates. The
    /// backend will later wake up and notice that the tenant's generation requires validation.
    pub(crate) async fn update_remote_consistent_lsn(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        current_generation: Generation,
        lsn: Lsn,
        result_slot: Arc<AtomicLsn>,
    ) {
        let mut locked = self
            .lsn_table
            .write()
            .expect("Lock should never be poisoned");

        let tenant_entry = locked
            .tenants
            .entry(tenant_shard_id)
            .or_insert(TenantLsnState {
                timelines: HashMap::new(),
                generation: current_generation,
            });

        if tenant_entry.generation != current_generation {
            // Generation might have changed if we were detached and then re-attached: in this case,
            // state from the previous generation cannot be trusted.
            tenant_entry.timelines.clear();
            tenant_entry.generation = current_generation;
        }

        tenant_entry.timelines.insert(
            timeline_id,
            PendingLsn {
                projected: lsn,
                result_slot,
            },
        );
    }

    /// Submit a list of layers for deletion: this function will return before the deletion is
    /// persistent, but it may be executed at any time after this function enters: do not push
    /// layers until you're sure they can be deleted safely (i.e. remote metadata no longer
    /// references them).
    ///
    /// The `current_generation` is the generation of this pageserver's current attachment. The
    /// generations in `layers` are the generations in which those layers were written.
    pub(crate) fn push_layers(
        &self,
        tenant_shard_id: TenantShardId,
        timeline_id: TimelineId,
        current_generation: Generation,
        layers: Vec<(LayerName, LayerFileMetadata)>,
    ) -> Result<(), DeletionQueueError> {
        // None generations are not valid for attached tenants: they must always be attached in
        // a known generation. None generations are still permitted for layers in the index because
        // they may be historical.
        assert!(!current_generation.is_none());

        metrics::DELETION_QUEUE
            .keys_submitted
            .inc_by(layers.len() as u64);
        self.do_push(
            &self.tx,
            ListWriterQueueMessage::Delete(DeletionOp {
                tenant_shard_id,
                timeline_id,
                layers,
                generation: current_generation,
                objects: Vec::new(),
            }),
        )
    }

    /// This is cancel-safe. If you drop the future the flush may still happen in the background.
    async fn do_flush<T>(
        &self,
        queue: &tokio::sync::mpsc::UnboundedSender<T>,
        msg: T,
        rx: tokio::sync::oneshot::Receiver<()>,
    ) -> Result<(), DeletionQueueError> {
        self.do_push(queue, msg)?;
        if rx.await.is_err() {
            // This shouldn't happen if tenants are shut down before deletion queue. If we
            // encounter a bug like this, then a flusher will incorrectly believe it has flushed
            // when it hasn't, possibly leading to leaking objects.
            error!("Deletion queue dropped flush op while client was still waiting");
            Err(DeletionQueueError::ShuttingDown)
        } else {
            Ok(())
        }
    }

    /// Wait until all previous deletions are persistent (either executed, or written to a DeletionList)
    ///
    /// This is cancel-safe. If you drop the future the flush may still happen in the background.
    pub async fn flush(&self) -> Result<(), DeletionQueueError> {
        let (flush_op, rx) = FlushOp::new();
        self.do_flush(&self.tx, ListWriterQueueMessage::Flush(flush_op), rx)
            .await
    }

    /// Issue a flush without waiting for it to complete. This is useful on advisory flushes where
    /// the caller wants to avoid the risk of waiting for lots of enqueued work, such as on tenant
    /// detach where flushing is nice but not necessary.
    ///
    /// This function provides no guarantees of work being done.
    pub fn flush_advisory(&self) {
        let (flush_op, _) = FlushOp::new();

        // Transmit the flush message, ignoring any result (such as a closed channel during shutdown).
        drop(self.tx.send(ListWriterQueueMessage::FlushExecute(flush_op)));
    }

    // Wait until all previous deletions are executed
    pub(crate) async fn flush_execute(&self) -> Result<(), DeletionQueueError> {
        debug!("flush_execute: flushing to deletion lists...");
        // Flush any buffered work to deletion lists
        self.flush().await?;

        // Flush the backend into the executor of deletion lists
        let (flush_op, rx) = FlushOp::new();
        debug!("flush_execute: flushing backend...");
        self.do_flush(&self.tx, ListWriterQueueMessage::FlushExecute(flush_op), rx)
            .await?;
        debug!("flush_execute: finished flushing backend...");

        // Flush any immediate-mode deletions (the above backend flush will only flush
        // the executor if deletions had flowed through the backend)
        debug!("flush_execute: flushing execution...");
        self.flush_immediate().await?;
        debug!("flush_execute: finished flushing execution...");
        Ok(())
    }

    /// This interface bypasses the persistent deletion queue, and any validation
    /// that this pageserver is still eligible to execute the deletions. It is for
    /// use in timeline deletions, where the control plane is telling us we may
    /// delete everything in the timeline.
    ///
    /// DO NOT USE THIS FROM GC OR COMPACTION CODE. Use the regular `push_layers`.
    pub(crate) async fn push_immediate(
        &self,
        objects: Vec<RemotePath>,
    ) -> Result<(), DeletionQueueError> {
        metrics::DELETION_QUEUE
            .keys_submitted
            .inc_by(objects.len() as u64);
        self.executor_tx
            .send(DeleterMessage::Delete(objects))
            .await
            .map_err(|_| DeletionQueueError::ShuttingDown)
    }

    /// Companion to push_immediate. When this returns Ok, all prior objects sent
    /// into push_immediate have been deleted from remote storage.
    pub(crate) async fn flush_immediate(&self) -> Result<(), DeletionQueueError> {
        let (flush_op, rx) = FlushOp::new();
        self.executor_tx
            .send(DeleterMessage::Flush(flush_op))
            .await
            .map_err(|_| DeletionQueueError::ShuttingDown)?;

        rx.await.map_err(|_| DeletionQueueError::ShuttingDown)
    }
}
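
// A minimal sketch of typical client usage (the bindings are assumptions; the
// `deletion_queue_smoke` test below shows a complete, concrete sequence):
//
//     client.push_layers(tenant_shard_id, timeline_id, current_generation, layers)?;
//     client.flush().await?;         // deletions now persisted to a DeletionList
//     client.flush_execute().await?; // validated and executed via DeleteObjects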

impl DeletionQueue {
    pub fn new_client(&self) -> DeletionQueueClient {
        self.client.clone()
    }

    /// Caller may use the returned object to construct clients with new_client.
    /// Caller should spawn the background() members of the returned workers, e.g. via spawn_with:
    /// we don't spawn those inside new() so that the caller can use their runtime/spans of choice.
    pub fn new<C>(
        remote_storage: GenericRemoteStorage,
        controller_upcall_client: Option<C>,
        conf: &'static PageServerConf,
    ) -> (Self, DeletionQueueWorkers<C>)
    where
        C: ControlPlaneGenerationsApi + Send + Sync,
    {
        // Unbounded channel: enables non-async functions to submit deletions. The actual length is
        // constrained by how promptly the ListWriter wakes up and drains it, which should be frequent
        // enough to avoid this taking a pathologically large amount of memory.
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();

        // Shallow channel: it carries DeletionLists which each contain up to thousands of deletions
        let (backend_tx, backend_rx) = tokio::sync::mpsc::channel(16);

        // Shallow channel: it carries lists of paths, and we expect the main queueing to
        // happen in the backend (persistent), not in this queue.
        let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16);

        let lsn_table = Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new()));

        // The deletion queue has an independent cancellation token to
        // the general pageserver shutdown token, because it stays alive a bit
        // longer to flush after Tenants have all been torn down.
        let cancel = CancellationToken::new();

        (
            Self {
                client: DeletionQueueClient {
                    tx,
                    executor_tx: executor_tx.clone(),
                    lsn_table: lsn_table.clone(),
                },
                cancel: cancel.clone(),
            },
            DeletionQueueWorkers {
                frontend: ListWriter::new(conf, rx, backend_tx, cancel.clone()),
                backend: Validator::new(
                    conf,
                    backend_rx,
                    executor_tx,
                    controller_upcall_client,
                    lsn_table.clone(),
                    cancel.clone(),
                ),
                executor: Deleter::new(remote_storage, executor_rx, cancel.clone()),
            },
        )
    }

    pub async fn shutdown(&mut self, timeout: Duration) {
        match tokio::time::timeout(timeout, self.client.flush()).await {
            Ok(Ok(())) => {
                tracing::info!("Deletion queue flushed successfully on shutdown")
            }
            Ok(Err(DeletionQueueError::ShuttingDown)) => {
                // This is not harmful for correctness, but is unexpected: the deletion
                // queue's workers should stay alive as long as there are any client handles instantiated.
                tracing::warn!("Deletion queue stopped prematurely");
            }
            Err(_timeout) => {
                tracing::warn!("Timed out flushing deletion queue on shutdown")
            }
        }

        // We only cancel _after_ flushing: otherwise we would be shutting down the
        // components that do the flush.
        self.cancel.cancel();
    }
}
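
// A minimal sketch of wiring the queue at startup (mirrors `setup()` in the
// tests below; `remote_storage`, `controller_upcall_client`, `conf` and
// `attached_tenants` are assumed to exist in the caller's context):
//
//     let (deletion_queue, workers) =
//         DeletionQueue::new(remote_storage, Some(controller_upcall_client), conf);
//     let worker_join = workers.spawn_with(&tokio::runtime::Handle::current());
//     let client = deletion_queue.new_client();
//     client.recover(attached_tenants)?;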

#[cfg(test)]
mod test {
    use std::io::ErrorKind;
    use std::time::Duration;

    use camino::Utf8Path;
    use hex_literal::hex;
    use pageserver_api::key::Key;
    use pageserver_api::shard::ShardIndex;
    use pageserver_api::upcall_api::ReAttachResponseTenant;
    use remote_storage::{RemoteStorageConfig, RemoteStorageKind};
    use tokio::task::JoinHandle;
    use tracing::info;

    use super::*;
    use crate::controller_upcall_client::RetryForeverError;
    use crate::tenant::harness::TenantHarness;
    use crate::tenant::storage_layer::DeltaLayerName;
    pub const TIMELINE_ID: TimelineId =
        TimelineId::from_array(hex!("11223344556677881122334455667788"));

    pub const EXAMPLE_LAYER_NAME: LayerName = LayerName::Delta(DeltaLayerName {
        key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
        lsn_range: Lsn(0x00000000016B59D8)..Lsn(0x00000000016B5A51),
    });

    // When you need a second layer in a test.
    pub const EXAMPLE_LAYER_NAME_ALT: LayerName = LayerName::Delta(DeltaLayerName {
        key_range: Key::from_i128(0x0)..Key::from_i128(0xFFFFFFFFFFFFFFFF),
        lsn_range: Lsn(0x00000000016B5A51)..Lsn(0x00000000016B5A61),
    });

    struct TestSetup {
        harness: TenantHarness,
        remote_fs_dir: Utf8PathBuf,
        storage: GenericRemoteStorage,
        mock_control_plane: MockControlPlane,
        deletion_queue: DeletionQueue,
        worker_join: JoinHandle<()>,
    }

    impl TestSetup {
        /// Simulate a pageserver restart by destroying and recreating the deletion queue
        async fn restart(&mut self) {
            let (deletion_queue, workers) = DeletionQueue::new(
                self.storage.clone(),
                Some(self.mock_control_plane.clone()),
                self.harness.conf,
            );

            tracing::debug!("Spawning worker for new deletion queue");
            let worker_join = workers.spawn_with(&tokio::runtime::Handle::current());

            let old_worker_join = std::mem::replace(&mut self.worker_join, worker_join);
            let old_deletion_queue = std::mem::replace(&mut self.deletion_queue, deletion_queue);

            tracing::debug!("Joining worker from previous queue");
            old_deletion_queue.cancel.cancel();
            old_worker_join
                .await
                .expect("Failed to join workers for previous deletion queue");
        }

        fn set_latest_generation(&self, gen_: Generation) {
            let tenant_shard_id = self.harness.tenant_shard_id;
            self.mock_control_plane
                .latest_generation
                .lock()
                .unwrap()
                .insert(tenant_shard_id, gen_);
        }

        /// Returns remote layer file name, suitable for use in assert_remote_files
        fn write_remote_layer(
            &self,
            file_name: LayerName,
            gen_: Generation,
        ) -> anyhow::Result<String> {
            let tenant_shard_id = self.harness.tenant_shard_id;
            let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
            let remote_timeline_path = self.remote_fs_dir.join(relative_remote_path.get_path());
            std::fs::create_dir_all(&remote_timeline_path)?;
            let remote_layer_file_name = format!("{}{}", file_name, gen_.get_suffix());

            let content: Vec<u8> = format!("placeholder contents of {file_name}").into();

            std::fs::write(
                remote_timeline_path.join(remote_layer_file_name.clone()),
                content,
            )?;

            Ok(remote_layer_file_name)
        }
    }

    #[derive(Debug, Clone)]
    struct MockControlPlane {
        pub latest_generation: std::sync::Arc<std::sync::Mutex<HashMap<TenantShardId, Generation>>>,
    }

    impl MockControlPlane {
        fn new() -> Self {
            Self {
                latest_generation: Arc::default(),
            }
        }
    }

    impl ControlPlaneGenerationsApi for MockControlPlane {
        async fn re_attach(
            &self,
            _conf: &PageServerConf,
        ) -> Result<HashMap<TenantShardId, ReAttachResponseTenant>, RetryForeverError> {
            unimplemented!()
        }

        async fn validate(
            &self,
            tenants: Vec<(TenantShardId, Generation)>,
        ) -> Result<HashMap<TenantShardId, bool>, RetryForeverError> {
            let mut result = HashMap::new();

            let latest_generation = self.latest_generation.lock().unwrap();

            for (tenant_shard_id, generation) in tenants {
                if let Some(latest) = latest_generation.get(&tenant_shard_id) {
                    result.insert(tenant_shard_id, *latest == generation);
                }
            }

            Ok(result)
        }
    }

    async fn setup(test_name: &str) -> anyhow::Result<TestSetup> {
        let test_name = Box::leak(Box::new(format!("deletion_queue__{test_name}")));
        let harness = TenantHarness::create(test_name).await?;

        // We do not load() the harness: we only need its config and remote_storage

        // Set up a GenericRemoteStorage targeting a directory
        let remote_fs_dir = harness.conf.workdir.join("remote_fs");
        std::fs::create_dir_all(remote_fs_dir)?;
        let remote_fs_dir = harness.conf.workdir.join("remote_fs").canonicalize_utf8()?;
        let storage_config = RemoteStorageConfig {
            storage: RemoteStorageKind::LocalFs {
                local_path: remote_fs_dir.clone(),
            },
            timeout: RemoteStorageConfig::DEFAULT_TIMEOUT,
            small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT,
        };
        let storage = GenericRemoteStorage::from_config(&storage_config)
            .await
            .unwrap();

        let mock_control_plane = MockControlPlane::new();

        let (deletion_queue, worker) = DeletionQueue::new(
            storage.clone(),
            Some(mock_control_plane.clone()),
            harness.conf,
        );

        let worker_join = worker.spawn_with(&tokio::runtime::Handle::current());

        Ok(TestSetup {
            harness,
            remote_fs_dir,
            storage,
            mock_control_plane,
            deletion_queue,
            worker_join,
        })
    }

    // TODO: put this in a common location so that we can share with remote_timeline_client's tests
    fn assert_remote_files(expected: &[&str], remote_path: &Utf8Path) {
        let mut expected: Vec<String> = expected.iter().map(|x| String::from(*x)).collect();
        expected.sort();

        let mut found: Vec<String> = Vec::new();
        let dir = match std::fs::read_dir(remote_path) {
            Ok(d) => d,
            Err(e) => {
                if e.kind() == ErrorKind::NotFound {
                    if expected.is_empty() {
                        // We are asserting prefix is empty: it is expected that the dir is missing
                        return;
                    } else {
                        assert_eq!(expected, Vec::<String>::new());
                        unreachable!();
                    }
                } else {
                    panic!("Unexpected error listing {remote_path}: {e}");
                }
            }
        };

        for entry in dir.flatten() {
            let entry_name = entry.file_name();
            let fname = entry_name.to_str().unwrap();
            found.push(String::from(fname));
        }
        found.sort();

        assert_eq!(expected, found);
    }

    fn assert_local_files(expected: &[&str], directory: &Utf8Path) {
        let dir = match std::fs::read_dir(directory) {
            Ok(d) => d,
            Err(_) => {
                assert_eq!(expected, &Vec::<String>::new());
                return;
            }
        };
        let mut found = Vec::new();
        for dentry in dir {
            let dentry = dentry.unwrap();
            let file_name = dentry.file_name();
            let file_name_str = file_name.to_string_lossy();
            found.push(file_name_str.to_string());
        }
        found.sort();
        assert_eq!(expected, found);
    }

    #[tokio::test]
    async fn deletion_queue_smoke() -> anyhow::Result<()> {
        // Basic test that the deletion queue processes the deletions we pass into it
        let ctx = setup("deletion_queue_smoke")
            .await
            .expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        let layer_file_name_1: LayerName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
        let tenant_shard_id = ctx.harness.tenant_shard_id;

        let content: Vec<u8> = "victim1 contents".into();
        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
        let deletion_prefix = ctx.harness.conf.deletion_prefix();

        // Exercise the distinction between the generation of the layers
        // we delete, and the generation of the running Tenant.
        let layer_generation = Generation::new(0xdeadbeef);
        let now_generation = Generation::new(0xfeedbeef);
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        let remote_layer_file_name_1 =
            format!("{}{}", layer_file_name_1, layer_generation.get_suffix());

        // Set mock control plane state to valid for our generation
        ctx.set_latest_generation(now_generation);

        // Inject a victim file to remote storage
        info!("Writing");
        std::fs::create_dir_all(&remote_timeline_path)?;
        std::fs::write(
            remote_timeline_path.join(remote_layer_file_name_1.clone()),
            content,
        )?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);

        // File should still be there after we push it to the queue (we haven't pushed enough to flush anything)
        info!("Pushing");
        client.push_layers(
            tenant_shard_id,
            TIMELINE_ID,
            now_generation,
            [(layer_file_name_1.clone(), layer_metadata)].to_vec(),
        )?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);

        assert_local_files(&[], &deletion_prefix);

        // File should still be there after we write a deletion list (we haven't pushed enough to execute anything)
        info!("Flushing");
        client.flush().await?;
        assert_remote_files(&[&remote_layer_file_name_1], &remote_timeline_path);
        assert_local_files(&["0000000000000001-01.list"], &deletion_prefix);

        // File should go away when we execute
        info!("Flush-executing");
        client.flush_execute().await?;
        assert_remote_files(&[], &remote_timeline_path);
        assert_local_files(&["header-01"], &deletion_prefix);

        // Flushing on an empty queue should succeed immediately, and not write any lists
        info!("Flush-executing on empty");
        client.flush_execute().await?;
        assert_local_files(&["header-01"], &deletion_prefix);

        Ok(())
    }

    #[tokio::test]
    async fn deletion_queue_validation() -> anyhow::Result<()> {
        let ctx = setup("deletion_queue_validation")
            .await
            .expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        // Generation that the control plane thinks is current
        let latest_generation = Generation::new(0xdeadbeef);
        // Generation that our DeletionQueue thinks the tenant is running with
        let stale_generation = latest_generation.previous();
        // Generation that our example layer file was written with
        let layer_generation = stale_generation.previous();
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        ctx.set_latest_generation(latest_generation);

        let tenant_shard_id = ctx.harness.tenant_shard_id;
        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());

        // Initial state: a remote layer exists
        let remote_layer_name = ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
        assert_remote_files(&[&remote_layer_name], &remote_timeline_path);

        tracing::debug!("Pushing...");
        client.push_layers(
            tenant_shard_id,
            TIMELINE_ID,
            stale_generation,
            [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
        )?;

        // We enqueued the operation in a stale generation: it should have failed validation
        tracing::debug!("Flushing...");
        tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
        assert_remote_files(&[&remote_layer_name], &remote_timeline_path);

        tracing::debug!("Pushing...");
        client.push_layers(
            tenant_shard_id,
            TIMELINE_ID,
            latest_generation,
            [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
        )?;

        // We enqueued the operation in a fresh generation: it should have passed validation
        tracing::debug!("Flushing...");
        tokio::time::timeout(Duration::from_secs(5), client.flush_execute()).await??;
        assert_remote_files(&[], &remote_timeline_path);

        Ok(())
    }

    #[tokio::test]
    async fn deletion_queue_recovery() -> anyhow::Result<()> {
        // Basic test that the deletion queue processes the deletions we pass into it
        let mut ctx = setup("deletion_queue_recovery")
            .await
            .expect("Failed test setup");
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::new())?;

        let tenant_shard_id = ctx.harness.tenant_shard_id;

        let relative_remote_path = remote_timeline_path(&tenant_shard_id, &TIMELINE_ID);
        let remote_timeline_path = ctx.remote_fs_dir.join(relative_remote_path.get_path());
        let deletion_prefix = ctx.harness.conf.deletion_prefix();

        let layer_generation = Generation::new(0xdeadbeef);
        let now_generation = Generation::new(0xfeedbeef);
        let layer_metadata =
            LayerFileMetadata::new(0xf00, layer_generation, ShardIndex::unsharded());

        // Inject a deletion two generations behind the post-restart generation: after restart,
        // this deletion should _not_ get executed (only the immediately previous
        // generation gets that treatment)
        let remote_layer_file_name_historical =
            ctx.write_remote_layer(EXAMPLE_LAYER_NAME, layer_generation)?;
        client.push_layers(
            tenant_shard_id,
            TIMELINE_ID,
            now_generation.previous(),
            [(EXAMPLE_LAYER_NAME.clone(), layer_metadata.clone())].to_vec(),
        )?;

        // Inject a deletion in the generation immediately before the post-restart generation:
        // after restart, this deletion should get executed, because we execute deletions in the
        // immediately previous generation on the same node.
        let remote_layer_file_name_previous =
            ctx.write_remote_layer(EXAMPLE_LAYER_NAME_ALT, layer_generation)?;
        client.push_layers(
            tenant_shard_id,
            TIMELINE_ID,
            now_generation,
            [(EXAMPLE_LAYER_NAME_ALT.clone(), layer_metadata.clone())].to_vec(),
        )?;

        client.flush().await?;
        assert_remote_files(
            &[
                &remote_layer_file_name_historical,
                &remote_layer_file_name_previous,
            ],
            &remote_timeline_path,
        );

        // Different generations for the same tenant will cause two separate
        // deletion lists to be emitted.
        assert_local_files(
            &["0000000000000001-01.list", "0000000000000002-01.list"],
            &deletion_prefix,
        );

        // Simulate a node restart: the latest generation advances
        let now_generation = now_generation.next();
        ctx.set_latest_generation(now_generation);

        // Restart the deletion queue
        drop(client);
        ctx.restart().await;
        let client = ctx.deletion_queue.new_client();
        client.recover(HashMap::from([(tenant_shard_id, now_generation)]))?;

        info!("Flush-executing");
        client.flush_execute().await?;
        // The deletion from the immediately prior generation was executed; the one from
        // an older generation was not.
        assert_remote_files(&[&remote_layer_file_name_historical], &remote_timeline_path);
        Ok(())
    }
}

/// A lightweight queue which can issue ordinary DeletionQueueClient objects, but doesn't do any persistence
/// or coalescing, and doesn't actually execute any deletions unless you call pump() to kick it.
#[cfg(test)]
pub(crate) mod mock {
    use std::sync::atomic::{AtomicUsize, Ordering};

    use tracing::info;

    use super::*;
    use crate::tenant::remote_timeline_client::remote_layer_path;

    pub struct ConsumerState {
        rx: tokio::sync::mpsc::UnboundedReceiver<ListWriterQueueMessage>,
        executor_rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
        cancel: CancellationToken,
        executed: Arc<AtomicUsize>,
    }

    impl ConsumerState {
        async fn consume(&mut self, remote_storage: &GenericRemoteStorage) {
            info!("Executing all pending deletions");

            // Drain both the frontend and executor channels, executing deletions inline
            loop {
                use either::Either;
                let msg = tokio::select! {
                    left = self.executor_rx.recv() => Either::Left(left),
                    right = self.rx.recv() => Either::Right(right),
                };
                match msg {
                    Either::Left(None) => break,
                    Either::Right(None) => break,
                    Either::Left(Some(DeleterMessage::Delete(objects))) => {
                        for path in objects {
                            match remote_storage.delete(&path, &self.cancel).await {
                                Ok(_) => {
                                    debug!("Deleted {path}");
                                }
                                Err(e) => {
                                    error!("Failed to delete {path}, leaking object! ({e})");
                                }
                            }
                            self.executed.fetch_add(1, Ordering::Relaxed);
                        }
                    }
                    Either::Left(Some(DeleterMessage::Flush(flush_op))) => {
                        flush_op.notify();
                    }
                    Either::Right(Some(ListWriterQueueMessage::Delete(op))) => {
                        let mut objects = op.objects;
                        for (layer, meta) in op.layers {
                            objects.push(remote_layer_path(
                                &op.tenant_shard_id.tenant_id,
                                &op.timeline_id,
                                meta.shard,
                                &layer,
                                meta.generation,
                            ));
                        }

                        for path in objects {
                            info!("Executing deletion {path}");
                            match remote_storage.delete(&path, &self.cancel).await {
                                Ok(_) => {
                                    debug!("Deleted {path}");
                                }
                                Err(e) => {
                                    error!("Failed to delete {path}, leaking object! ({e})");
                                }
                            }
                            self.executed.fetch_add(1, Ordering::Relaxed);
                        }
                    }
                    Either::Right(Some(ListWriterQueueMessage::Flush(op))) => {
                        op.notify();
                    }
                    Either::Right(Some(ListWriterQueueMessage::FlushExecute(op))) => {
                        // We have already executed all prior deletions because the mock does them inline
                        op.notify();
                    }
                    Either::Right(Some(ListWriterQueueMessage::Recover(_))) => {
                        // no-op in mock
                    }
                }
            }
        }
    }

    pub struct MockDeletionQueue {
        tx: tokio::sync::mpsc::UnboundedSender<ListWriterQueueMessage>,
        executor_tx: tokio::sync::mpsc::Sender<DeleterMessage>,
        lsn_table: Arc<std::sync::RwLock<VisibleLsnUpdates>>,
    }

    impl MockDeletionQueue {
        pub fn new(remote_storage: Option<GenericRemoteStorage>) -> Self {
            let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
            let (executor_tx, executor_rx) = tokio::sync::mpsc::channel(16384);

            let executed = Arc::new(AtomicUsize::new(0));

            let mut consumer = ConsumerState {
                rx,
                executor_rx,
                cancel: CancellationToken::new(),
                executed: executed.clone(),
            };

            tokio::spawn(async move {
                if let Some(remote_storage) = &remote_storage {
                    consumer.consume(remote_storage).await;
                }
            });

            Self {
                tx,
                executor_tx,
                lsn_table: Arc::new(std::sync::RwLock::new(VisibleLsnUpdates::new())),
            }
        }

        #[allow(clippy::await_holding_lock)]
        pub async fn pump(&self) {
            let (tx, rx) = tokio::sync::oneshot::channel();
            self.executor_tx
                .send(DeleterMessage::Flush(FlushOp { tx }))
                .await
                .expect("Failed to send flush message");
            rx.await.ok();
        }

        pub(crate) fn new_client(&self) -> DeletionQueueClient {
            DeletionQueueClient {
                tx: self.tx.clone(),
                executor_tx: self.executor_tx.clone(),
                lsn_table: self.lsn_table.clone(),
            }
        }
    }
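
    // A minimal sketch of driving the mock from a test (hypothetical bindings;
    // `remote_storage` is assumed to come from a test harness):
    //
    //     let mock = MockDeletionQueue::new(Some(remote_storage));
    //     let client = mock.new_client();
    //     client.push_layers(tenant_shard_id, timeline_id, generation, layers)?;
    //     mock.pump().await; // wait for the inline consumer to catch up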

    /// Test round-trip serialization/deserialization, and test stability of the format
    /// vs. a static expected string for the serialized version.
    #[test]
    fn deletion_list_serialization() -> anyhow::Result<()> {
        let tenant_id = "ad6c1a56f5680419d3a16ff55d97ec3c"
            .to_string()
            .parse::<TenantShardId>()?;
        let timeline_id = "be322c834ed9e709e63b5c9698691910"
            .to_string()
            .parse::<TimelineId>()?;
        let generation = Generation::new(123);

        let object =
            RemotePath::from_string(&format!("tenants/{tenant_id}/timelines/{timeline_id}/foo"))?;
        let mut objects = [object].to_vec();

        let mut example = DeletionList::new(1);
        example.push(&tenant_id, &timeline_id, generation, &mut objects);

        let encoded = serde_json::to_string(&example)?;

        let expected = "{\"version\":1,\"sequence\":1,\"tenants\":{\"ad6c1a56f5680419d3a16ff55d97ec3c\":{\"timelines\":{\"be322c834ed9e709e63b5c9698691910\":[\"foo\"]},\"generation\":123}},\"size\":1}".to_string();
        assert_eq!(encoded, expected);

        let decoded = serde_json::from_str::<DeletionList>(&encoded)?;
        assert_eq!(example, decoded);

        Ok(())
    }
}