//! The deleter is the final stage in the deletion queue. It accumulates remote
//! paths to delete, and periodically executes them in batches of up to 1000
//! using the DeleteObjects request.
//!
//! Its purpose is to increase efficiency of remote storage I/O by issuing a smaller
//! number of full-sized DeleteObjects requests, rather than a larger number of
//! smaller requests.
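//!
//! A minimal sketch of how this stage can be driven. The channel capacity, the
//! `remote_storage` and `paths` values, and the spawning shown here are
//! illustrative assumptions, not the actual wiring in the parent module:
//!
//! ```ignore
//! let (tx, rx) = tokio::sync::mpsc::channel(16384);
//! let cancel = CancellationToken::new();
//! let mut deleter = Deleter::new(remote_storage, rx, cancel.clone());
//! tokio::spawn(async move { deleter.background().await });
//!
//! // Paths accumulate until a batch is full, a Flush message arrives, or the
//! // autoflush interval elapses.
//! tx.send(DeleterMessage::Delete(paths)).await?;
//! ```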

use remote_storage::GenericRemoteStorage;
use remote_storage::RemotePath;
use remote_storage::TimeoutOrCancel;
use remote_storage::MAX_KEYS_PER_DELETE;
use std::time::Duration;
use tokio_util::sync::CancellationToken;
use tracing::info;
use tracing::warn;
use utils::backoff;

use crate::metrics;

use super::DeletionQueueError;
use super::FlushOp;

const AUTOFLUSH_INTERVAL: Duration = Duration::from_secs(10);

pub(super) enum DeleterMessage {
    Delete(Vec<RemotePath>),
    Flush(FlushOp),
}

/// Non-persistent deletion queue, for coalescing multiple object deletes into
/// larger DeleteObjects requests.
pub(super) struct Deleter {
    // Accumulate up to 1000 keys for the next deletion operation
    accumulator: Vec<RemotePath>,

    rx: tokio::sync::mpsc::Receiver<DeleterMessage>,

    cancel: CancellationToken,
    remote_storage: GenericRemoteStorage,
}

impl Deleter {
    pub(super) fn new(
        remote_storage: GenericRemoteStorage,
        rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            remote_storage,
            rx,
            cancel,
            accumulator: Vec::new(),
        }
    }

    /// Wrap the remote `delete_objects` with a failpoint
    async fn remote_delete(&self) -> Result<(), anyhow::Error> {
        // A backoff::retry is used here for two reasons:
        // - To provide a backoff rather than busy-polling the API on errors
        // - To absorb transient 429/503 conditions without hitting our error
        //   logging path for issues deleting objects.
        backoff::retry(
            || async {
                fail::fail_point!("deletion-queue-before-execute", |_| {
                    info!("Skipping execution, failpoint set");

                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["failpoint"])
                        .inc();
                    Err(anyhow::anyhow!("failpoint: deletion-queue-before-execute"))
                });

                self.remote_storage
                    .delete_objects(&self.accumulator, &self.cancel)
                    .await
            },
            TimeoutOrCancel::caused_by_cancel,
            3,
            10,
            "executing deletion batch",
            &self.cancel,
        )
        .await
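        // `retry` yields None when it gives up because the cancellation token fired;
        // map that to a "Shutting down" error, then flatten the inner Result from the
        // deletion attempt itself.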
        .ok_or_else(|| anyhow::anyhow!("Shutting down"))
        .and_then(|x| x)
    }

    /// Block until everything in accumulator has been executed
    async fn flush(&mut self) -> Result<(), DeletionQueueError> {
        while !self.accumulator.is_empty() && !self.cancel.is_cancelled() {
            match self.remote_delete().await {
                Ok(()) => {
                    // Note: we assume that the remote storage layer returns Ok(()) if some
                    // or all of the deleted objects were already gone.
                    metrics::DELETION_QUEUE
                        .keys_executed
                        .inc_by(self.accumulator.len() as u64);
                    info!(
                        "Executed deletion batch {}..{}",
                        self.accumulator
                            .first()
                            .expect("accumulator should be non-empty"),
                        self.accumulator
                            .last()
                            .expect("accumulator should be non-empty"),
                    );
                    self.accumulator.clear();
                }
                Err(e) => {
                    if self.cancel.is_cancelled() {
                        return Err(DeletionQueueError::ShuttingDown);
                    }
                    warn!("DeleteObjects request failed: {e:#}, will continue trying");
                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["execute"])
                        .inc();
                }
            };
        }
        if self.cancel.is_cancelled() {
            // Expose an error because we may not have actually flushed everything
            Err(DeletionQueueError::ShuttingDown)
        } else {
            Ok(())
        }
    }

    pub(super) async fn background(&mut self) -> Result<(), DeletionQueueError> {
        self.accumulator.reserve(MAX_KEYS_PER_DELETE);

        loop {
            if self.cancel.is_cancelled() {
                return Err(DeletionQueueError::ShuttingDown);
            }

            let msg = match tokio::time::timeout(AUTOFLUSH_INTERVAL, self.rx.recv()).await {
                Ok(Some(m)) => m,
                Ok(None) => {
                    // All queue senders closed
                    info!("Shutting down");
                    return Err(DeletionQueueError::ShuttingDown);
                }
                Err(_) => {
                    // Timeout: we hit the deadline to execute whatever we have in hand.
                    // flush() returns immediately if no work is pending.
                    self.flush().await?;

                    continue;
                }
            };

            match msg {
                DeleterMessage::Delete(mut list) => {
                    // Batch the incoming paths into the accumulator, executing a
                    // DeleteObjects request whenever the accumulator fills up.
                    while !list.is_empty() || self.accumulator.len() == MAX_KEYS_PER_DELETE {
                        if self.accumulator.len() == MAX_KEYS_PER_DELETE {
                            // We have accumulated a full batch of keys: execute it
                            // before taking any more.
                            self.flush().await?;
                            assert_eq!(self.accumulator.len(), 0);
                        }

                        let available_slots = MAX_KEYS_PER_DELETE - self.accumulator.len();
                        let take_count = std::cmp::min(available_slots, list.len());
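                        // Take paths from the tail of `list`: draining a suffix avoids
                        // shifting the remaining elements forward.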
                        for path in list.drain(list.len() - take_count..) {
                            self.accumulator.push(path);
                        }
                    }
                }
                DeleterMessage::Flush(flush_op) => {
                    // If flush() errors, we drop the flush_op and the caller will get
                    // an error recv()'ing their oneshot channel.
                    self.flush().await?;
                    flush_op.notify();
                }
            }
        }
    }
}