//! The deleter is the final stage in the deletion queue. It accumulates remote
//! paths to delete, and periodically executes them in batches of up to
//! `max_keys_per_delete` objects (1000 for S3) using the DeleteObjects request.
//!
//! Its purpose is to increase the efficiency of remote storage I/O by issuing a
//! small number of full-sized DeleteObjects requests, rather than a large
//! number of smaller requests.
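//!
//! A minimal sketch of the intended flow (hypothetical sender-side code, not a
//! doctest):
//!
//! ```text
//! // Producers send batches of paths to the deleter...
//! tx.send(DeleterMessage::Delete(vec![path_a, path_b])).await?;
//! // ...which executes a DeleteObjects request once a batch reaches
//! // max_keys_per_delete keys, on an explicit Flush, or when
//! // AUTOFLUSH_INTERVAL elapses with work pending.
//! ```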

use remote_storage::GenericRemoteStorage;
use remote_storage::RemotePath;
use remote_storage::TimeoutOrCancel;
use std::time::Duration;
use tokio_util::sync::CancellationToken;
use tracing::info;
use tracing::warn;
use utils::backoff;
use utils::pausable_failpoint;

use crate::metrics;

use super::DeletionQueueError;
use super::FlushOp;

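/// How long to wait for more work before executing whatever has already been
/// accumulated, even if no explicit flush was requested.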
const AUTOFLUSH_INTERVAL: Duration = Duration::from_secs(10);

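/// Requests handled by the [`Deleter`] background task.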
pub(super) enum DeleterMessage {
    Delete(Vec<RemotePath>),
    Flush(FlushOp),
}

/// Non-persistent deletion queue, for coalescing multiple object deletes into
/// larger DeleteObjects requests.
pub(super) struct Deleter {
    // Accumulate up to `max_keys_per_delete` keys for the next deletion operation
    accumulator: Vec<RemotePath>,

    rx: tokio::sync::mpsc::Receiver<DeleterMessage>,

    cancel: CancellationToken,
    remote_storage: GenericRemoteStorage,
}

impl Deleter {
    pub(super) fn new(
        remote_storage: GenericRemoteStorage,
        rx: tokio::sync::mpsc::Receiver<DeleterMessage>,
        cancel: CancellationToken,
    ) -> Self {
        Self {
            remote_storage,
            rx,
            cancel,
            accumulator: Vec::new(),
        }
    }
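
    // A hypothetical wiring sketch (the channel capacity and spawning code are
    // assumptions, not this module's actual callers):
    //
    //   let (tx, rx) = tokio::sync::mpsc::channel(16384);
    //   let mut deleter = Deleter::new(remote_storage, rx, cancel.clone());
    //   tokio::spawn(async move { deleter.background().await });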

    /// Wrap the remote `delete_objects` with a failpoint
    async fn remote_delete(&self) -> Result<(), anyhow::Error> {
        // A backoff::retry is used here for two reasons:
        // - To provide a backoff rather than busy-polling the API on errors
        // - To absorb transient 429/503 conditions without hitting our error
        //   logging path for issues deleting objects.
        backoff::retry(
            || async {
                fail::fail_point!("deletion-queue-before-execute", |_| {
                    info!("Skipping execution, failpoint set");

                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["failpoint"])
                        .inc();
                    Err(anyhow::anyhow!("failpoint: deletion-queue-before-execute"))
                });

                self.remote_storage
                    .delete_objects(&self.accumulator, &self.cancel)
                    .await
            },
            TimeoutOrCancel::caused_by_cancel,
            3,
            10,
            "executing deletion batch",
            &self.cancel,
        )
        .await
        .ok_or_else(|| anyhow::anyhow!("Shutting down"))
        .and_then(|x| x)
    }
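
    // Note on the chain above: `backoff::retry` yields `None` when the
    // operation is cancelled, so `.ok_or_else(..)` maps cancellation to an
    // error, and `.and_then(|x| x)` flattens the remaining nested
    // `Result<Result<(), _>, _>` into a single `Result`.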

    /// Block until everything in the accumulator has been executed
    async fn flush(&mut self) -> Result<(), DeletionQueueError> {
        while !self.accumulator.is_empty() && !self.cancel.is_cancelled() {
            pausable_failpoint!("deletion-queue-before-execute-pause");
            match self.remote_delete().await {
                Ok(()) => {
                    // Note: we assume that the remote storage layer returns Ok(()) if some
                    // or all of the deleted objects were already gone.
                    metrics::DELETION_QUEUE
                        .keys_executed
                        .inc_by(self.accumulator.len() as u64);
                    info!(
                        "Executed deletion batch {}..{}",
                        self.accumulator
                            .first()
                            .expect("accumulator should be non-empty"),
                        self.accumulator
                            .last()
                            .expect("accumulator should be non-empty"),
                    );
                    self.accumulator.clear();
                }
                Err(e) => {
                    if self.cancel.is_cancelled() {
                        return Err(DeletionQueueError::ShuttingDown);
                    }
                    warn!("DeleteObjects request failed: {e:#}, will continue trying");
                    metrics::DELETION_QUEUE
                        .remote_errors
                        .with_label_values(&["execute"])
                        .inc();
                }
            };
        }
        if self.cancel.is_cancelled() {
            // Expose an error because we may not have actually flushed everything
            Err(DeletionQueueError::ShuttingDown)
        } else {
            Ok(())
        }
    }

    pub(super) async fn background(&mut self) -> Result<(), DeletionQueueError> {
        let max_keys_per_delete = self.remote_storage.max_keys_per_delete();
        self.accumulator.reserve(max_keys_per_delete);

        loop {
            if self.cancel.is_cancelled() {
                return Err(DeletionQueueError::ShuttingDown);
            }

            let msg = match tokio::time::timeout(AUTOFLUSH_INTERVAL, self.rx.recv()).await {
                Ok(Some(m)) => m,
                Ok(None) => {
                    // All queue senders closed
                    info!("Shutting down");
                    return Err(DeletionQueueError::ShuttingDown);
                }
                Err(_) => {
                    // Timeout: we hit the deadline to execute whatever we have in hand.
                    // flush() returns immediately if no work is pending.
                    self.flush().await?;

                    continue;
                }
            };

            match msg {
                DeleterMessage::Delete(mut list) => {
                    while !list.is_empty() || self.accumulator.len() == max_keys_per_delete {
                        if self.accumulator.len() == max_keys_per_delete {
                            // The accumulator is full: execute this batch before
                            // taking any more keys.
                            self.flush().await?;
                            assert_eq!(self.accumulator.len(), 0);
                        }

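                        // Move as many keys as will fit into the accumulator,
                        // draining from the tail of `list` to avoid shifting the
                        // remaining elements (order within a batch is irrelevant).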
                        let available_slots = max_keys_per_delete - self.accumulator.len();
                        let take_count = std::cmp::min(available_slots, list.len());
                        for path in list.drain(list.len() - take_count..) {
                            self.accumulator.push(path);
                        }
                    }
                }
                DeleterMessage::Flush(flush_op) => {
                    // If flush() errors, we drop the flush_op and the caller will get
                    // an error recv()'ing their oneshot channel.
                    self.flush().await?;
                    flush_op.notify();
                }
            }
        }
    }
}