use std::collections::{HashMap, HashSet, VecDeque};
use std::fmt::Debug;
use std::sync::Arc;
use std::sync::atomic::AtomicU32;

use chrono::NaiveDateTime;
use once_cell::sync::Lazy;
use tracing::info;
use utils::generation::Generation;
use utils::lsn::{AtomicLsn, Lsn};

use super::remote_timeline_client::is_same_remote_layer_path;
use super::storage_layer::{AsLayerDesc as _, LayerName, ResidentLayer};
use crate::tenant::metadata::TimelineMetadata;
use crate::tenant::remote_timeline_client::index::{IndexPart, LayerFileMetadata};

/// Kill switch for upload queue reordering in case it causes problems.
/// TODO: remove this once we have confidence in it.
static DISABLE_UPLOAD_QUEUE_REORDERING: Lazy<bool> =
    Lazy::new(|| std::env::var("DISABLE_UPLOAD_QUEUE_REORDERING").as_deref() == Ok("true"));

/// Kill switch for index upload coalescing in case it causes problems.
/// TODO: remove this once we have confidence in it.
static DISABLE_UPLOAD_QUEUE_INDEX_COALESCING: Lazy<bool> =
    Lazy::new(|| std::env::var("DISABLE_UPLOAD_QUEUE_INDEX_COALESCING").as_deref() == Ok("true"));
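
// Both switches are read once at first use and cached for the life of the process,
// since `Lazy` memoizes the first evaluation. A sketch of how they would be set
// (the exact deployment mechanism is environment-specific):
//
//   DISABLE_UPLOAD_QUEUE_REORDERING=true pageserver ...
//
// Toggling the environment variable after startup therefore has no effect.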

// clippy warns that Uninitialized is much smaller than Initialized, which wastes
// memory for Uninitialized variants. This doesn't matter in practice: there are not
// that many upload queues in a running pageserver, and most of them are initialized
// anyway.
#[allow(clippy::large_enum_variant)]
pub enum UploadQueue {
    Uninitialized,
    Initialized(UploadQueueInitialized),
    Stopped(UploadQueueStopped),
}

impl UploadQueue {
    pub fn as_str(&self) -> &'static str {
        match self {
            UploadQueue::Uninitialized => "Uninitialized",
            UploadQueue::Initialized(_) => "Initialized",
            UploadQueue::Stopped(_) => "Stopped",
        }
    }
}

/// How an individual upload operation may interact with the rest of the queue.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum OpType {
    /// The operation may be reordered with other operations, subject to
    /// `UploadOp::can_bypass`.
    MayReorder,
    /// Pending deletions must be flushed before this operation executes, e.g. when
    /// re-uploading a layer that was recently deleted (see `recently_deleted` below).
    FlushDeletion,
}

/// This keeps track of queued and in-progress tasks.
pub struct UploadQueueInitialized {
    /// Maximum number of in-progress tasks to schedule. 0 means no limit.
    pub(crate) inprogress_limit: usize,

    /// Counter to assign task IDs.
    pub(crate) task_counter: u64,

    /// The next uploaded index_part.json; assumed to be dirty.
    ///
    /// Should not be read directly, except for layer file updates. Instead, you should add a
    /// projected field.
    pub(crate) dirty: IndexPart,

    /// The latest remote persisted IndexPart.
    ///
    /// Each completed metadata upload will update this. The second item is the task_id which last
    /// updated the value, used to ensure we never store an older value over a newer one.
    pub(crate) clean: (IndexPart, Option<u64>),

    /// How many file uploads or deletions have been scheduled since the last
    /// metadata index upload was scheduled.
    pub(crate) latest_files_changes_since_metadata_upload_scheduled: u64,

    /// The Lsn is only updated after our generation has been validated with
    /// the control plane (unless a timeline's generation is None, in which case
    /// we skip validation).
    pub(crate) visible_remote_consistent_lsn: Arc<AtomicLsn>,

    /// Tasks that are currently in-progress. In-progress means that a tokio Task
    /// has been launched for it. An in-progress task can be busy uploading, but it can
    /// also be waiting on the `concurrency_limiter` Semaphore in S3Bucket, or it can
    /// be waiting for retry in `exponential_backoff`.
    pub inprogress_tasks: HashMap<u64, Arc<UploadTask>>,

    /// Queued operations that have not been launched yet. They might depend on previous
    /// tasks to finish. For example, metadata upload cannot be performed before all
    /// preceding layer file uploads have completed.
    pub queued_operations: VecDeque<UploadOp>,

    /// Files which have been unlinked but whose deletion has not yet been scheduled.
    /// Only kept around for error logging.
    ///
    /// This is kept behind the testing feature to catch problems in tests; if a bug were
    /// to cause leaks, it's better not to leave this enabled in production builds.
    #[cfg(feature = "testing")]
    pub(crate) dangling_files: HashMap<LayerName, Generation>,

    /// Recently deleted layers, used to ensure we order file operations correctly.
    pub(crate) recently_deleted: HashSet<(LayerName, Generation)>,

    /// Deletions that are blocked by the tenant configuration.
    pub(crate) blocked_deletions: Vec<Delete>,

    /// Set to true when we have inserted the `UploadOp::Shutdown` into the `inprogress_tasks`.
    pub(crate) shutting_down: bool,

    /// Permitless semaphore on which any number of `RemoteTimelineClient::shutdown` futures can
    /// wait until one of them stops the queue. The semaphore is closed when
    /// `RemoteTimelineClient::launch_queued_tasks` encounters `UploadOp::Shutdown`.
    pub(crate) shutdown_ready: Arc<tokio::sync::Semaphore>,
}
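
// How `dirty` and `clean` relate (a sketch of the intended flow, not an API contract):
// mutations update `dirty` and eventually enqueue an `UploadOp::UploadMetadata` carrying
// a snapshot of it; once that upload completes, `clean` catches up. The test-only
// `complete()` below shows this `clean`-update logic in miniature.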

impl UploadQueueInitialized {
    pub(super) fn no_pending_work(&self) -> bool {
        self.inprogress_tasks.is_empty() && self.queued_operations.is_empty()
    }

    pub(super) fn get_last_remote_consistent_lsn_visible(&self) -> Lsn {
        self.visible_remote_consistent_lsn.load()
    }

    pub(super) fn get_last_remote_consistent_lsn_projected(&self) -> Option<Lsn> {
        let lsn = self.clean.0.metadata.disk_consistent_lsn();
        self.clean.1.map(|_| lsn)
    }

    /// Returns and removes the next ready operation from the queue, if any. This isn't necessarily
    /// the first operation in the queue, to avoid head-of-line blocking -- an operation can jump
    /// the queue if it doesn't conflict with operations ahead of it.
    ///
    /// Also returns any operations that were coalesced into this one, e.g. multiple index uploads.
    ///
    /// None may be returned even if the queue isn't empty, if no operations are ready yet.
    ///
    /// NB: this is quadratic, but queues are expected to be small, and bounded by inprogress_limit.
    pub fn next_ready(&mut self) -> Option<(UploadOp, Vec<UploadOp>)> {
        // If inprogress_tasks is already at limit, don't schedule anything more.
        if self.inprogress_limit > 0 && self.inprogress_tasks.len() >= self.inprogress_limit {
            return None;
        }

        for (i, candidate) in self.queued_operations.iter().enumerate() {
            // If this candidate is ready, go for it. Otherwise, try the next one.
            if self.is_ready(i) {
                // Shutdown operations are left at the head of the queue, to prevent further
                // operations from starting. Signal that we're ready to shut down.
                if matches!(candidate, UploadOp::Shutdown) {
                    assert!(self.inprogress_tasks.is_empty(), "shutdown with tasks");
                    assert_eq!(i, 0, "shutdown not at head of queue");
                    self.shutdown_ready.close();
                    return None;
                }

                let mut op = self.queued_operations.remove(i).expect("i can't disappear");

                // Coalesce any back-to-back index uploads by only uploading the newest one that's
                // ready. This typically happens with layer/index/layer/index/... sequences, where
                // the layers bypass the indexes, leaving the indexes queued.
                //
                // If other operations are interleaved between index uploads we don't try to
                // coalesce them, since we may as well update the index concurrently with them.
                // This keeps the index fresh and avoids starvation.
                //
                // NB: we assume that all uploaded indexes have the same remote path. This
                // is true at the time of writing: the path only depends on the tenant,
                // timeline and generation, all of which are static for a timeline instance.
                // Otherwise, we must be careful not to coalesce different paths.
                let mut coalesced_ops = Vec::new();
                if matches!(op, UploadOp::UploadMetadata { .. }) {
                    while let Some(UploadOp::UploadMetadata { .. }) = self.queued_operations.get(i)
                    {
                        if *DISABLE_UPLOAD_QUEUE_INDEX_COALESCING {
                            break;
                        }
                        if !self.is_ready(i) {
                            break;
                        }
                        coalesced_ops.push(op);
                        op = self.queued_operations.remove(i).expect("i can't disappear");
                    }
                }

                return Some((op, coalesced_ops));
            }

            // Nothing can bypass a barrier or shutdown. If it wasn't scheduled above, give up.
            if matches!(candidate, UploadOp::Barrier(_) | UploadOp::Shutdown) {
                return None;
            }

            // If upload queue reordering is disabled, bail out after the first operation.
            if *DISABLE_UPLOAD_QUEUE_REORDERING {
                return None;
            }
        }
        None
    }
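
    // A concrete example of the reordering above (a sketch): given the queue
    //
    //   [UploadLayer(A), UploadMetadata(i1), UploadLayer(B)]
    //
    // both layer uploads are ready immediately -- B may bypass the queued
    // UploadMetadata(i1) provided neither i1 nor the active index references B --
    // while UploadMetadata(i1) only becomes ready once UploadLayer(A) has completed.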

    /// Returns true if the queued operation at the given position is ready to be uploaded, i.e. if
    /// it doesn't conflict with any in-progress or queued operations ahead of it. Operations are
    /// allowed to skip the queue when it's safe to do so, to increase parallelism.
    ///
    /// The position must be valid for the queue size.
    fn is_ready(&self, pos: usize) -> bool {
        let candidate = self.queued_operations.get(pos).expect("invalid position");
        self
            // Look at in-progress operations, in random order.
            .inprogress_tasks
            .values()
            .map(|task| &task.op)
            // Then queued operations ahead of the candidate, front-to-back.
            .chain(self.queued_operations.iter().take(pos))
            // Keep track of the active index ahead of each operation. This is used to ensure that
            // an upload doesn't skip the queue too far, such that it modifies a layer that's
            // referenced by an active index.
            //
            // It's okay that in-progress operations are emitted in random order above, since at
            // most one of them can be an index upload (enforced by can_bypass).
            .scan(&self.clean.0, |next_active_index, op| {
                let active_index = *next_active_index;
                if let UploadOp::UploadMetadata { uploaded } = op {
                    *next_active_index = uploaded; // stash index for next operation after this
                }
                Some((op, active_index))
            })
            // Check if the candidate can bypass all of them.
            .all(|(op, active_index)| candidate.can_bypass(op, active_index))
    }

    /// Returns the number of in-progress deletion operations.
    #[cfg(test)]
    pub(crate) fn num_inprogress_deletions(&self) -> usize {
        self.inprogress_tasks
            .iter()
            .filter(|(_, t)| matches!(t.op, UploadOp::Delete(_)))
            .count()
    }

    /// Returns the number of in-progress layer uploads.
    #[cfg(test)]
    pub(crate) fn num_inprogress_layer_uploads(&self) -> usize {
        self.inprogress_tasks
            .iter()
            .filter(|(_, t)| matches!(t.op, UploadOp::UploadLayer(_, _, _)))
            .count()
    }

    /// Test helper that schedules all ready operations into inprogress_tasks, and returns
    /// references to them.
    ///
    /// TODO: the corresponding production logic should be moved from RemoteTimelineClient into
    /// UploadQueue, so we can use the same code path.
    #[cfg(test)]
    fn schedule_ready(&mut self) -> Vec<Arc<UploadTask>> {
        let mut tasks = Vec::new();
        // NB: schedule operations one by one, to handle conflicts with inprogress_tasks.
        while let Some((op, coalesced_ops)) = self.next_ready() {
            self.task_counter += 1;
            let task = Arc::new(UploadTask {
                task_id: self.task_counter,
                op,
                coalesced_ops,
                retries: 0.into(),
            });
            self.inprogress_tasks.insert(task.task_id, task.clone());
            tasks.push(task);
        }
        tasks
    }

    /// Test helper that marks an operation as completed, removing it from inprogress_tasks.
    ///
    /// TODO: the corresponding production logic should be moved from RemoteTimelineClient into
    /// UploadQueue, so we can use the same code path.
    #[cfg(test)]
    fn complete(&mut self, task_id: u64) {
        let Some(task) = self.inprogress_tasks.remove(&task_id) else {
            return;
        };
        // Update the clean index on uploads.
        if let UploadOp::UploadMetadata { ref uploaded } = task.op {
            if task.task_id > self.clean.1.unwrap_or_default() {
                self.clean = (*uploaded.clone(), Some(task.task_id));
            }
        }
    }
}

#[derive(Clone, Copy)]
pub(super) enum SetDeletedFlagProgress {
    NotRunning,
    InProgress(NaiveDateTime),
    Successful(NaiveDateTime),
}

pub struct UploadQueueStoppedDeletable {
    pub(super) upload_queue_for_deletion: UploadQueueInitialized,
    pub(super) deleted_at: SetDeletedFlagProgress,
}

#[allow(clippy::large_enum_variant, reason = "TODO")]
pub enum UploadQueueStopped {
    Deletable(UploadQueueStoppedDeletable),
    Uninitialized,
}

#[derive(thiserror::Error, Debug)]
pub enum NotInitialized {
    #[error("queue is in state Uninitialized")]
    Uninitialized,
    #[error("queue is in state Stopped")]
    Stopped,
    #[error("queue is shutting down")]
    ShuttingDown,
}

impl NotInitialized {
    pub(crate) fn is_stopping(&self) -> bool {
        use NotInitialized::*;
        match self {
            Uninitialized => false,
            Stopped => true,
            ShuttingDown => true,
        }
    }
}

impl UploadQueue {
    pub fn initialize_empty_remote(
        &mut self,
        metadata: &TimelineMetadata,
        inprogress_limit: usize,
    ) -> anyhow::Result<&mut UploadQueueInitialized> {
        match self {
            UploadQueue::Uninitialized => (),
            UploadQueue::Initialized(_) | UploadQueue::Stopped(_) => {
                anyhow::bail!("already initialized, state {}", self.as_str())
            }
        }

        info!("initializing upload queue for empty remote");

        let index_part = IndexPart::empty(metadata.clone());

        let state = UploadQueueInitialized {
            inprogress_limit,
            dirty: index_part.clone(),
            clean: (index_part, None),
            latest_files_changes_since_metadata_upload_scheduled: 0,
            visible_remote_consistent_lsn: Arc::new(AtomicLsn::new(0)),
            // what follows are boring default initializations
            task_counter: 0,
            inprogress_tasks: HashMap::new(),
            queued_operations: VecDeque::new(),
            #[cfg(feature = "testing")]
            dangling_files: HashMap::new(),
            recently_deleted: HashSet::new(),
            blocked_deletions: Vec::new(),
            shutting_down: false,
            shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)),
        };

        *self = UploadQueue::Initialized(state);
        Ok(self.initialized_mut().expect("we just set it"))
    }

    pub fn initialize_with_current_remote_index_part(
        &mut self,
        index_part: &IndexPart,
        inprogress_limit: usize,
    ) -> anyhow::Result<&mut UploadQueueInitialized> {
        match self {
            UploadQueue::Uninitialized => (),
            UploadQueue::Initialized(_) | UploadQueue::Stopped(_) => {
                anyhow::bail!("already initialized, state {}", self.as_str())
            }
        }

        info!(
            "initializing upload queue with remote index_part.disk_consistent_lsn: {}",
            index_part.metadata.disk_consistent_lsn()
        );

        let state = UploadQueueInitialized {
            inprogress_limit,
            dirty: index_part.clone(),
            clean: (index_part.clone(), None),
            latest_files_changes_since_metadata_upload_scheduled: 0,
            visible_remote_consistent_lsn: Arc::new(
                index_part.metadata.disk_consistent_lsn().into(),
            ),
            // what follows are boring default initializations
            task_counter: 0,
            inprogress_tasks: HashMap::new(),
            queued_operations: VecDeque::new(),
            #[cfg(feature = "testing")]
            dangling_files: HashMap::new(),
            recently_deleted: HashSet::new(),
            blocked_deletions: Vec::new(),
            shutting_down: false,
            shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)),
        };

        *self = UploadQueue::Initialized(state);
        Ok(self.initialized_mut().expect("we just set it"))
    }

    pub fn initialized_mut(&mut self) -> Result<&mut UploadQueueInitialized, NotInitialized> {
        use UploadQueue::*;
        match self {
            Uninitialized => Err(NotInitialized::Uninitialized),
            Initialized(x) => {
                if x.shutting_down {
                    Err(NotInitialized::ShuttingDown)
                } else {
                    Ok(x)
                }
            }
            Stopped(_) => Err(NotInitialized::Stopped),
        }
    }

    pub(crate) fn stopped_mut(&mut self) -> anyhow::Result<&mut UploadQueueStoppedDeletable> {
        match self {
            UploadQueue::Initialized(_) | UploadQueue::Uninitialized => {
                anyhow::bail!("queue is in state {}", self.as_str())
            }
            UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => {
                anyhow::bail!("queue is in state Stopped(Uninitialized)")
            }
            UploadQueue::Stopped(UploadQueueStopped::Deletable(deletable)) => Ok(deletable),
        }
    }
}
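
// Typical lifecycle (a sketch, mirroring the tests below): a queue starts out
// Uninitialized and is initialized either empty, for a new timeline, or from a
// downloaded index_part.json, e.g.:
//
//   let mut queue = UploadQueue::Uninitialized;
//   let initialized = queue.initialize_empty_remote(&metadata, 0)?;
//
// It may later be transitioned to Stopped, e.g. during timeline deletion.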

/// An in-progress upload or delete task.
#[derive(Debug)]
pub struct UploadTask {
    /// Unique ID of this task. Used as the key in `inprogress_tasks` above.
    pub task_id: u64,
    /// Number of task retries.
    pub retries: AtomicU32,
    /// The upload operation.
    pub op: UploadOp,
    /// Any upload operations that were coalesced into this operation. This typically happens with
    /// back-to-back index uploads, see `UploadQueueInitialized::next_ready()`.
    pub coalesced_ops: Vec<UploadOp>,
}

/// A deletion of some layers within the lifetime of a timeline. This is not used
/// for timeline deletion, which skips this queue and goes directly to DeletionQueue.
#[derive(Debug, Clone)]
pub struct Delete {
    pub layers: Vec<(LayerName, LayerFileMetadata)>,
}

#[derive(Clone, Debug)]
pub enum UploadOp {
    /// Upload a layer file. The last field indicates the last operation for this file.
    UploadLayer(ResidentLayer, LayerFileMetadata, Option<OpType>),

    /// Upload an index_part.json file.
    UploadMetadata {
        /// The next [`UploadQueueInitialized::clean`] after this upload succeeds.
        uploaded: Box<IndexPart>,
    },

    /// Delete layer files.
    Delete(Delete),

    /// Barrier. When the barrier operation is reached, the channel is closed.
    Barrier(tokio::sync::watch::Sender<()>),

    /// Shutdown; upon encountering this operation, no new operations will be spawned.
    /// Otherwise this is the same as a Barrier.
    Shutdown,
}

impl std::fmt::Display for UploadOp {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            UploadOp::UploadLayer(layer, metadata, mode) => {
                write!(
                    f,
                    "UploadLayer({}, size={:?}, gen={:?}, mode={:?})",
                    layer, metadata.file_size, metadata.generation, mode
                )
            }
            UploadOp::UploadMetadata { uploaded, .. } => {
                write!(
                    f,
                    "UploadMetadata(lsn: {})",
                    uploaded.metadata.disk_consistent_lsn()
                )
            }
            UploadOp::Delete(delete) => {
                write!(f, "Delete({} layers)", delete.layers.len())
            }
            UploadOp::Barrier(_) => write!(f, "Barrier"),
            UploadOp::Shutdown => write!(f, "Shutdown"),
        }
    }
}

impl UploadOp {
    /// Returns true if self can bypass other, i.e. if the operations don't conflict. index is the
    /// active index when other would be uploaded -- if we allow self to bypass other, this would
    /// be the active index when self is uploaded.
    pub fn can_bypass(&self, other: &UploadOp, index: &IndexPart) -> bool {
        match (self, other) {
            // Nothing can bypass a barrier or shutdown, and it can't bypass anything.
            (UploadOp::Barrier(_), _) | (_, UploadOp::Barrier(_)) => false,
            (UploadOp::Shutdown, _) | (_, UploadOp::Shutdown) => false,

            // Uploads and deletes can bypass each other unless they're for the same file.
            (UploadOp::UploadLayer(a, ameta, _), UploadOp::UploadLayer(b, bmeta, _)) => {
                let aname = &a.layer_desc().layer_name();
                let bname = &b.layer_desc().layer_name();
                !is_same_remote_layer_path(aname, ameta, bname, bmeta)
            }
            (UploadOp::UploadLayer(u, umeta, _), UploadOp::Delete(d))
            | (UploadOp::Delete(d), UploadOp::UploadLayer(u, umeta, _)) => {
                d.layers.iter().all(|(dname, dmeta)| {
                    !is_same_remote_layer_path(&u.layer_desc().layer_name(), umeta, dname, dmeta)
                })
            }

            // Deletes are idempotent and can always bypass each other.
            (UploadOp::Delete(_), UploadOp::Delete(_)) => true,

            // Uploads and deletes can bypass an index upload as long as neither the uploaded index
            // nor the active index below it references the file. A layer can't be modified or
            // deleted while referenced by an index.
            //
            // Similarly, index uploads can bypass uploads and deletes as long as neither the
            // uploaded index nor the active index references the file (the latter would be
            // incorrect use by the caller).
            (UploadOp::UploadLayer(u, umeta, _), UploadOp::UploadMetadata { uploaded: i })
            | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::UploadLayer(u, umeta, _)) => {
                let uname = u.layer_desc().layer_name();
                !i.references(&uname, umeta) && !index.references(&uname, umeta)
            }
            (UploadOp::Delete(d), UploadOp::UploadMetadata { uploaded: i })
            | (UploadOp::UploadMetadata { uploaded: i }, UploadOp::Delete(d)) => {
                d.layers.iter().all(|(dname, dmeta)| {
                    !i.references(dname, dmeta) && !index.references(dname, dmeta)
                })
            }

            // Indexes can never bypass each other. They can coalesce though, and
            // `UploadQueue::next_ready()` currently does this when possible.
            (UploadOp::UploadMetadata { .. }, UploadOp::UploadMetadata { .. }) => false,
        }
    }
}
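
// Some concrete consequences of the rules above (a sketch): with an empty active
// index, Delete([X]) may bypass UploadLayer(Y, ..) whenever X and Y resolve to
// different remote paths, but never UploadLayer(X, ..); two UploadMetadata ops
// never bypass each other (next_ready() coalesces them instead); and nothing
// bypasses a Barrier or Shutdown.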

#[cfg(test)]
mod tests {
    use std::str::FromStr as _;

    use itertools::Itertools as _;
    use utils::shard::{ShardCount, ShardIndex, ShardNumber};

    use super::*;
    use crate::DEFAULT_PG_VERSION;
    use crate::tenant::Timeline;
    use crate::tenant::harness::{TIMELINE_ID, TenantHarness};
    use crate::tenant::storage_layer::Layer;
    use crate::tenant::storage_layer::layer::local_layer_path;

    /// Test helper which asserts that two operations are the same, in lieu of UploadOp PartialEq.
    #[track_caller]
    fn assert_same_op(a: &UploadOp, b: &UploadOp) {
        use UploadOp::*;
        match (a, b) {
            (UploadLayer(a, ameta, atype), UploadLayer(b, bmeta, btype)) => {
                assert_eq!(a.layer_desc().layer_name(), b.layer_desc().layer_name());
                assert_eq!(ameta, bmeta);
                assert_eq!(atype, btype);
            }
            (Delete(a), Delete(b)) => assert_eq!(a.layers, b.layers),
            (UploadMetadata { uploaded: a }, UploadMetadata { uploaded: b }) => assert_eq!(a, b),
            (Barrier(_), Barrier(_)) => {}
            (Shutdown, Shutdown) => {}
            (a, b) => panic!("{a:?} != {b:?}"),
        }
    }

    /// Test helper which asserts that two sets of operations are the same.
    #[track_caller]
    fn assert_same_ops<'a>(
        a: impl IntoIterator<Item = &'a UploadOp>,
        b: impl IntoIterator<Item = &'a UploadOp>,
    ) {
        a.into_iter()
            .zip_eq(b)
            .for_each(|(a, b)| assert_same_op(a, b))
    }

    /// Test helper to construct a test timeline.
    ///
    /// TODO: it really shouldn't be necessary to construct an entire tenant and timeline just to
    /// test the upload queue -- decouple ResidentLayer from Timeline.
    ///
    /// TODO: the upload queue uses TimelineMetadata::example() instead, because there's no way to
    /// obtain a TimelineMetadata from a Timeline.
    fn make_timeline() -> Arc<Timeline> {
        // Grab the current test name from the current thread name.
        // TODO: TenantHarness shouldn't take a &'static str, but just leak the test name for now.
        let test_name = std::thread::current().name().unwrap().to_string();
        let test_name = Box::leak(test_name.into_boxed_str());

        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .expect("failed to create runtime");

        runtime
            .block_on(async {
                let harness = TenantHarness::create(test_name).await?;
                let (tenant, ctx) = harness.load().await;
                tenant
                    .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
                    .await
            })
            .expect("failed to create timeline")
    }

    /// Test helper to construct an (empty) resident layer.
    fn make_layer(timeline: &Arc<Timeline>, name: &str) -> ResidentLayer {
        make_layer_with_size(timeline, name, 0)
    }

    /// Test helper to construct a resident layer with the given size.
    fn make_layer_with_size(timeline: &Arc<Timeline>, name: &str, size: usize) -> ResidentLayer {
        let metadata = LayerFileMetadata {
            generation: timeline.generation,
            shard: timeline.get_shard_index(),
            file_size: size as u64,
        };
        make_layer_with_metadata(timeline, name, metadata)
    }

    /// Test helper to construct a layer with the given metadata.
    fn make_layer_with_metadata(
        timeline: &Arc<Timeline>,
        name: &str,
        metadata: LayerFileMetadata,
    ) -> ResidentLayer {
        let name = LayerName::from_str(name).expect("invalid name");
        let local_path = local_layer_path(
            timeline.conf,
            &timeline.tenant_shard_id,
            &timeline.timeline_id,
            &name,
            &metadata.generation,
        );
        std::fs::write(&local_path, vec![0; metadata.file_size as usize])
            .expect("failed to write file");
        Layer::for_resident(timeline.conf, timeline, local_path, name, metadata)
    }

    /// Test helper to add a layer to an index and return a new index.
    fn index_with(index: &IndexPart, layer: &ResidentLayer) -> Box<IndexPart> {
        let mut index = index.clone();
        index
            .layer_metadata
            .insert(layer.layer_desc().layer_name(), layer.metadata());
        Box::new(index)
    }

    /// Test helper to remove a layer from an index and return a new index.
    fn index_without(index: &IndexPart, layer: &ResidentLayer) -> Box<IndexPart> {
        let mut index = index.clone();
        index
            .layer_metadata
            .remove(&layer.layer_desc().layer_name());
        Box::new(index)
    }

684 :
685 : /// Nothing can bypass a barrier, and it can't bypass inprogress tasks.
686 : #[test]
687 4 : fn schedule_barrier() -> anyhow::Result<()> {
688 4 : let mut queue = UploadQueue::Uninitialized;
689 4 : let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 0)?;
690 4 : let tli = make_timeline();
691 4 :
692 4 : let index = Box::new(queue.clean.0.clone()); // empty, doesn't matter
693 4 : let layer0 = make_layer(
694 4 : &tli,
695 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
696 4 : );
697 4 : let layer1 = make_layer(
698 4 : &tli,
699 4 : "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
700 4 : );
701 4 : let layer2 = make_layer(
702 4 : &tli,
703 4 : "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
704 4 : );
705 4 : let layer3 = make_layer(
706 4 : &tli,
707 4 : "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
708 4 : );
709 4 : let (barrier, _) = tokio::sync::watch::channel(());
710 4 :
711 4 : // Enqueue non-conflicting upload, delete, and index before and after a barrier.
712 4 : let ops = [
713 4 : UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
714 4 : UploadOp::Delete(Delete {
715 4 : layers: vec![(layer1.layer_desc().layer_name(), layer1.metadata())],
716 4 : }),
717 4 : UploadOp::UploadMetadata {
718 4 : uploaded: index.clone(),
719 4 : },
720 4 : UploadOp::Barrier(barrier),
721 4 : UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
722 4 : UploadOp::Delete(Delete {
723 4 : layers: vec![(layer3.layer_desc().layer_name(), layer3.metadata())],
724 4 : }),
725 4 : UploadOp::UploadMetadata {
726 4 : uploaded: index.clone(),
727 4 : },
728 4 : ];
729 4 :
730 4 : queue.queued_operations.extend(ops.clone());
731 4 :
732 4 : // Schedule the initial operations ahead of the barrier.
733 4 : let tasks = queue.schedule_ready();
734 4 :
735 12 : assert_same_ops(tasks.iter().map(|t| &t.op), &ops[0..3]);
736 4 : assert!(matches!(
737 4 : queue.queued_operations.front(),
738 : Some(&UploadOp::Barrier(_))
739 : ));
740 :
741 : // Complete the initial operations. The barrier isn't scheduled while they're pending.
742 16 : for task in tasks {
743 12 : assert!(queue.schedule_ready().is_empty());
744 12 : queue.complete(task.task_id);
745 : }
746 :
747 : // Schedule the barrier. The later tasks won't schedule until it completes.
748 4 : let tasks = queue.schedule_ready();
749 4 :
750 4 : assert_eq!(tasks.len(), 1);
751 4 : assert!(matches!(tasks[0].op, UploadOp::Barrier(_)));
752 4 : assert_eq!(queue.queued_operations.len(), 3);
753 :
754 : // Complete the barrier. The rest of the tasks schedule immediately.
755 4 : queue.complete(tasks[0].task_id);
756 4 :
757 4 : let tasks = queue.schedule_ready();
758 12 : assert_same_ops(tasks.iter().map(|t| &t.op), &ops[4..]);
759 4 : assert!(queue.queued_operations.is_empty());
760 :
761 4 : Ok(())
762 4 : }
763 :
764 : /// Deletes can be scheduled in parallel, even if they're for the same file.
765 : #[test]
766 4 : fn schedule_delete_parallel() -> anyhow::Result<()> {
767 4 : let mut queue = UploadQueue::Uninitialized;
768 4 : let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 0)?;
769 4 : let tli = make_timeline();
770 4 :
771 4 : // Enqueue a bunch of deletes, some with conflicting names.
772 4 : let layer0 = make_layer(
773 4 : &tli,
774 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
775 4 : );
776 4 : let layer1 = make_layer(
777 4 : &tli,
778 4 : "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
779 4 : );
780 4 : let layer2 = make_layer(
781 4 : &tli,
782 4 : "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
783 4 : );
784 4 : let layer3 = make_layer(
785 4 : &tli,
786 4 : "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
787 4 : );
788 4 :
789 4 : let ops = [
790 4 : UploadOp::Delete(Delete {
791 4 : layers: vec![(layer0.layer_desc().layer_name(), layer0.metadata())],
792 4 : }),
793 4 : UploadOp::Delete(Delete {
794 4 : layers: vec![(layer1.layer_desc().layer_name(), layer1.metadata())],
795 4 : }),
796 4 : UploadOp::Delete(Delete {
797 4 : layers: vec![
798 4 : (layer1.layer_desc().layer_name(), layer1.metadata()),
799 4 : (layer2.layer_desc().layer_name(), layer2.metadata()),
800 4 : ],
801 4 : }),
802 4 : UploadOp::Delete(Delete {
803 4 : layers: vec![(layer2.layer_desc().layer_name(), layer2.metadata())],
804 4 : }),
805 4 : UploadOp::Delete(Delete {
806 4 : layers: vec![(layer3.layer_desc().layer_name(), layer3.metadata())],
807 4 : }),
808 4 : ];
809 4 :
810 4 : queue.queued_operations.extend(ops.clone());
811 4 :
812 4 : // Schedule all ready operations. Since deletes don't conflict, they're all scheduled.
813 4 : let tasks = queue.schedule_ready();
814 4 :
815 20 : assert_same_ops(tasks.iter().map(|t| &t.op), &ops);
816 4 : assert!(queue.queued_operations.is_empty());
817 :
818 4 : Ok(())
819 4 : }
820 :
821 : /// Conflicting uploads are serialized.
822 : #[test]
823 4 : fn schedule_upload_conflicts() -> anyhow::Result<()> {
824 4 : let mut queue = UploadQueue::Uninitialized;
825 4 : let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
826 4 : let tli = make_timeline();
827 4 :
828 4 : // Enqueue three versions of the same layer, with different file sizes.
829 4 : let layer0a = make_layer_with_size(
830 4 : &tli,
831 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
832 4 : 1,
833 4 : );
834 4 : let layer0b = make_layer_with_size(
835 4 : &tli,
836 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
837 4 : 2,
838 4 : );
839 4 : let layer0c = make_layer_with_size(
840 4 : &tli,
841 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
842 4 : 3,
843 4 : );
844 4 :
845 4 : let ops = [
846 4 : UploadOp::UploadLayer(layer0a.clone(), layer0a.metadata(), None),
847 4 : UploadOp::UploadLayer(layer0b.clone(), layer0b.metadata(), None),
848 4 : UploadOp::UploadLayer(layer0c.clone(), layer0c.metadata(), None),
849 4 : ];
850 4 :
851 4 : queue.queued_operations.extend(ops.clone());
852 :
853 : // Only one version should be scheduled and uploaded at a time.
854 16 : for op in ops {
855 12 : let tasks = queue.schedule_ready();
856 12 : assert_eq!(tasks.len(), 1);
857 12 : assert_same_op(&tasks[0].op, &op);
858 12 : queue.complete(tasks[0].task_id);
859 : }
860 4 : assert!(queue.schedule_ready().is_empty());
861 4 : assert!(queue.queued_operations.is_empty());
862 :
863 4 : Ok(())
864 4 : }
865 :
866 : /// Conflicting uploads and deletes are serialized.
867 : #[test]
868 4 : fn schedule_upload_delete_conflicts() -> anyhow::Result<()> {
869 4 : let mut queue = UploadQueue::Uninitialized;
870 4 : let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
871 4 : let tli = make_timeline();
872 4 :
873 4 : // Enqueue two layer uploads, with a delete of both layers in between them. These should be
874 4 : // scheduled one at a time, since deletes can't bypass uploads and vice versa.
875 4 : let layer0 = make_layer(
876 4 : &tli,
877 4 : "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
878 4 : );
879 4 : let layer1 = make_layer(
880 4 : &tli,
881 4 : "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
882 4 : );
883 4 :
884 4 : let ops = [
885 4 : UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
886 4 : UploadOp::Delete(Delete {
887 4 : layers: vec![
888 4 : (layer0.layer_desc().layer_name(), layer0.metadata()),
889 4 : (layer1.layer_desc().layer_name(), layer1.metadata()),
890 4 : ],
891 4 : }),
892 4 : UploadOp::UploadLayer(layer1.clone(), layer1.metadata(), None),
893 4 : ];
894 4 :
895 4 : queue.queued_operations.extend(ops.clone());
896 :
897 : // Only one version should be scheduled and uploaded at a time.
898 16 : for op in ops {
899 12 : let tasks = queue.schedule_ready();
900 12 : assert_eq!(tasks.len(), 1);
901 12 : assert_same_op(&tasks[0].op, &op);
902 12 : queue.complete(tasks[0].task_id);
903 : }
904 4 : assert!(queue.schedule_ready().is_empty());
905 4 : assert!(queue.queued_operations.is_empty());
906 :
907 4 : Ok(())
908 4 : }
909 :
    /// Non-conflicting uploads and deletes can bypass the queue, avoiding the conflicting
    /// delete/upload operations at the head of the queue.
    #[test]
    fn schedule_upload_delete_conflicts_bypass() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
        let tli = make_timeline();

        // Enqueue two layer uploads, with a delete of both layers in between them. These should be
        // scheduled one at a time, since deletes can't bypass uploads and vice versa.
        //
        // Also enqueue non-conflicting uploads and deletes at the end. These can bypass the queue
        // and run immediately.
        let layer0 = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer1 = make_layer(
            &tli,
            "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer2 = make_layer(
            &tli,
            "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer3 = make_layer(
            &tli,
            "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );

        let ops = [
            UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
            UploadOp::Delete(Delete {
                layers: vec![
                    (layer0.layer_desc().layer_name(), layer0.metadata()),
                    (layer1.layer_desc().layer_name(), layer1.metadata()),
                ],
            }),
            UploadOp::UploadLayer(layer1.clone(), layer1.metadata(), None),
            UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
            UploadOp::Delete(Delete {
                layers: vec![(layer3.layer_desc().layer_name(), layer3.metadata())],
            }),
        ];

        queue.queued_operations.extend(ops.clone());

        // Operations 0, 3, and 4 are scheduled immediately.
        let tasks = queue.schedule_ready();
        assert_same_ops(tasks.iter().map(|t| &t.op), [&ops[0], &ops[3], &ops[4]]);
        assert_eq!(queue.queued_operations.len(), 2);

        Ok(())
    }

    /// Non-conflicting uploads are parallelized.
    #[test]
    fn schedule_upload_parallel() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
        let tli = make_timeline();

        // Enqueue three different layer uploads.
        let layer0 = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer1 = make_layer(
            &tli,
            "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer2 = make_layer(
            &tli,
            "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );

        let ops = [
            UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
            UploadOp::UploadLayer(layer1.clone(), layer1.metadata(), None),
            UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
        ];

        queue.queued_operations.extend(ops.clone());

        // All uploads should be scheduled concurrently.
        let tasks = queue.schedule_ready();

        assert_same_ops(tasks.iter().map(|t| &t.op), &ops);
        assert!(queue.queued_operations.is_empty());

        Ok(())
    }

    /// Index uploads are coalesced.
    #[test]
    fn schedule_index_coalesce() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;

        // Enqueue three uploads of the current empty index.
        let index = Box::new(queue.clean.0.clone());

        let ops = [
            UploadOp::UploadMetadata {
                uploaded: index.clone(),
            },
            UploadOp::UploadMetadata {
                uploaded: index.clone(),
            },
            UploadOp::UploadMetadata {
                uploaded: index.clone(),
            },
        ];

        queue.queued_operations.extend(ops.clone());

        // The index uploads are coalesced into a single operation.
        let tasks = queue.schedule_ready();
        assert_eq!(tasks.len(), 1);
        assert_same_op(&tasks[0].op, &ops[2]);
        assert_same_ops(&tasks[0].coalesced_ops, &ops[0..2]);

        assert!(queue.queued_operations.is_empty());

        Ok(())
    }

    /// Chains of upload/index operations lead to parallel layer uploads and serial index uploads.
    /// This is the common case with layer flushes.
    #[test]
    fn schedule_index_upload_chain() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
        let tli = make_timeline();

        // Enqueue three layer uploads, each followed by an index upload referencing it.
        let index = Box::new(queue.clean.0.clone());
        let layer0 = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let index0 = index_with(&index, &layer0);
        let layer1 = make_layer(
            &tli,
            "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let index1 = index_with(&index0, &layer1);
        let layer2 = make_layer(
            &tli,
            "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let index2 = index_with(&index1, &layer2);

        let ops = [
            UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index0.clone(),
            },
            UploadOp::UploadLayer(layer1.clone(), layer1.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index1.clone(),
            },
            UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index2.clone(),
            },
        ];

        queue.queued_operations.extend(ops.clone());

        // The layer uploads should be scheduled immediately. The indexes must wait.
        let upload_tasks = queue.schedule_ready();
        assert_same_ops(
            upload_tasks.iter().map(|t| &t.op),
            [&ops[0], &ops[2], &ops[4]],
        );

        // layer2 completes first. None of the indexes can upload yet.
        queue.complete(upload_tasks[2].task_id);
        assert!(queue.schedule_ready().is_empty());

        // layer0 completes. index0 can upload. It completes.
        queue.complete(upload_tasks[0].task_id);
        let index_tasks = queue.schedule_ready();
        assert_eq!(index_tasks.len(), 1);
        assert_same_op(&index_tasks[0].op, &ops[1]);
        queue.complete(index_tasks[0].task_id);

        // layer1 completes. This unblocks index1 and index2, which coalesce into
        // a single upload for index2.
        queue.complete(upload_tasks[1].task_id);

        let index_tasks = queue.schedule_ready();
        assert_eq!(index_tasks.len(), 1);
        assert_same_op(&index_tasks[0].op, &ops[5]);
        assert_same_ops(&index_tasks[0].coalesced_ops, &ops[3..4]);

        assert!(queue.queued_operations.is_empty());

        Ok(())
    }

    /// A delete can't bypass an index upload if an index ahead of it still references it.
    #[test]
    fn schedule_index_delete_dereferenced() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
        let tli = make_timeline();

        // Create a layer to upload.
        let layer = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let index_upload = index_with(&queue.clean.0, &layer);

        // Remove the layer reference in a new index, then delete the layer.
        let index_deref = index_without(&index_upload, &layer);

        let ops = [
            // Initial upload, with a barrier to prevent index coalescing.
            UploadOp::UploadLayer(layer.clone(), layer.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index_upload.clone(),
            },
            UploadOp::Barrier(tokio::sync::watch::channel(()).0),
            // Dereference the layer and delete it.
            UploadOp::UploadMetadata {
                uploaded: index_deref.clone(),
            },
            UploadOp::Delete(Delete {
                layers: vec![(layer.layer_desc().layer_name(), layer.metadata())],
            }),
        ];

        queue.queued_operations.extend(ops.clone());

        // Operations are serialized.
        for op in ops {
            let tasks = queue.schedule_ready();
            assert_eq!(tasks.len(), 1);
            assert_same_op(&tasks[0].op, &op);
            queue.complete(tasks[0].task_id);
        }
        assert!(queue.queued_operations.is_empty());

        Ok(())
    }

    /// An upload with a reused layer name doesn't clobber the previous layer. Specifically, a
    /// dereference/upload/reference cycle can't allow the upload to bypass the reference.
    #[test]
    fn schedule_index_upload_dereferenced() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_with_current_remote_index_part(&IndexPart::example(), 0)?;
        let tli = make_timeline();

        // Create a layer to upload.
        let layer = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );

        // Upload the layer. Then dereference the layer, and upload/reference it again.
        let index_upload = index_with(&queue.clean.0, &layer);
        let index_deref = index_without(&index_upload, &layer);
        let index_ref = index_with(&index_deref, &layer);

        let ops = [
            // Initial upload, with a barrier to prevent index coalescing.
            UploadOp::UploadLayer(layer.clone(), layer.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index_upload.clone(),
            },
            UploadOp::Barrier(tokio::sync::watch::channel(()).0),
            // Dereference the layer.
            UploadOp::UploadMetadata {
                uploaded: index_deref.clone(),
            },
            // Replace and reference the layer.
            UploadOp::UploadLayer(layer.clone(), layer.metadata(), None),
            UploadOp::UploadMetadata {
                uploaded: index_ref.clone(),
            },
        ];

        queue.queued_operations.extend(ops.clone());

        // Operations are serialized.
        for op in ops {
            let tasks = queue.schedule_ready();
            assert_eq!(tasks.len(), 1);
            assert_same_op(&tasks[0].op, &op);
            queue.complete(tasks[0].task_id);
        }
        assert!(queue.queued_operations.is_empty());

        Ok(())
    }

    /// Nothing can bypass a shutdown, and it waits for inprogress tasks. It's never returned from
    /// next_ready(), but is left at the head of the queue.
    #[test]
    fn schedule_shutdown() -> anyhow::Result<()> {
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 0)?;
        let tli = make_timeline();

        let index = Box::new(queue.clean.0.clone()); // empty, doesn't matter
        let layer0 = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer1 = make_layer(
            &tli,
            "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer2 = make_layer(
            &tli,
            "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer3 = make_layer(
            &tli,
            "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );

        // Enqueue non-conflicting upload, delete, and index before and after a shutdown.
        let ops = [
            UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
            UploadOp::Delete(Delete {
                layers: vec![(layer1.layer_desc().layer_name(), layer1.metadata())],
            }),
            UploadOp::UploadMetadata {
                uploaded: index.clone(),
            },
            UploadOp::Shutdown,
            UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
            UploadOp::Delete(Delete {
                layers: vec![(layer3.layer_desc().layer_name(), layer3.metadata())],
            }),
            UploadOp::UploadMetadata {
                uploaded: index.clone(),
            },
        ];

        queue.queued_operations.extend(ops.clone());

        // Schedule the initial operations ahead of the shutdown.
        let tasks = queue.schedule_ready();

        assert_same_ops(tasks.iter().map(|t| &t.op), &ops[0..3]);
        assert!(matches!(
            queue.queued_operations.front(),
            Some(&UploadOp::Shutdown)
        ));

        // Complete the initial operations. The shutdown isn't triggered while they're pending.
        for task in tasks {
            assert!(queue.schedule_ready().is_empty());
            queue.complete(task.task_id);
        }

        // The shutdown is triggered the next time we try to pull an operation. It isn't returned,
        // but is left in the queue.
        assert!(!queue.shutdown_ready.is_closed());
        assert!(queue.next_ready().is_none());
        assert!(queue.shutdown_ready.is_closed());

        Ok(())
    }

    /// Scheduling respects inprogress_limit.
    #[test]
    fn schedule_inprogress_limit() -> anyhow::Result<()> {
        // Create a queue with inprogress_limit=2.
        let mut queue = UploadQueue::Uninitialized;
        let queue = queue.initialize_empty_remote(&TimelineMetadata::example(), 2)?;
        let tli = make_timeline();

        // Enqueue a bunch of uploads.
        let layer0 = make_layer(
            &tli,
            "000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer1 = make_layer(
            &tli,
            "100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer2 = make_layer(
            &tli,
            "200000000000000000000000000000000000-300000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );
        let layer3 = make_layer(
            &tli,
            "300000000000000000000000000000000000-400000000000000000000000000000000000__00000000016B59D8-00000000016B5A51",
        );

        let ops = [
            UploadOp::UploadLayer(layer0.clone(), layer0.metadata(), None),
            UploadOp::UploadLayer(layer1.clone(), layer1.metadata(), None),
            UploadOp::UploadLayer(layer2.clone(), layer2.metadata(), None),
            UploadOp::UploadLayer(layer3.clone(), layer3.metadata(), None),
        ];

        queue.queued_operations.extend(ops.clone());

        // Schedule all ready operations. Only 2 are scheduled.
        let tasks = queue.schedule_ready();
        assert_same_ops(tasks.iter().map(|t| &t.op), &ops[0..2]);
        assert!(queue.next_ready().is_none());

        // When one completes, another is scheduled.
        queue.complete(tasks[0].task_id);
        let tasks = queue.schedule_ready();
        assert_same_ops(tasks.iter().map(|t| &t.op), &ops[2..3]);

        Ok(())
    }

    /// Tests that can_bypass takes name, generation and shard index into account for all operations.
    #[test]
    fn can_bypass_path() -> anyhow::Result<()> {
        let tli = make_timeline();

        let name0 = &"000000000000000000000000000000000000-100000000000000000000000000000000000__00000000016B59D8-00000000016B5A51";
        let name1 = &"100000000000000000000000000000000000-200000000000000000000000000000000000__00000000016B59D8-00000000016B5A51";

        // Asserts that layers a and b either can or can't bypass each other, for all combinations
        // of operations (except Delete and UploadMetadata which are special-cased).
        #[track_caller]
        fn assert_can_bypass(a: ResidentLayer, b: ResidentLayer, can_bypass: bool) {
            let index = IndexPart::empty(TimelineMetadata::example());
            for (a, b) in make_ops(a).into_iter().zip(make_ops(b)) {
                match (&a, &b) {
                    // Deletes can always bypass each other.
                    (UploadOp::Delete(_), UploadOp::Delete(_)) => assert!(a.can_bypass(&b, &index)),
                    // Indexes can never bypass each other.
                    (UploadOp::UploadMetadata { .. }, UploadOp::UploadMetadata { .. }) => {
                        assert!(!a.can_bypass(&b, &index))
                    }
                    // For other operations, assert as requested.
                    (a, b) => assert_eq!(a.can_bypass(b, &index), can_bypass),
                }
            }
        }

        fn make_ops(layer: ResidentLayer) -> Vec<UploadOp> {
            let mut index = IndexPart::empty(TimelineMetadata::example());
            index
                .layer_metadata
                .insert(layer.layer_desc().layer_name(), layer.metadata());
            vec![
                UploadOp::UploadLayer(layer.clone(), layer.metadata(), None),
                UploadOp::Delete(Delete {
                    layers: vec![(layer.layer_desc().layer_name(), layer.metadata())],
                }),
                UploadOp::UploadMetadata {
                    uploaded: Box::new(index),
                },
            ]
        }

        // Makes a ResidentLayer.
        let layer = |name: &'static str, shard: Option<u8>, generation: u32| -> ResidentLayer {
            let shard = shard
                .map(|n| ShardIndex::new(ShardNumber(n), ShardCount(8)))
                .unwrap_or(ShardIndex::unsharded());
            let metadata = LayerFileMetadata {
                shard,
                generation: Generation::Valid(generation),
                file_size: 0,
            };
            make_layer_with_metadata(&tli, name, metadata)
        };

        // Same name and metadata can't bypass. This goes both for unsharded and sharded, as well as
        // 0 or >0 generation.
        assert_can_bypass(layer(name0, None, 0), layer(name0, None, 0), false);
        assert_can_bypass(layer(name0, Some(0), 0), layer(name0, Some(0), 0), false);
        assert_can_bypass(layer(name0, None, 1), layer(name0, None, 1), false);

        // Different names can bypass.
        assert_can_bypass(layer(name0, None, 0), layer(name1, None, 0), true);

        // Different shards can bypass. Shard 0 is different from unsharded.
        assert_can_bypass(layer(name0, Some(0), 0), layer(name0, Some(1), 0), true);
        assert_can_bypass(layer(name0, Some(0), 0), layer(name0, None, 0), true);

        // Different generations can bypass, both sharded and unsharded.
        assert_can_bypass(layer(name0, None, 0), layer(name0, None, 1), true);
        assert_can_bypass(layer(name0, Some(1), 0), layer(name0, Some(1), 1), true);

        Ok(())
    }
}