//! The timeline manager task is responsible for managing the timeline's background tasks.
//! It is spawned alongside each timeline and exits when the timeline is deleted.
//! It watches for changes in the timeline state and decides when to spawn or kill background tasks.
//! It also manages some reactive state, such as whether the timeline should be active for broker pushes.
//!
//! Be aware that you need to be extra careful with manager code, because it is not respawned on panic.
//! Also, if it gets stuck in some branch, it will prevent any further progress in the timeline.
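//!
//! A rough sketch of how this module is wired together (hypothetical call
//! site; the actual spawning happens in timeline setup code):
//!
//! ```ignore
//! let ctl = ManagerCtl::new();
//! // bootstrap_manager() must be called exactly once.
//! let (manager_tx, manager_rx) = ctl.bootstrap_manager();
//! tokio::spawn(main_task(tli, conf, broker_active_set, manager_tx, manager_rx));
//!
//! // Later, any task can request a WAL residence guard through the same ctl:
//! let guard = ctl.wal_residence_guard().await?;
//! ```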

use std::{
    sync::{atomic::AtomicUsize, Arc},
    time::Duration,
};

use postgres_ffi::XLogSegNo;
use serde::{Deserialize, Serialize};
use tokio::{
    task::{JoinError, JoinHandle},
    time::Instant,
};
use tracing::{debug, info, info_span, instrument, warn, Instrument};
use utils::lsn::Lsn;

use crate::{
    control_file::{FileStorage, Storage},
    metrics::{MANAGER_ACTIVE_CHANGES, MANAGER_ITERATIONS_TOTAL},
    recovery::recovery_main,
    remove_wal::calc_horizon_lsn,
    safekeeper::Term,
    send_wal::WalSenders,
    state::TimelineState,
    timeline::{ManagerTimeline, PeerInfo, ReadGuardSharedState, StateSK, WalResidentTimeline},
    timeline_guard::{AccessService, GuardId, ResidenceGuard},
    timelines_set::{TimelineSetGuard, TimelinesSet},
    wal_backup::{self, WalBackupTaskHandle},
    wal_backup_partial::{self, PartialRemoteSegment},
    SafeKeeperConf,
};

pub(crate) struct StateSnapshot {
    // inmem values
    pub(crate) commit_lsn: Lsn,
    pub(crate) backup_lsn: Lsn,
    pub(crate) remote_consistent_lsn: Lsn,

    // persistent control file values
    pub(crate) cfile_peer_horizon_lsn: Lsn,
    pub(crate) cfile_remote_consistent_lsn: Lsn,
    pub(crate) cfile_backup_lsn: Lsn,

    // latest state
    pub(crate) flush_lsn: Lsn,
    pub(crate) last_log_term: Term,

    // misc
    pub(crate) cfile_last_persist_at: std::time::Instant,
    pub(crate) inmem_flush_pending: bool,
    pub(crate) wal_removal_on_hold: bool,
    pub(crate) peers: Vec<PeerInfo>,
}

impl StateSnapshot {
    /// Create a new snapshot of the timeline state.
    fn new(read_guard: ReadGuardSharedState, heartbeat_timeout: Duration) -> Self {
        let state = read_guard.sk.state();
        Self {
            commit_lsn: state.inmem.commit_lsn,
            backup_lsn: state.inmem.backup_lsn,
            remote_consistent_lsn: state.inmem.remote_consistent_lsn,
            cfile_peer_horizon_lsn: state.peer_horizon_lsn,
            cfile_remote_consistent_lsn: state.remote_consistent_lsn,
            cfile_backup_lsn: state.backup_lsn,
            flush_lsn: read_guard.sk.flush_lsn(),
            last_log_term: read_guard.sk.last_log_term(),
            cfile_last_persist_at: state.pers.last_persist_at(),
            inmem_flush_pending: Self::has_unflushed_inmem_state(state),
            wal_removal_on_hold: read_guard.wal_removal_on_hold,
            peers: read_guard.get_peers(heartbeat_timeout),
        }
    }

    fn has_unflushed_inmem_state(state: &TimelineState<FileStorage>) -> bool {
        state.inmem.commit_lsn > state.commit_lsn
            || state.inmem.backup_lsn > state.backup_lsn
            || state.inmem.peer_horizon_lsn > state.peer_horizon_lsn
            || state.inmem.remote_consistent_lsn > state.remote_consistent_lsn
    }
}

/// Control how often the manager task should wake up to check updates.
/// There is no need to check for updates more often than this.
const REFRESH_INTERVAL: Duration = Duration::from_millis(300);

pub enum ManagerCtlMessage {
    /// Request to get a guard for WalResidentTimeline, with WAL files available locally.
    GuardRequest(tokio::sync::oneshot::Sender<anyhow::Result<ResidenceGuard>>),
    /// Request to drop the guard.
    GuardDrop(GuardId),
}

impl std::fmt::Debug for ManagerCtlMessage {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ManagerCtlMessage::GuardRequest(_) => write!(f, "GuardRequest"),
            ManagerCtlMessage::GuardDrop(id) => write!(f, "GuardDrop({:?})", id),
        }
    }
}

pub struct ManagerCtl {
    manager_tx: tokio::sync::mpsc::UnboundedSender<ManagerCtlMessage>,

    // Used to initialize the manager; moved out in bootstrap_manager().
    init_manager_rx:
        std::sync::Mutex<Option<tokio::sync::mpsc::UnboundedReceiver<ManagerCtlMessage>>>,
}

impl Default for ManagerCtl {
    fn default() -> Self {
        Self::new()
    }
}

impl ManagerCtl {
    pub fn new() -> Self {
        let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
        Self {
            manager_tx: tx,
            init_manager_rx: std::sync::Mutex::new(Some(rx)),
        }
    }

    /// Issue a new guard and wait for the manager to prepare the timeline.
    /// Sends a message to the manager and waits for the response.
    /// Can block indefinitely if the manager is stuck.
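    ///
    /// A usage sketch (hypothetical caller; `ctl` is the timeline's ManagerCtl):
    /// ```ignore
    /// // May wait indefinitely if the manager is stuck or busy unevicting.
    /// let guard: ResidenceGuard = ctl.wal_residence_guard().await?;
    /// ```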
    pub async fn wal_residence_guard(&self) -> anyhow::Result<ResidenceGuard> {
        let (tx, rx) = tokio::sync::oneshot::channel();
        self.manager_tx.send(ManagerCtlMessage::GuardRequest(tx))?;

        // wait for the manager to respond with the guard
        rx.await
            .map_err(|e| anyhow::anyhow!("response read fail: {:?}", e))
            .and_then(std::convert::identity)
    }

    /// Must be called exactly once to bootstrap the manager.
    pub fn bootstrap_manager(
        &self,
    ) -> (
        tokio::sync::mpsc::UnboundedSender<ManagerCtlMessage>,
        tokio::sync::mpsc::UnboundedReceiver<ManagerCtlMessage>,
    ) {
        let rx = self
            .init_manager_rx
            .lock()
            .expect("mutex init_manager_rx poisoned")
            .take()
            .expect("manager already bootstrapped");

        (self.manager_tx.clone(), rx)
    }
}

pub(crate) struct Manager {
    // configuration & dependencies
    pub(crate) tli: ManagerTimeline,
    pub(crate) conf: SafeKeeperConf,
    pub(crate) wal_seg_size: usize,
    pub(crate) walsenders: Arc<WalSenders>,

    // current state
    pub(crate) state_version_rx: tokio::sync::watch::Receiver<usize>,
    pub(crate) num_computes_rx: tokio::sync::watch::Receiver<usize>,
    pub(crate) tli_broker_active: TimelineSetGuard,
    pub(crate) last_removed_segno: XLogSegNo,
    pub(crate) is_offloaded: bool,

    // background tasks
    pub(crate) backup_task: Option<WalBackupTaskHandle>,
    pub(crate) recovery_task: Option<JoinHandle<()>>,
    pub(crate) wal_removal_task: Option<JoinHandle<anyhow::Result<u64>>>,

    // partial backup
    pub(crate) partial_backup_task: Option<JoinHandle<Option<PartialRemoteSegment>>>,
    pub(crate) partial_backup_uploaded: Option<PartialRemoteSegment>,

    // misc
    pub(crate) access_service: AccessService,
}

/// This task gets spawned alongside each timeline and is responsible for managing the timeline's
/// background tasks.
/// Be careful: this task is not respawned on panic, so it should not panic.
#[instrument(name = "manager", skip_all, fields(ttid = %tli.ttid))]
pub async fn main_task(
    tli: ManagerTimeline,
    conf: SafeKeeperConf,
    broker_active_set: Arc<TimelinesSet>,
    manager_tx: tokio::sync::mpsc::UnboundedSender<ManagerCtlMessage>,
    mut manager_rx: tokio::sync::mpsc::UnboundedReceiver<ManagerCtlMessage>,
) {
    tli.set_status(Status::Started);

    let defer_tli = tli.tli.clone();
    scopeguard::defer! {
        if defer_tli.is_cancelled() {
            info!("manager task finished");
        } else {
            warn!("manager task finished prematurely");
        }
    };

    let mut mgr = Manager::new(tli, conf, broker_active_set, manager_tx).await;

    // Start the recovery task, which always runs on the timeline.
    if !mgr.is_offloaded && mgr.conf.peer_recovery_enabled {
        let tli = mgr.wal_resident_timeline();
        mgr.recovery_task = Some(tokio::spawn(recovery_main(tli, mgr.conf.clone())));
    }

    let last_state = 'outer: loop {
        MANAGER_ITERATIONS_TOTAL.inc();

        mgr.set_status(Status::StateSnapshot);
        let state_snapshot = mgr.state_snapshot().await;

        let mut next_event: Option<Instant> = None;
        if !mgr.is_offloaded {
            let num_computes = *mgr.num_computes_rx.borrow();

            mgr.set_status(Status::UpdateBackup);
            let is_wal_backup_required = mgr.update_backup(num_computes, &state_snapshot).await;
            mgr.update_is_active(is_wal_backup_required, num_computes, &state_snapshot);

            mgr.set_status(Status::UpdateControlFile);
            mgr.update_control_file_save(&state_snapshot, &mut next_event)
                .await;

            mgr.set_status(Status::UpdateWalRemoval);
            mgr.update_wal_removal(&state_snapshot).await;

            mgr.set_status(Status::UpdatePartialBackup);
            mgr.update_partial_backup(&state_snapshot).await;

            if mgr.conf.enable_offload && mgr.ready_for_eviction(&next_event, &state_snapshot) {
                mgr.set_status(Status::EvictTimeline);
                mgr.evict_timeline().await;
            }
        }

        mgr.set_status(Status::Wait);
        // Wait until something changes. The tx channels are stored under Arc, so they will not be
        // dropped until the manager task is finished.
        tokio::select! {
            _ = mgr.tli.cancel.cancelled() => {
                // timeline was deleted
                break 'outer state_snapshot;
            }
            _ = async {
                // don't wake up on every state change, but at most every REFRESH_INTERVAL
                tokio::time::sleep(REFRESH_INTERVAL).await;
                let _ = mgr.state_version_rx.changed().await;
            } => {
                // state was updated
            }
            _ = mgr.num_computes_rx.changed() => {
                // number of connected computes was updated
            }
            _ = sleep_until(&next_event) => {
                // we were waiting for some event (e.g. a control file save)
            }
            res = await_task_finish(&mut mgr.wal_removal_task) => {
                // WAL removal task finished
                mgr.wal_removal_task = None;
                mgr.update_wal_removal_end(res);
            }
            res = await_task_finish(&mut mgr.partial_backup_task) => {
                // partial backup task finished
                mgr.partial_backup_task = None;
                mgr.update_partial_backup_end(res);
            }
            msg = manager_rx.recv() => {
                mgr.set_status(Status::HandleMessage);
                mgr.handle_message(msg).await;
            }
        }
    };
    mgr.set_status(Status::Exiting);

    // Remove the timeline from the broker active set sooner, before waiting for background tasks.
    mgr.tli_broker_active.set(false);

    // shutdown background tasks
    if mgr.conf.is_wal_backup_enabled() {
        wal_backup::update_task(&mut mgr, false, &last_state).await;
    }

    if let Some(recovery_task) = &mut mgr.recovery_task {
        if let Err(e) = recovery_task.await {
            warn!("recovery task failed: {:?}", e);
        }
    }

    if let Some(partial_backup_task) = &mut mgr.partial_backup_task {
        if let Err(e) = partial_backup_task.await {
            warn!("partial backup task failed: {:?}", e);
        }
    }

    if let Some(wal_removal_task) = &mut mgr.wal_removal_task {
        let res = wal_removal_task.await;
        mgr.update_wal_removal_end(res);
    }

    mgr.set_status(Status::Finished);
}

impl Manager {
    async fn new(
        tli: ManagerTimeline,
        conf: SafeKeeperConf,
        broker_active_set: Arc<TimelinesSet>,
        manager_tx: tokio::sync::mpsc::UnboundedSender<ManagerCtlMessage>,
    ) -> Manager {
        let (is_offloaded, partial_backup_uploaded) = tli.bootstrap_mgr().await;
        Manager {
            conf,
            wal_seg_size: tli.get_wal_seg_size().await,
            walsenders: tli.get_walsenders().clone(),
            state_version_rx: tli.get_state_version_rx(),
            num_computes_rx: tli.get_walreceivers().get_num_rx(),
            tli_broker_active: broker_active_set.guard(tli.clone()),
            last_removed_segno: 0,
            is_offloaded,
            backup_task: None,
            recovery_task: None,
            wal_removal_task: None,
            partial_backup_task: None,
            partial_backup_uploaded,
            access_service: AccessService::new(manager_tx),
            tli,
        }
    }

    fn set_status(&self, status: Status) {
        self.tli.set_status(status);
    }

    /// Get a WalResidentTimeline.
    /// Manager code must use this function instead of the one on `Timeline`
    /// directly, because the latter would deadlock.
    pub(crate) fn wal_resident_timeline(&mut self) -> WalResidentTimeline {
        assert!(!self.is_offloaded);
        let guard = self.access_service.create_guard();
        WalResidentTimeline::new(self.tli.clone(), guard)
    }

    /// Get a snapshot of the timeline state.
    async fn state_snapshot(&self) -> StateSnapshot {
        StateSnapshot::new(
            self.tli.read_shared_state().await,
            self.conf.heartbeat_timeout,
        )
    }

    /// Spawns or kills the backup task and returns true if backup is required.
    async fn update_backup(&mut self, num_computes: usize, state: &StateSnapshot) -> bool {
        let is_wal_backup_required =
            wal_backup::is_wal_backup_required(self.wal_seg_size, num_computes, state);

        if self.conf.is_wal_backup_enabled() {
            wal_backup::update_task(self, is_wal_backup_required, state).await;
        }

        // update the state in Arc<Timeline>
        self.tli.wal_backup_active.store(
            self.backup_task.is_some(),
            std::sync::atomic::Ordering::Relaxed,
        );
        is_wal_backup_required
    }

    /// Update the is_active flag and propagate it to the broker timeline set.
    fn update_is_active(
        &mut self,
        is_wal_backup_required: bool,
        num_computes: usize,
        state: &StateSnapshot,
    ) {
        let is_active = is_wal_backup_required
            || num_computes > 0
            || state.remote_consistent_lsn < state.commit_lsn;

        // update the broker timeline set
        if self.tli_broker_active.set(is_active) {
            // log if the state has changed
            info!(
                "timeline active={} now, remote_consistent_lsn={}, commit_lsn={}",
                is_active, state.remote_consistent_lsn, state.commit_lsn,
            );

            MANAGER_ACTIVE_CHANGES.inc();
        }

        // update the state in Arc<Timeline>
        self.tli
            .broker_active
            .store(is_active, std::sync::atomic::Ordering::Relaxed);
    }

    /// Save the control file if needed. Sets `next_event` if the control file
    /// should be persisted at some point in the future.
    async fn update_control_file_save(
        &self,
        state: &StateSnapshot,
        next_event: &mut Option<Instant>,
    ) {
        if !state.inmem_flush_pending {
            return;
        }

        if state.cfile_last_persist_at.elapsed() > self.conf.control_file_save_interval {
            let mut write_guard = self.tli.write_shared_state().await;
            // Ideally this should be done in the background, because it blocks the
            // manager task, but flush() should be fast enough not to be a problem for now.
            if let Err(e) = write_guard.sk.state_mut().flush().await {
                warn!("failed to save control file: {:?}", e);
            }
        } else {
            // we should wait until enough time has passed before the next save
            update_next_event(
                next_event,
                (state.cfile_last_persist_at + self.conf.control_file_save_interval).into(),
            );
        }
    }

    /// Spawns the WAL removal task if needed.
    async fn update_wal_removal(&mut self, state: &StateSnapshot) {
        if self.wal_removal_task.is_some() || state.wal_removal_on_hold {
            // WAL removal is already in progress or on hold
            return;
        }

        // If enabled, we use the LSN of the most lagging walsender as the WAL removal horizon.
        // This gives better read speed for pageservers that are lagging behind,
        // at the cost of keeping more WAL on disk.
        let replication_horizon_lsn = if self.conf.walsenders_keep_horizon {
            self.walsenders.laggard_lsn()
        } else {
            None
        };

        let removal_horizon_lsn = calc_horizon_lsn(state, replication_horizon_lsn);
        let removal_horizon_segno = removal_horizon_lsn
            .segment_number(self.wal_seg_size)
            .saturating_sub(1);

        if removal_horizon_segno > self.last_removed_segno {
            // we need to remove WAL
            let remover = match self.tli.read_shared_state().await.sk {
                StateSK::Loaded(ref sk) => {
                    crate::wal_storage::Storage::remove_up_to(&sk.wal_store, removal_horizon_segno)
                }
                StateSK::Offloaded(_) => {
                    // we can't remove WAL if it's not loaded
                    warn!("unexpectedly trying to run WAL removal on offloaded timeline");
                    return;
                }
                StateSK::Empty => unreachable!(),
            };

            self.wal_removal_task = Some(tokio::spawn(
                async move {
                    remover.await?;
                    Ok(removal_horizon_segno)
                }
                .instrument(info_span!("WAL removal", ttid=%self.tli.ttid)),
            ));
        }
    }

    /// Update the state after the WAL removal task finishes.
    fn update_wal_removal_end(&mut self, res: Result<anyhow::Result<u64>, JoinError>) {
        let new_last_removed_segno = match res {
            Ok(Ok(segno)) => segno,
            Err(e) => {
                warn!("WAL removal task failed: {:?}", e);
                return;
            }
            Ok(Err(e)) => {
                warn!("WAL removal task failed: {:?}", e);
                return;
            }
        };

        self.last_removed_segno = new_last_removed_segno;
        // update the state in Arc<Timeline>
        self.tli
            .last_removed_segno
            .store(new_last_removed_segno, std::sync::atomic::Ordering::Relaxed);
    }

    /// Spawns the partial WAL backup task if needed.
    async fn update_partial_backup(&mut self, state: &StateSnapshot) {
        // check if partial backup is enabled and should be started
        if !self.conf.is_wal_backup_enabled() || !self.conf.partial_backup_enabled {
            return;
        }

        if self.partial_backup_task.is_some() {
            // partial backup is already running
            return;
        }

        if !wal_backup_partial::needs_uploading(state, &self.partial_backup_uploaded) {
            // nothing to upload
            return;
        }

        // Get a WalResidentTimeline and start the partial backup task.
        self.partial_backup_task = Some(tokio::spawn(wal_backup_partial::main_task(
            self.wal_resident_timeline(),
            self.conf.clone(),
        )));
    }

    /// Update the state after the partial WAL backup task finishes.
    fn update_partial_backup_end(&mut self, res: Result<Option<PartialRemoteSegment>, JoinError>) {
        match res {
            Ok(new_upload_state) => {
                self.partial_backup_uploaded = new_upload_state;
            }
            Err(e) => {
                warn!("partial backup task panicked: {:?}", e);
            }
        }
    }

    /// Handle a message that arrived from ManagerCtl.
    async fn handle_message(&mut self, msg: Option<ManagerCtlMessage>) {
        debug!("received manager message: {:?}", msg);
        match msg {
            Some(ManagerCtlMessage::GuardRequest(tx)) => {
                if self.is_offloaded {
                    // try to unevict the timeline, but without a guarantee that it will succeed
                    self.unevict_timeline().await;
                }

                let guard = if self.is_offloaded {
                    Err(anyhow::anyhow!("timeline is offloaded, can't get a guard"))
                } else {
                    Ok(self.access_service.create_guard())
                };

                if tx.send(guard).is_err() {
                    warn!("failed to reply with a guard, receiver dropped");
                }
            }
            Some(ManagerCtlMessage::GuardDrop(guard_id)) => {
                self.access_service.drop_guard(guard_id);
            }
            None => {
                // can't happen, we're holding the sender
                unreachable!();
            }
        }
    }
}

// utility functions
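/// Sleep until the given instant, or forever if `None`. Pending forever makes
/// this safe to use as a `select!` arm that simply never fires when no event
/// is scheduled.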
async fn sleep_until(option: &Option<tokio::time::Instant>) {
    if let Some(timeout) = option {
        tokio::time::sleep_until(*timeout).await;
    } else {
        futures::future::pending::<()>().await;
    }
}

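/// Await the task's completion if present, or pend forever if `None`, so the
/// corresponding `select!` arm in `main_task` only fires while a task is
/// actually running.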
async fn await_task_finish<T>(option: &mut Option<JoinHandle<T>>) -> Result<T, JoinError> {
    if let Some(task) = option {
        task.await
    } else {
        futures::future::pending().await
    }
}

/// Update next_event if the candidate is earlier.
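///
/// A behavior sketch (hypothetical instants `t1 < t2 < t3`; earlier candidates win):
/// ```ignore
/// let mut next_event = Some(t2);
/// update_next_event(&mut next_event, t1); // t1 < t2, so next_event becomes Some(t1)
/// update_next_event(&mut next_event, t3); // t3 > t1, so next_event stays Some(t1)
/// ```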
fn update_next_event(next_event: &mut Option<Instant>, candidate: Instant) {
    if let Some(next) = next_event {
        if candidate < *next {
            *next = candidate;
        }
    } else {
        *next_event = Some(candidate);
    }
}

#[repr(usize)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Status {
    NotStarted,
    Started,
    StateSnapshot,
    UpdateBackup,
    UpdateControlFile,
    UpdateWalRemoval,
    UpdatePartialBackup,
    EvictTimeline,
    Wait,
    HandleMessage,
    Exiting,
    Finished,
}

/// AtomicStatus is a wrapper around AtomicUsize adapted for the Status enum.
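///
/// A usage sketch (hypothetical call site; Relaxed ordering mirrors how the
/// manager publishes its status):
/// ```ignore
/// let status = AtomicStatus::new();
/// status.store(Status::Wait, std::sync::atomic::Ordering::Relaxed);
/// assert_eq!(status.get(), Status::Wait);
/// ```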
pub struct AtomicStatus {
    inner: AtomicUsize,
}

impl Default for AtomicStatus {
    fn default() -> Self {
        Self::new()
    }
}

impl AtomicStatus {
    pub fn new() -> Self {
        AtomicStatus {
            inner: AtomicUsize::new(Status::NotStarted as usize),
        }
    }

    pub fn load(&self, order: std::sync::atomic::Ordering) -> Status {
        // Safety: `std::mem::transmute` reinterprets the loaded value as `Status`.
        // This is safe here because `Status` is a repr(usize) enum,
        // which means it has the same memory layout as usize.
        // However, the loaded value must be a valid variant of `Status`,
        // otherwise the behavior is undefined.
        unsafe { std::mem::transmute(self.inner.load(order)) }
    }

    pub fn get(&self) -> Status {
        self.load(std::sync::atomic::Ordering::Relaxed)
    }

    pub fn store(&self, val: Status, order: std::sync::atomic::Ordering) {
        self.inner.store(val as usize, order);
    }
}