use std::{
    collections::HashMap,
    str::FromStr,
    sync::{Arc, atomic::AtomicU64},
    time::Duration,
};

use clashmap::{ClashMap, Entry};
use safekeeper_api::models::PullTimelineRequest;
use safekeeper_client::mgmt_api;
use tokio::sync::{
    Semaphore,
    mpsc::{self, UnboundedReceiver, UnboundedSender},
};
use tokio_util::sync::CancellationToken;
use tracing::Instrument;
use utils::{
    id::{NodeId, TenantId, TimelineId},
    logging::SecretString,
};

use crate::{
    persistence::SafekeeperTimelineOpKind, safekeeper::Safekeeper,
    safekeeper_client::SafekeeperClient,
};

use super::Service;

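/// Per-safekeeper reconcilers, keyed by the safekeeper's node id.
///
/// Intended call sequence, as an illustrative sketch only (construction of
/// `cancel`, `service`, `node_id`, and `req` is elided):
///
/// ```ignore
/// let mut reconcilers = SafekeeperReconcilers::new(cancel);
/// // A reconciler must be started for a safekeeper before requests for it
/// // are scheduled: `schedule_request` expects the handle to already exist.
/// reconcilers.start_reconciler(node_id, &service);
/// reconcilers.schedule_request(req);
/// ```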
pub(crate) struct SafekeeperReconcilers {
    cancel: CancellationToken,
    reconcilers: HashMap<NodeId, ReconcilerHandle>,
}

impl SafekeeperReconcilers {
    pub fn new(cancel: CancellationToken) -> Self {
        SafekeeperReconcilers {
            cancel,
            reconcilers: HashMap::new(),
        }
    }
    /// Adds a safekeeper-specific reconciler.
    /// Can be called multiple times, but it needs to be called at least once
    /// for every new safekeeper added.
    pub(crate) fn start_reconciler(&mut self, node_id: NodeId, service: &Arc<Service>) {
        self.reconcilers.entry(node_id).or_insert_with(|| {
            SafekeeperReconciler::spawn(self.cancel.child_token(), service.clone())
        });
    }
    /// Stops a safekeeper-specific reconciler, cancelling all of its ongoing tasks.
    pub(crate) fn stop_reconciler(&mut self, node_id: NodeId) {
        if let Some(handle) = self.reconcilers.remove(&node_id) {
            handle.cancel.cancel();
        }
    }
    pub(crate) fn schedule_request_vec(&self, reqs: Vec<ScheduleRequest>) {
        tracing::info!(
            "Scheduling {} pending safekeeper ops loaded from db",
            reqs.len()
        );
        for req in reqs {
            self.schedule_request(req);
        }
    }
    pub(crate) fn schedule_request(&self, req: ScheduleRequest) {
        let node_id = req.safekeeper.get_id();
        let reconciler_handle = self.reconcilers.get(&node_id).unwrap();
        reconciler_handle.schedule_reconcile(req);
    }
    /// Cancel ongoing reconciles for the given timeline.
    ///
    /// Specifying `None` for `timeline_id` only cancels the tenant-global reconciliation,
    /// not the reconciles of every timeline of the tenant.
    ///
    /// Callers must remove the pending ops from the db themselves.
    pub(crate) fn cancel_reconciles_for_timeline(
        &mut self,
        node_id: NodeId,
        tenant_id: TenantId,
        timeline_id: Option<TimelineId>,
    ) {
        if let Some(handle) = self.reconcilers.get(&node_id) {
            handle.cancel_reconciliation(tenant_id, timeline_id);
        }
    }
}

/// Initial load of the pending operations from the db
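///
/// Turns each pending op row into a [`ScheduleRequest`]; for `pull` ops, the peer
/// host list is resolved from the timeline's current safekeeper set (`sk_set`).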
pub(crate) async fn load_schedule_requests(
    service: &Arc<Service>,
    safekeepers: &HashMap<NodeId, Safekeeper>,
) -> anyhow::Result<Vec<ScheduleRequest>> {
    let pending_ops_timelines = service
        .persistence
        .list_pending_ops_with_timelines()
        .await?;
    let mut res = Vec::with_capacity(pending_ops_timelines.len());
    for (op_persist, timeline_persist) in pending_ops_timelines {
        let node_id = NodeId(op_persist.sk_id as u64);
        let Some(sk) = safekeepers.get(&node_id) else {
            // This shouldn't happen; at least the safekeeper should exist as decommissioned.
            tracing::warn!(
                tenant_id = op_persist.tenant_id,
                timeline_id = op_persist.timeline_id,
                "couldn't find safekeeper with pending op id {node_id} in list of stored safekeepers"
            );
            continue;
        };
        let sk = Box::new(sk.clone());
        let tenant_id = TenantId::from_str(&op_persist.tenant_id)?;
        let timeline_id = if !op_persist.timeline_id.is_empty() {
            Some(TimelineId::from_str(&op_persist.timeline_id)?)
        } else {
            None
        };
        let host_list = match op_persist.op_kind {
            SafekeeperTimelineOpKind::Delete => Vec::new(),
            SafekeeperTimelineOpKind::Exclude => Vec::new(),
            SafekeeperTimelineOpKind::Pull => {
                if timeline_id.is_none() {
                    // We only do this extra check (outside of the timeline_persist check) to give a better error message
                    anyhow::bail!(
                        "timeline_id is empty for `pull` schedule request for {tenant_id}"
                    );
                };
                let Some(timeline_persist) = timeline_persist else {
                    // This shouldn't happen, the timeline should still exist
                    tracing::warn!(
                        tenant_id = op_persist.tenant_id,
                        timeline_id = op_persist.timeline_id,
                        "couldn't find timeline for corresponding pull op"
                    );
                    continue;
                };
                timeline_persist
                    .sk_set
                    .iter()
                    .filter_map(|sk_id| {
                        let other_node_id = NodeId(*sk_id as u64);
                        if node_id == other_node_id {
                            // We obviously don't want to pull from ourselves
                            return None;
                        }
                        let Some(sk) = safekeepers.get(&other_node_id) else {
                            tracing::warn!(
146 0 : "couldnt find safekeeper with pending op id {other_node_id}, not pulling from it"
                            );
                            return None;
                        };
                        Some((other_node_id, sk.base_url()))
                    })
                    .collect::<Vec<_>>()
            }
        };
        let req = ScheduleRequest {
            safekeeper: sk,
            host_list,
            tenant_id,
            timeline_id,
            generation: op_persist.generation as u32,
            kind: op_persist.op_kind,
        };
        res.push(req);
    }
    Ok(res)
}

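/// A single pending operation to apply to one safekeeper.
///
/// For `Pull` ops, `host_list` holds the peer safekeepers to pull the timeline from;
/// for the other kinds it is left empty.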
pub(crate) struct ScheduleRequest {
    pub(crate) safekeeper: Box<Safekeeper>,
    pub(crate) host_list: Vec<(NodeId, String)>,
    pub(crate) tenant_id: TenantId,
    pub(crate) timeline_id: Option<TimelineId>,
    pub(crate) generation: u32,
    pub(crate) kind: SafekeeperTimelineOpKind,
}

/// A way to keep ongoing/queued reconcile requests apart
#[derive(Copy, Clone, PartialEq, Eq)]
struct TokenId(u64);

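/// Maps each `(tenant, timeline)` pair to the cancellation token and token id of its
/// currently scheduled or ongoing op. A `None` timeline refers to a tenant-wide op.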
type OngoingTokens = ClashMap<(TenantId, Option<TimelineId>), (CancellationToken, TokenId)>;

/// Handle to a per-safekeeper reconciler.
struct ReconcilerHandle {
    tx: UnboundedSender<(ScheduleRequest, CancellationToken, TokenId)>,
    ongoing_tokens: Arc<OngoingTokens>,
    token_id_counter: AtomicU64,
    cancel: CancellationToken,
}

impl ReconcilerHandle {
    /// Obtain a new token slot, cancelling any existing reconciliation for
    /// that timeline. It is not useful to have more than one operation per
    /// <tenant_id, timeline_id, safekeeper>, so scheduling a new op cancels
    /// the current one if it exists.
    fn new_token_slot(
        &self,
        tenant_id: TenantId,
        timeline_id: Option<TimelineId>,
    ) -> (CancellationToken, TokenId) {
        let token_id = self
            .token_id_counter
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let token_id = TokenId(token_id);
        let entry = self.ongoing_tokens.entry((tenant_id, timeline_id));
        if let Entry::Occupied(entry) = &entry {
            let (cancel, _) = entry.get();
            cancel.cancel();
        }
        entry.insert((self.cancel.child_token(), token_id)).clone()
    }
    /// Cancel an ongoing reconciliation
    fn cancel_reconciliation(&self, tenant_id: TenantId, timeline_id: Option<TimelineId>) {
        if let Some((_, (cancel, _id))) = self.ongoing_tokens.remove(&(tenant_id, timeline_id)) {
            cancel.cancel();
        }
    }
    fn schedule_reconcile(&self, req: ScheduleRequest) {
        let (cancel, token_id) = self.new_token_slot(req.tenant_id, req.timeline_id);
        let hostname = req.safekeeper.skp.host.clone();
        if let Err(err) = self.tx.send((req, cancel, token_id)) {
            tracing::info!("scheduling request onto {hostname} returned error: {err}");
        }
    }
}

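/// Per-safekeeper background worker: drains `ScheduleRequest`s sent through its
/// [`ReconcilerHandle`] and executes them.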
pub(crate) struct SafekeeperReconciler {
    inner: SafekeeperReconcilerInner,
    concurrency_limiter: Arc<Semaphore>,
    rx: UnboundedReceiver<(ScheduleRequest, CancellationToken, TokenId)>,
    cancel: CancellationToken,
}

/// Thin wrapper over `Service` to not clutter its inherent functions
#[derive(Clone)]
struct SafekeeperReconcilerInner {
    ongoing_tokens: Arc<OngoingTokens>,
    service: Arc<Service>,
}

impl SafekeeperReconciler {
    fn spawn(cancel: CancellationToken, service: Arc<Service>) -> ReconcilerHandle {
        // We hold the ServiceInner lock, so sending to the reconciler channel must not block.
        let (tx, rx) = mpsc::unbounded_channel();
        let concurrency = service.config.safekeeper_reconciler_concurrency;
        let ongoing_tokens = Arc::new(ClashMap::new());
        let mut reconciler = SafekeeperReconciler {
            inner: SafekeeperReconcilerInner {
                service,
                ongoing_tokens: ongoing_tokens.clone(),
            },
            rx,
            concurrency_limiter: Arc::new(Semaphore::new(concurrency)),
            cancel: cancel.clone(),
        };
        let handle = ReconcilerHandle {
            tx,
            ongoing_tokens,
            token_id_counter: AtomicU64::new(0),
            cancel,
        };
        tokio::spawn(async move { reconciler.run().await });
        handle
    }
    async fn run(&mut self) {
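        // Dispatch loop: receive scheduled requests, acquire a permit from the
        // concurrency limiter, and spawn each reconciliation as its own task.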
        loop {
            let req = tokio::select! {
                req = self.rx.recv() => req,
                _ = self.cancel.cancelled() => break,
            };
            let Some((req, req_cancel, req_token_id)) = req else {
                break;
            };

            let permit_res = tokio::select! {
                req = self.concurrency_limiter.clone().acquire_owned() => req,
                _ = self.cancel.cancelled() => break,
            };
            let Ok(_permit) = permit_res else { return };

            let inner = self.inner.clone();
            if req_cancel.is_cancelled() {
                continue;
            }

            tokio::task::spawn(async move {
                let kind = req.kind;
                let tenant_id = req.tenant_id;
                let timeline_id = req.timeline_id;
                let node_id = req.safekeeper.skp.id;
                inner
                    .reconcile_one(req, req_cancel, req_token_id)
                    .instrument(tracing::info_span!(
                        "reconcile_one",
                        ?kind,
                        %tenant_id,
                        ?timeline_id,
                        %node_id,
                    ))
                    .await;
            });
        }
    }
}

impl SafekeeperReconcilerInner {
    async fn reconcile_one(
        &self,
        req: ScheduleRequest,
        req_cancel: CancellationToken,
        req_token_id: TokenId,
    ) {
        let req_host = req.safekeeper.skp.host.clone();
        let success;
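        // Dispatch on the kind of pending op: `Pull` copies the timeline over from
        // peer safekeepers, `Exclude` currently just deletes the timeline (see the
        // TODO below), and `Delete` removes either a single timeline or the whole tenant.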
        match req.kind {
            SafekeeperTimelineOpKind::Pull => {
                let Some(timeline_id) = req.timeline_id else {
                    tracing::warn!(
                        "ignoring invalid schedule request: timeline_id is empty for `pull`"
                    );
                    return;
                };
                let our_id = req.safekeeper.get_id();
                let http_hosts = req
                    .host_list
                    .iter()
                    .filter(|(node_id, _hostname)| *node_id != our_id)
                    .map(|(_, hostname)| hostname.clone())
                    .collect::<Vec<_>>();
                let pull_req = PullTimelineRequest {
                    http_hosts,
                    tenant_id: req.tenant_id,
                    timeline_id,
                    ignore_tombstone: Some(false),
                };
                success = self
                    .reconcile_inner(
                        &req,
                        async |client| client.pull_timeline(&pull_req).await,
                        |resp| {
                            if let Some(host) = resp.safekeeper_host {
                                tracing::info!("pulled timeline from {host} onto {req_host}");
                            } else {
                                tracing::info!(
345 0 : "timeline already present on safekeeper on {req_host}"
                                );
                            }
                        },
                        req_cancel,
                    )
                    .await;
            }
            SafekeeperTimelineOpKind::Exclude => {
                // TODO actually exclude instead of delete here
                let tenant_id = req.tenant_id;
                let Some(timeline_id) = req.timeline_id else {
                    tracing::warn!(
                        "ignoring invalid schedule request: timeline_id is empty for `exclude`"
                    );
                    return;
                };
                success = self
                    .reconcile_inner(
                        &req,
                        async |client| client.delete_timeline(tenant_id, timeline_id).await,
                        |_resp| {
                            tracing::info!("deleted timeline from {req_host}");
                        },
                        req_cancel,
                    )
                    .await;
            }
            SafekeeperTimelineOpKind::Delete => {
                let tenant_id = req.tenant_id;
                if let Some(timeline_id) = req.timeline_id {
                    success = self
                        .reconcile_inner(
                            &req,
                            async |client| client.delete_timeline(tenant_id, timeline_id).await,
                            |_resp| {
                                tracing::info!("deleted timeline from {req_host}");
                            },
                            req_cancel,
                        )
                        .await;
                    if success {
                        self.delete_timeline_from_db(tenant_id, timeline_id).await;
                    }
                } else {
                    success = self
                        .reconcile_inner(
                            &req,
                            async |client| client.delete_tenant(tenant_id).await,
                            |_resp| {
                                tracing::info!(%tenant_id, "deleted tenant from {req_host}");
                            },
                            req_cancel,
                        )
                        .await;
                    if success {
                        self.delete_tenant_timelines_from_db(tenant_id).await;
                    }
                }
            }
        }
        if success {
            self.ongoing_tokens.remove_if(
                &(req.tenant_id, req.timeline_id),
                |_ttid, (_cancel, token_id)| {
                    // Ensure that this request is indeed the request we just finished and not a new one
                    req_token_id == *token_id
                },
            );
        }
    }
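    /// Delete the timeline row from the db, but only once no pending ops remain for it.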
    async fn delete_timeline_from_db(&self, tenant_id: TenantId, timeline_id: TimelineId) {
        match self
            .service
            .persistence
            .list_pending_ops_for_timeline(tenant_id, timeline_id)
            .await
        {
            Ok(list) => {
                if !list.is_empty() {
                    // duplicate the timeline_id here because it might be None in the reconcile context
                    tracing::info!(%timeline_id, "not deleting timeline from db as there are {} open reconciles", list.len());
                    return;
                }
            }
            Err(e) => {
                tracing::warn!(%timeline_id, "couldn't query pending ops: {e}");
                return;
            }
        }
        tracing::info!(%tenant_id, %timeline_id, "deleting timeline from db after all reconciles succeeded");
        // In theory we could crash right after deleting the op from the db and right before reaching this,
        // but then we'll boot up with a timeline that has deleted_at set, so hopefully we'll issue deletion ops for it again.
        if let Err(err) = self
            .service
            .persistence
            .delete_timeline(tenant_id, timeline_id)
            .await
        {
            tracing::warn!(%tenant_id, %timeline_id, "couldn't delete timeline from db: {err}");
        }
    }
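    /// Delete all of a tenant's timeline rows from the db, each one only once its
    /// pending ops are gone.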
    async fn delete_tenant_timelines_from_db(&self, tenant_id: TenantId) {
        let timeline_list = match self
            .service
            .persistence
            .list_timelines_for_tenant(tenant_id)
            .await
        {
            Ok(timeline_list) => timeline_list,
            Err(e) => {
                tracing::warn!(%tenant_id, "couldn't query timelines: {e}");
                return;
            }
        };
        for timeline in timeline_list {
            let Ok(timeline_id) = TimelineId::from_str(&timeline.timeline_id) else {
                tracing::warn!("Invalid timeline ID in database {}", timeline.timeline_id);
                continue;
            };
            self.delete_timeline_from_db(tenant_id, timeline_id).await;
        }
    }
    /// Returns `true` if the reconciliation succeeded, or `false` if it was cancelled.
    async fn reconcile_inner<T, F, U>(
        &self,
        req: &ScheduleRequest,
        closure: impl Fn(SafekeeperClient) -> F,
        log_success: impl FnOnce(T) -> U,
        req_cancel: CancellationToken,
    ) -> bool
    where
        F: Future<Output = Result<T, safekeeper_client::mgmt_api::Error>>,
    {
        let jwt = self
            .service
            .config
            .safekeeper_jwt_token
            .clone()
            .map(SecretString::from);
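        // Retry the request until it succeeds or the op is cancelled; on success, the
        // corresponding pending op row is removed from the db before returning.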
        loop {
            let res = req
                .safekeeper
                .with_client_retries(
                    |client| {
                        let closure = &closure;
                        async move { closure(client).await }
                    },
                    self.service.get_http_client(),
                    &jwt,
                    3,
                    10,
                    Duration::from_secs(10),
                    &req_cancel,
                )
                .await;
            match res {
                Ok(resp) => {
                    log_success(resp);
                    let res = self
                        .service
                        .persistence
                        .remove_pending_op(
                            req.tenant_id,
                            req.timeline_id,
                            req.safekeeper.get_id(),
                            req.generation,
                        )
                        .await;
                    if let Err(err) = res {
                        tracing::info!(
                            "couldn't remove reconciliation request onto {} from persistence: {err:?}",
                            req.safekeeper.skp.host
                        );
                    }
                    return true;
                }
                Err(mgmt_api::Error::Cancelled) => {
                    // On cancellation, the code that issued it will take care of removing db entries (if needed)
                    return false;
                }
                Err(e) => {
                    tracing::info!(
                        "Reconcile attempt for safekeeper {} failed, retrying after sleep: {e:?}",
                        req.safekeeper.skp.host
                    );
                    const SLEEP_TIME: Duration = Duration::from_secs(1);
                    tokio::time::sleep(SLEEP_TIME).await;
                }
            }
        }
    }
}