Line data Source code
1 : use std::sync::Arc;
2 : use std::{collections::HashMap, time::Duration};
3 :
4 : use control_plane::endpoint::{ComputeControlPlane, EndpointStatus};
5 : use control_plane::local_env::LocalEnv;
6 : use futures::StreamExt;
7 : use hyper::StatusCode;
8 : use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId};
9 : use postgres_connection::parse_host_port;
10 : use serde::{Deserialize, Serialize};
11 : use tokio_util::sync::CancellationToken;
12 : use tracing::{info_span, Instrument};
13 : use utils::{
14 : backoff::{self},
15 : id::{NodeId, TenantId},
16 : };
17 :
18 : use crate::service::Config;
19 :
20 : const SLOWDOWN_DELAY: Duration = Duration::from_secs(5);
21 :
22 : const NOTIFY_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
23 :
24 : pub(crate) const API_CONCURRENCY: usize = 32;
25 :
26 : struct UnshardedComputeHookTenant {
27 : // Which node is this tenant attached to
28 : node_id: NodeId,
29 :
30 : // Must hold this lock to send a notification.
31 : send_lock: Arc<tokio::sync::Mutex<Option<ComputeHookNotifyRequest>>>,
32 : }
33 : struct ShardedComputeHookTenant {
34 : stripe_size: ShardStripeSize,
35 : shard_count: ShardCount,
36 : shards: Vec<(ShardNumber, NodeId)>,
37 :
38 : // Must hold this lock to send a notification. The contents represent
39 : // the last successfully sent notification, and are used to coalesce multiple
40 : // updates by only sending when there is a change since our last successful send.
41 : send_lock: Arc<tokio::sync::Mutex<Option<ComputeHookNotifyRequest>>>,
42 : }
43 :
44 : enum ComputeHookTenant {
45 : Unsharded(UnshardedComputeHookTenant),
46 : Sharded(ShardedComputeHookTenant),
47 : }
48 :
49 : impl ComputeHookTenant {
50 : /// Construct with at least one shard's information
51 2 : fn new(tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize, node_id: NodeId) -> Self {
52 2 : if tenant_shard_id.shard_count.count() > 1 {
53 1 : Self::Sharded(ShardedComputeHookTenant {
54 1 : shards: vec![(tenant_shard_id.shard_number, node_id)],
55 1 : stripe_size,
56 1 : shard_count: tenant_shard_id.shard_count,
57 1 : send_lock: Arc::default(),
58 1 : })
59 : } else {
60 1 : Self::Unsharded(UnshardedComputeHookTenant {
61 1 : node_id,
62 1 : send_lock: Arc::default(),
63 1 : })
64 : }
65 2 : }
66 :
67 4 : fn get_send_lock(&self) -> &Arc<tokio::sync::Mutex<Option<ComputeHookNotifyRequest>>> {
68 4 : match self {
69 2 : Self::Unsharded(unsharded_tenant) => &unsharded_tenant.send_lock,
70 2 : Self::Sharded(sharded_tenant) => &sharded_tenant.send_lock,
71 : }
72 4 : }
73 :
74 : /// Set one shard's location. If the stripe size or shard count has changed, Self is reset
75 : /// and drops its existing content.
76 2 : fn update(
77 2 : &mut self,
78 2 : tenant_shard_id: TenantShardId,
79 2 : stripe_size: ShardStripeSize,
80 2 : node_id: NodeId,
81 2 : ) {
82 1 : match self {
83 1 : Self::Unsharded(unsharded_tenant) if tenant_shard_id.shard_count.count() == 1 => {
84 0 : unsharded_tenant.node_id = node_id
85 : }
86 1 : Self::Sharded(sharded_tenant)
87 1 : if sharded_tenant.stripe_size == stripe_size
88 1 : && sharded_tenant.shard_count == tenant_shard_id.shard_count =>
89 : {
90 1 : if let Some(existing) = sharded_tenant
91 1 : .shards
92 1 : .iter()
93 1 : .position(|s| s.0 == tenant_shard_id.shard_number)
94 0 : {
95 0 : sharded_tenant.shards.get_mut(existing).unwrap().1 = node_id;
96 0 : } else {
97 1 : sharded_tenant
98 1 : .shards
99 1 : .push((tenant_shard_id.shard_number, node_id));
100 2 : sharded_tenant.shards.sort_by_key(|s| s.0)
101 : }
102 : }
103 1 : _ => {
104 1 : // Shard count or stripe size changed: reset the struct.
105 1 : *self = Self::new(tenant_shard_id, stripe_size, node_id);
106 1 : }
107 : }
108 2 : }
109 : }
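// Example of the reset behaviour above (it mirrors the `tenant_updates` unit test at the
// bottom of this file): an Unsharded entry that receives an update for a TenantShardId
// with shard_count == 2 matches neither of the first two arms, so it falls through to the
// catch-all arm and is rebuilt as Sharded, discarding the previously recorded node.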
110 :
111 0 : #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
112 : struct ComputeHookNotifyRequestShard {
113 : node_id: NodeId,
114 : shard_number: ShardNumber,
115 : }
116 :
117 : /// Request body that we send to the control plane to notify it of where a tenant is attached
118 0 : #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
119 : struct ComputeHookNotifyRequest {
120 : tenant_id: TenantId,
121 : stripe_size: Option<ShardStripeSize>,
122 : shards: Vec<ComputeHookNotifyRequestShard>,
123 : }
124 :
125 : /// Error type for attempts to call into the control plane compute notification hook
126 0 : #[derive(thiserror::Error, Debug)]
127 : pub(crate) enum NotifyError {
128 : // Request was not sent successfully, e.g. transport error
129 : #[error("Sending request: {0}")]
130 : Request(#[from] reqwest::Error),
131 : // Request could not be serviced right now due to an ongoing operation in the control plane, but should be possible soon.
132 : #[error("Control plane tenant busy")]
133 : Busy,
134 : // Explicit 429 response asking us to retry less frequently
135 : #[error("Control plane overloaded")]
136 : SlowDown,
137 : // A 503 response indicates the control plane can't handle the request right now
138 : #[error("Control plane unavailable (status {0})")]
139 : Unavailable(StatusCode),
140 : // API returned unexpected non-success status. We will retry, but log a warning.
141 : #[error("Control plane returned unexpected status {0}")]
142 : Unexpected(StatusCode),
143 : // We shutdown while sending
144 : #[error("Shutting down")]
145 : ShuttingDown,
146 : // A response indicates we will never succeed, such as 400 or 404
147 : #[error("Non-retryable error {0}")]
148 : Fatal(StatusCode),
149 :
150 : #[error("neon_local error: {0}")]
151 : NeonLocal(anyhow::Error),
152 : }
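// How `do_notify_iteration` below maps control plane responses onto these variants:
// 429 -> SlowDown, 423 (Locked) -> Busy, 502/503/504 -> Unavailable,
// 400/401/403 -> Fatal, any other non-2xx -> Unexpected, and transport failures -> Request.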
153 :
154 : enum MaybeSendResult {
155 : // Please send this request while holding the lock, and if you succeed then write
156 : // the request into the lock.
157 : Transmit(
158 : (
159 : ComputeHookNotifyRequest,
160 : tokio::sync::OwnedMutexGuard<Option<ComputeHookNotifyRequest>>,
161 : ),
162 : ),
163 : // Something requires sending, but you must wait for a current sender then call again
164 : AwaitLock(Arc<tokio::sync::Mutex<Option<ComputeHookNotifyRequest>>>),
165 : // Nothing requires sending
166 : Noop,
167 : }
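// How callers drive this result type: [`ComputeHook::notify_prepare`] produces it via
// [`ComputeHookTenant::maybe_send`], and [`ComputeHook::notify_execute`] then acts on it:
// Transmit is sent right away, AwaitLock waits for the in-flight sender and re-runs
// maybe_send with the acquired guard, and Noop returns without sending anything.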
168 :
169 : impl ComputeHookTenant {
170 4 : fn maybe_send(
171 4 : &self,
172 4 : tenant_id: TenantId,
173 4 : lock: Option<tokio::sync::OwnedMutexGuard<Option<ComputeHookNotifyRequest>>>,
174 4 : ) -> MaybeSendResult {
175 4 : let locked = match lock {
176 0 : Some(already_locked) => already_locked,
177 : None => {
178 : // Lock order: this _must_ be only a try_lock, because we are called inside of the [`ComputeHook::state`] lock.
179 4 : let Ok(locked) = self.get_send_lock().clone().try_lock_owned() else {
180 0 : return MaybeSendResult::AwaitLock(self.get_send_lock().clone());
181 : };
182 4 : locked
183 : }
184 : };
185 :
186 4 : let request = match self {
187 2 : Self::Unsharded(unsharded_tenant) => Some(ComputeHookNotifyRequest {
188 2 : tenant_id,
189 2 : shards: vec![ComputeHookNotifyRequestShard {
190 2 : shard_number: ShardNumber(0),
191 2 : node_id: unsharded_tenant.node_id,
192 2 : }],
193 2 : stripe_size: None,
194 2 : }),
195 2 : Self::Sharded(sharded_tenant)
196 2 : if sharded_tenant.shards.len() == sharded_tenant.shard_count.count() as usize =>
197 1 : {
198 1 : Some(ComputeHookNotifyRequest {
199 1 : tenant_id,
200 1 : shards: sharded_tenant
201 1 : .shards
202 1 : .iter()
203 2 : .map(|(shard_number, node_id)| ComputeHookNotifyRequestShard {
204 2 : shard_number: *shard_number,
205 2 : node_id: *node_id,
206 2 : })
207 1 : .collect(),
208 1 : stripe_size: Some(sharded_tenant.stripe_size),
209 1 : })
210 : }
211 1 : Self::Sharded(sharded_tenant) => {
212 1 : // Sharded tenant doesn't yet have information for all its shards
213 1 :
214 1 : tracing::info!(
215 0 : "ComputeHookTenant::maybe_send: not enough shards ({}/{})",
216 0 : sharded_tenant.shards.len(),
217 0 : sharded_tenant.shard_count.count()
218 : );
219 1 : None
220 : }
221 : };
222 :
223 3 : match request {
224 : None => {
225 : // Not yet ready to emit a notification
226 1 : tracing::info!("Tenant isn't yet ready to emit a notification");
227 1 : MaybeSendResult::Noop
228 : }
229 3 : Some(request) if Some(&request) == locked.as_ref() => {
230 1 : // No change from the last value successfully sent
231 1 : MaybeSendResult::Noop
232 : }
233 2 : Some(request) => MaybeSendResult::Transmit((request, locked)),
234 : }
235 4 : }
236 : }
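// A worked example of the coalescing above, for an unsharded tenant: if node 1 was the
// last successfully notified location, and the tenant briefly moves to node 2 and back to
// node 1 before any send happens, the request built here equals the stored last-sent
// request, so maybe_send returns Noop and no redundant notification is emitted.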
237 :
238 : /// The compute hook is a destination for notifications about changes to tenant:pageserver
239 : /// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures
240 : /// the compute connection string.
241 : pub(super) struct ComputeHook {
242 : config: Config,
243 : state: std::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>,
244 : authorization_header: Option<String>,
245 :
246 : // Concurrency limiter, so that we do not overload the cloud control plane when updating
247 : // large numbers of tenants (e.g. when failing over after a node failure)
248 : api_concurrency: tokio::sync::Semaphore,
249 :
250 : // This lock is only used in testing environments, to serialize calls into neon_local
251 : neon_local_lock: tokio::sync::Mutex<()>,
252 :
253 : // We share a client across all notifications to enable connection re-use etc when
254 : // sending large numbers of notifications
255 : client: reqwest::Client,
256 : }
257 :
258 : impl ComputeHook {
259 0 : pub(super) fn new(config: Config) -> Self {
260 0 : let authorization_header = config
261 0 : .control_plane_jwt_token
262 0 : .clone()
263 0 : .map(|jwt| format!("Bearer {}", jwt));
264 0 :
265 0 : let client = reqwest::ClientBuilder::new()
266 0 : .timeout(NOTIFY_REQUEST_TIMEOUT)
267 0 : .build()
268 0 : .expect("Failed to construct HTTP client");
269 0 :
270 0 : Self {
271 0 : state: Default::default(),
272 0 : config,
273 0 : authorization_header,
274 0 : neon_local_lock: Default::default(),
275 0 : api_concurrency: tokio::sync::Semaphore::new(API_CONCURRENCY),
276 0 : client,
277 0 : }
278 0 : }
279 :
280 : /// For test environments: use neon_local's LocalEnv to update compute
281 0 : async fn do_notify_local(
282 0 : &self,
283 0 : reconfigure_request: &ComputeHookNotifyRequest,
284 0 : ) -> Result<(), NotifyError> {
285 : // neon_local updates are not safe to call concurrently, use a lock to serialize
286 : // all calls to this function
287 0 : let _locked = self.neon_local_lock.lock().await;
288 :
289 0 : let Some(repo_dir) = self.config.neon_local_repo_dir.as_deref() else {
290 0 : tracing::warn!(
291 0 : "neon_local_repo_dir not set, likely a bug in neon_local; skipping compute update"
292 : );
293 0 : return Ok(());
294 : };
295 0 : let env = match LocalEnv::load_config(repo_dir) {
296 0 : Ok(e) => e,
297 0 : Err(e) => {
298 0 : tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})");
299 0 : return Ok(());
300 : }
301 : };
302 0 : let cplane =
303 0 : ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane");
304 0 : let ComputeHookNotifyRequest {
305 0 : tenant_id,
306 0 : shards,
307 0 : stripe_size,
308 0 : } = reconfigure_request;
309 0 :
310 0 : let compute_pageservers = shards
311 0 : .iter()
312 0 : .map(|shard| {
313 0 : let ps_conf = env
314 0 : .get_pageserver_conf(shard.node_id)
315 0 : .expect("Unknown pageserver");
316 0 : let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
317 0 : .expect("Unable to parse listen_pg_addr");
318 0 : (pg_host, pg_port.unwrap_or(5432))
319 0 : })
320 0 : .collect::<Vec<_>>();
321 :
322 0 : for (endpoint_name, endpoint) in &cplane.endpoints {
323 0 : if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running {
324 0 : tracing::info!("Reconfiguring endpoint {}", endpoint_name,);
325 0 : endpoint
326 0 : .reconfigure(compute_pageservers.clone(), *stripe_size, None)
327 0 : .await
328 0 : .map_err(NotifyError::NeonLocal)?;
329 0 : }
330 : }
331 :
332 0 : Ok(())
333 0 : }
334 :
335 0 : async fn do_notify_iteration(
336 0 : &self,
337 0 : url: &String,
338 0 : reconfigure_request: &ComputeHookNotifyRequest,
339 0 : cancel: &CancellationToken,
340 0 : ) -> Result<(), NotifyError> {
341 0 : let req = self.client.request(reqwest::Method::PUT, url);
342 0 : let req = if let Some(value) = &self.authorization_header {
343 0 : req.header(reqwest::header::AUTHORIZATION, value)
344 : } else {
345 0 : req
346 : };
347 :
348 0 : tracing::info!(
349 0 : "Sending notify request to {} ({:?})",
350 : url,
351 : reconfigure_request
352 : );
353 0 : let send_result = req.json(&reconfigure_request).send().await;
354 0 : let response = match send_result {
355 0 : Ok(r) => r,
356 0 : Err(e) => return Err(e.into()),
357 : };
358 :
359 : // Treat all 2xx responses as success
360 0 : if response.status() >= reqwest::StatusCode::OK
361 0 : && response.status() < reqwest::StatusCode::MULTIPLE_CHOICES
362 : {
363 0 : if response.status() != reqwest::StatusCode::OK {
364 : // Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so
365 : // log a warning.
366 0 : tracing::warn!(
367 0 : "Unexpected 2xx response code {} from control plane",
368 0 : response.status()
369 : );
370 0 : }
371 :
372 0 : return Ok(());
373 0 : }
374 0 :
375 0 : // Error response codes
376 0 : match response.status() {
377 : reqwest::StatusCode::TOO_MANY_REQUESTS => {
378 : // TODO: 429 handling should be global: set some state visible to other requests
379 : // so that they will delay before starting, rather than all notifications trying
380 : // once before backing off.
381 0 : tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled())
382 0 : .await
383 0 : .ok();
384 0 : Err(NotifyError::SlowDown)
385 : }
386 : reqwest::StatusCode::LOCKED => {
387 : // We consider this fatal, because it's possible that the operation blocking the control plane is
388 : // also the one that is waiting for this reconcile. We should let the reconciler calling
389 : // this hook fail, to give the control plane a chance to un-lock.
390 0 : tracing::info!("Control plane reports tenant is locked, dropping out of notify");
391 0 : Err(NotifyError::Busy)
392 : }
393 : reqwest::StatusCode::SERVICE_UNAVAILABLE => {
394 0 : Err(NotifyError::Unavailable(StatusCode::SERVICE_UNAVAILABLE))
395 : }
396 : reqwest::StatusCode::GATEWAY_TIMEOUT => {
397 0 : Err(NotifyError::Unavailable(StatusCode::GATEWAY_TIMEOUT))
398 : }
399 : reqwest::StatusCode::BAD_GATEWAY => {
400 0 : Err(NotifyError::Unavailable(StatusCode::BAD_GATEWAY))
401 : }
402 :
403 0 : reqwest::StatusCode::BAD_REQUEST => Err(NotifyError::Fatal(StatusCode::BAD_REQUEST)),
404 0 : reqwest::StatusCode::UNAUTHORIZED => Err(NotifyError::Fatal(StatusCode::UNAUTHORIZED)),
405 0 : reqwest::StatusCode::FORBIDDEN => Err(NotifyError::Fatal(StatusCode::FORBIDDEN)),
406 0 : status => Err(NotifyError::Unexpected(
407 0 : hyper::StatusCode::from_u16(status.as_u16())
408 0 : .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
409 0 : )),
410 : }
411 0 : }
412 :
413 0 : async fn do_notify(
414 0 : &self,
415 0 : url: &String,
416 0 : reconfigure_request: &ComputeHookNotifyRequest,
417 0 : cancel: &CancellationToken,
418 0 : ) -> Result<(), NotifyError> {
419 : // We hold these semaphore units across all retries, rather than only across each
420 : // HTTP request: this is to preserve fairness and avoid a situation where a retry might
421 : // time out waiting for a semaphore.
422 0 : let _units = self
423 0 : .api_concurrency
424 0 : .acquire()
425 0 : .await
426 : // Interpret closed semaphore as shutdown
427 0 : .map_err(|_| NotifyError::ShuttingDown)?;
428 :
429 0 : backoff::retry(
430 0 : || self.do_notify_iteration(url, reconfigure_request, cancel),
431 0 : |e| {
432 0 : matches!(
433 0 : e,
434 : NotifyError::Fatal(_) | NotifyError::Unexpected(_) | NotifyError::Busy
435 : )
436 0 : },
437 0 : 3,
438 0 : 10,
439 0 : "Send compute notification",
440 0 : cancel,
441 0 : )
442 0 : .await
443 0 : .ok_or_else(|| NotifyError::ShuttingDown)
444 0 : .and_then(|x| x)
445 0 : }
446 :
447 : /// Synchronous phase: update the per-tenant state for the next intended notification
448 0 : fn notify_prepare(
449 0 : &self,
450 0 : tenant_shard_id: TenantShardId,
451 0 : node_id: NodeId,
452 0 : stripe_size: ShardStripeSize,
453 0 : ) -> MaybeSendResult {
454 0 : let mut state_locked = self.state.lock().unwrap();
455 :
456 : use std::collections::hash_map::Entry;
457 0 : let tenant = match state_locked.entry(tenant_shard_id.tenant_id) {
458 0 : Entry::Vacant(e) => e.insert(ComputeHookTenant::new(
459 0 : tenant_shard_id,
460 0 : stripe_size,
461 0 : node_id,
462 0 : )),
463 0 : Entry::Occupied(e) => {
464 0 : let tenant = e.into_mut();
465 0 : tenant.update(tenant_shard_id, stripe_size, node_id);
466 0 : tenant
467 : }
468 : };
469 0 : tenant.maybe_send(tenant_shard_id.tenant_id, None)
470 0 : }
471 :
472 0 : async fn notify_execute(
473 0 : &self,
474 0 : maybe_send_result: MaybeSendResult,
475 0 : tenant_shard_id: TenantShardId,
476 0 : cancel: &CancellationToken,
477 0 : ) -> Result<(), NotifyError> {
478 : // Process result: we may get an update to send, or we may have to wait for a lock
479 : // before trying again.
480 0 : let (request, mut send_lock_guard) = match maybe_send_result {
481 : MaybeSendResult::Noop => {
482 0 : return Ok(());
483 : }
484 0 : MaybeSendResult::AwaitLock(send_lock) => {
485 0 : let send_locked = tokio::select! {
486 0 : guard = send_lock.lock_owned() => {guard},
487 0 : _ = cancel.cancelled() => {
488 0 : return Err(NotifyError::ShuttingDown)
489 : }
490 : };
491 :
492 : // Lock order: maybe_send is called within the [`Self::state`] lock, and takes the send lock, but here
493 : // we have acquired the send lock and take the [`Self::state`] lock. This is safe because maybe_send only uses
494 : // try_lock.
495 0 : let state_locked = self.state.lock().unwrap();
496 0 : let Some(tenant) = state_locked.get(&tenant_shard_id.tenant_id) else {
497 0 : return Ok(());
498 : };
499 0 : match tenant.maybe_send(tenant_shard_id.tenant_id, Some(send_locked)) {
500 : MaybeSendResult::AwaitLock(_) => {
501 0 : unreachable!("We supplied lock guard")
502 : }
503 : MaybeSendResult::Noop => {
504 0 : return Ok(());
505 : }
506 0 : MaybeSendResult::Transmit((request, lock)) => (request, lock),
507 : }
508 : }
509 0 : MaybeSendResult::Transmit((request, lock)) => (request, lock),
510 : };
511 :
512 0 : let result = if let Some(notify_url) = &self.config.compute_hook_url {
513 0 : self.do_notify(notify_url, &request, cancel).await
514 : } else {
515 0 : self.do_notify_local(&request).await.map_err(|e| {
516 0 : // This path is for testing only, so munge the error into our prod-style error type.
517 0 : tracing::error!("neon_local notification hook failed: {e}");
518 0 : NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)
519 0 : })
520 : };
521 :
522 0 : if result.is_ok() {
523 0 : // Before dropping the send lock, stash the request we just sent so that
524 0 : // subsequent callers can avoid redundantly re-sending the same thing.
525 0 : *send_lock_guard = Some(request);
526 0 : }
527 0 : result
528 0 : }
529 :
530 : /// Infallible, synchronous, fire-and-forget version of notify() that sends its results to
531 : /// a channel. Something should consume the channel and arrange to try notifying again
532 : /// if something failed.
533 0 : pub(super) fn notify_background(
534 0 : self: &Arc<Self>,
535 0 : notifications: Vec<(TenantShardId, NodeId, ShardStripeSize)>,
536 0 : result_tx: tokio::sync::mpsc::Sender<Result<(), (TenantShardId, NotifyError)>>,
537 0 : cancel: &CancellationToken,
538 0 : ) {
539 0 : let mut maybe_sends = Vec::new();
540 0 : for (tenant_shard_id, node_id, stripe_size) in notifications {
541 0 : let maybe_send_result = self.notify_prepare(tenant_shard_id, node_id, stripe_size);
542 0 : maybe_sends.push((tenant_shard_id, maybe_send_result))
543 : }
544 :
545 0 : let this = self.clone();
546 0 : let cancel = cancel.clone();
547 0 :
548 0 : tokio::task::spawn(async move {
549 0 : // Construct an async stream of futures to invoke the compute notify function: we do this
550 0 : // in order to subsequently use .buffered() on the stream to execute with bounded parallelism. The
551 0 : // ComputeHook semaphore already limits concurrency, but this way we avoid constructing+polling lots of futures which
552 0 : // would mostly just be waiting on that semaphore.
553 0 : let mut stream = futures::stream::iter(maybe_sends)
554 0 : .map(|(tenant_shard_id, maybe_send_result)| {
555 0 : let this = this.clone();
556 0 : let cancel = cancel.clone();
557 :
558 0 : async move {
559 0 : this
560 0 : .notify_execute(maybe_send_result, tenant_shard_id, &cancel)
561 0 : .await.map_err(|e| (tenant_shard_id, e))
562 0 : }.instrument(info_span!(
563 0 : "notify_background", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()
564 : ))
565 0 : })
566 0 : .buffered(API_CONCURRENCY);
567 :
568 : loop {
569 0 : tokio::select! {
570 0 : next = stream.next() => {
571 0 : match next {
572 0 : Some(r) => {
573 0 : result_tx.send(r).await.ok();
574 : },
575 : None => {
576 0 : tracing::info!("Finished sending background compute notifications");
577 0 : break;
578 : }
579 : }
580 : },
581 0 : _ = cancel.cancelled() => {
582 0 : tracing::info!("Shutdown while running background compute notifications");
583 0 : break;
584 : }
585 : };
586 : }
587 0 : });
588 0 : }
589 :
590 : /// Call this to notify the compute (postgres) tier of new pageservers to use
591 : /// for a tenant. notify() is called by each shard individually, and this function
592 : /// will decide whether an update to the tenant is sent. An update is sent on the
593 : /// condition that:
594 : /// - We know a pageserver for every shard.
595 : /// - All the shards have the same shard_count (i.e. we are not mid-split)
596 : ///
597 : /// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler
598 : /// that is cancelled.
599 : ///
600 : /// This function is fallible, including in the case that the control plane is transiently
601 : /// unavailable. A limited number of retries are done internally to efficiently hide short unavailability
602 : /// periods, but we don't retry forever. The **caller** is responsible for handling failures and
603 : /// ensuring that they eventually call again to ensure that the compute is eventually notified of
604 : /// the proper pageserver nodes for a tenant.
605 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), node_id))]
606 : pub(super) async fn notify(
607 : &self,
608 : tenant_shard_id: TenantShardId,
609 : node_id: NodeId,
610 : stripe_size: ShardStripeSize,
611 : cancel: &CancellationToken,
612 : ) -> Result<(), NotifyError> {
613 : let maybe_send_result = self.notify_prepare(tenant_shard_id, node_id, stripe_size);
614 : self.notify_execute(maybe_send_result, tenant_shard_id, cancel)
615 : .await
616 : }
617 : }
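// A sketch of how a caller might drive `notify`, following the doc comment above; the
// `reschedule_notify` helper is hypothetical and not part of this crate:
//
//   match compute_hook.notify(tenant_shard_id, node_id, stripe_size, &cancel).await {
//       Ok(()) => {}
//       Err(NotifyError::ShuttingDown) => { /* give up quietly during shutdown */ }
//       Err(e) => {
//           // Transient or fatal failure: the compute still needs notifying, so record
//           // that and arrange to call notify() again later.
//           tracing::warn!("Compute notification failed: {e}");
//           reschedule_notify(tenant_shard_id);
//       }
//   }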
618 :
619 : #[cfg(test)]
620 : pub(crate) mod tests {
621 : use pageserver_api::shard::{ShardCount, ShardNumber};
622 : use utils::id::TenantId;
623 :
624 : use super::*;
625 :
626 : #[test]
627 1 : fn tenant_updates() -> anyhow::Result<()> {
628 1 : let tenant_id = TenantId::generate();
629 1 : let mut tenant_state = ComputeHookTenant::new(
630 1 : TenantShardId {
631 1 : tenant_id,
632 1 : shard_count: ShardCount::new(0),
633 1 : shard_number: ShardNumber(0),
634 1 : },
635 1 : ShardStripeSize(12345),
636 1 : NodeId(1),
637 1 : );
638 1 :
639 1 : // An unsharded tenant is always ready to emit a notification, but won't
640 1 : // send the same one twice
641 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
642 1 : let MaybeSendResult::Transmit((request, mut guard)) = send_result else {
643 0 : anyhow::bail!("Wrong send result");
644 : };
645 1 : assert_eq!(request.shards.len(), 1);
646 1 : assert!(request.stripe_size.is_none());
647 :
648 : // Simulate successful send
649 1 : *guard = Some(request);
650 1 : drop(guard);
651 1 :
652 1 : // Try asking again: this should be a no-op
653 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
654 1 : assert!(matches!(send_result, MaybeSendResult::Noop));
655 :
656 : // Writing the first shard of a multi-sharded situation (i.e. in a split)
657 : // resets the tenant state and puts it in a non-notifying state (need to
658 : // see all shards)
659 1 : tenant_state.update(
660 1 : TenantShardId {
661 1 : tenant_id,
662 1 : shard_count: ShardCount::new(2),
663 1 : shard_number: ShardNumber(1),
664 1 : },
665 1 : ShardStripeSize(32768),
666 1 : NodeId(1),
667 1 : );
668 1 : assert!(matches!(
669 1 : tenant_state.maybe_send(tenant_id, None),
670 : MaybeSendResult::Noop
671 : ));
672 :
673 : // Writing the second shard makes it ready to notify
674 1 : tenant_state.update(
675 1 : TenantShardId {
676 1 : tenant_id,
677 1 : shard_count: ShardCount::new(2),
678 1 : shard_number: ShardNumber(0),
679 1 : },
680 1 : ShardStripeSize(32768),
681 1 : NodeId(1),
682 1 : );
683 1 :
684 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
685 1 : let MaybeSendResult::Transmit((request, mut guard)) = send_result else {
686 0 : anyhow::bail!("Wrong send result");
687 : };
688 1 : assert_eq!(request.shards.len(), 2);
689 1 : assert_eq!(request.stripe_size, Some(ShardStripeSize(32768)));
690 :
691 : // Simulate successful send
692 1 : *guard = Some(request);
693 1 : drop(guard);
694 1 :
695 1 : Ok(())
696 1 : }
697 : }