Line data Source code
1 : use std::borrow::Cow;
2 : use std::error::Error as _;
3 : use std::sync::Arc;
4 : use std::{collections::HashMap, time::Duration};
5 :
6 : use control_plane::endpoint::{ComputeControlPlane, EndpointStatus};
7 : use control_plane::local_env::LocalEnv;
8 : use futures::StreamExt;
9 : use hyper::StatusCode;
10 : use pageserver_api::controller_api::AvailabilityZone;
11 : use pageserver_api::shard::{ShardCount, ShardNumber, ShardStripeSize, TenantShardId};
12 : use postgres_connection::parse_host_port;
13 : use serde::{Deserialize, Serialize};
14 : use tokio_util::sync::CancellationToken;
15 : use tracing::{info_span, Instrument};
16 : use utils::{
17 : backoff::{self},
18 : id::{NodeId, TenantId},
19 : };
20 :
21 : use crate::service::Config;
22 :
23 : const SLOWDOWN_DELAY: Duration = Duration::from_secs(5);
24 :
25 : const NOTIFY_REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
26 :
27 : pub(crate) const API_CONCURRENCY: usize = 32;
28 :
29 : struct UnshardedComputeHookTenant {
30 : // Which node this tenant is attached to
31 : node_id: NodeId,
32 :
33 : // The tenant's preferred AZ, so that we may pass this on to the control plane
34 : preferred_az: Option<AvailabilityZone>,
35 :
36 : // Must hold this lock to send a notification.
37 : send_lock: Arc<tokio::sync::Mutex<Option<ComputeRemoteState>>>,
38 : }
39 : struct ShardedComputeHookTenant {
40 : stripe_size: ShardStripeSize,
41 : shard_count: ShardCount,
42 : shards: Vec<(ShardNumber, NodeId)>,
43 :
44 : // The tenant's preferred AZ, so that we may pass this on to the control plane
45 : preferred_az: Option<AvailabilityZone>,
46 :
47 : // Must hold this lock to send a notification. The contents represent
48 : // the last successfully sent notification, and are used to coalesce multiple
49 : // updates by only sending when there is a change since our last successful send.
50 : send_lock: Arc<tokio::sync::Mutex<Option<ComputeRemoteState>>>,
51 : }
52 :
53 : /// Represents our knowledge of the compute's state: we can update this when we get a
54 : /// response from a notify API call, which tells us what has been applied.
55 : ///
56 : /// Should be wrapped in an Option<>, as we cannot always know the remote state.
57 : #[derive(PartialEq, Eq, Debug)]
58 : struct ComputeRemoteState {
59 : // The request body which was acked by the compute
60 : request: ComputeHookNotifyRequest,
61 :
62 : // Whether the cplane indicated that the state was applied to running computes, or just
63 : // persisted. In the Neon control plane, this is the difference between a 423 response (meaning
64 : // persisted but not applied), and a 2xx response (both persisted and applied)
65 : applied: bool,
66 : }
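// How the Option<ComputeRemoteState> held under `send_lock` is interpreted elsewhere in this
// file (summary of maybe_send / notify_execute, added for clarity):
//
//   None                                            -> remote state unknown (nothing sent yet, or
//                                                      the last send failed), so the next
//                                                      maybe_send() will transmit again.
//   Some(ComputeRemoteState { applied: false, .. }) -> the control plane persisted the request
//                                                      (e.g. a 423 response) but did not apply it,
//                                                      so an identical request is still re-sent.
//   Some(ComputeRemoteState { applied: true, .. })  -> persisted and applied; an identical
//                                                      request is skipped as a no-op.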
67 :
68 : enum ComputeHookTenant {
69 : Unsharded(UnshardedComputeHookTenant),
70 : Sharded(ShardedComputeHookTenant),
71 : }
72 :
73 : impl ComputeHookTenant {
74 : /// Construct with at least one shard's information
75 2 : fn new(
76 2 : tenant_shard_id: TenantShardId,
77 2 : stripe_size: ShardStripeSize,
78 2 : preferred_az: Option<AvailabilityZone>,
79 2 : node_id: NodeId,
80 2 : ) -> Self {
81 2 : if tenant_shard_id.shard_count.count() > 1 {
82 1 : Self::Sharded(ShardedComputeHookTenant {
83 1 : shards: vec![(tenant_shard_id.shard_number, node_id)],
84 1 : stripe_size,
85 1 : shard_count: tenant_shard_id.shard_count,
86 1 : preferred_az,
87 1 : send_lock: Arc::default(),
88 1 : })
89 : } else {
90 1 : Self::Unsharded(UnshardedComputeHookTenant {
91 1 : node_id,
92 1 : preferred_az,
93 1 : send_lock: Arc::default(),
94 1 : })
95 : }
96 2 : }
97 :
98 4 : fn get_send_lock(&self) -> &Arc<tokio::sync::Mutex<Option<ComputeRemoteState>>> {
99 4 : match self {
100 2 : Self::Unsharded(unsharded_tenant) => &unsharded_tenant.send_lock,
101 2 : Self::Sharded(sharded_tenant) => &sharded_tenant.send_lock,
102 : }
103 4 : }
104 :
105 0 : fn is_sharded(&self) -> bool {
106 0 : matches!(self, ComputeHookTenant::Sharded(_))
107 0 : }
108 :
109 : /// Clear compute hook state for the specified shard.
110 : /// Only valid for [`ComputeHookTenant::Sharded`] instances.
111 0 : fn remove_shard(&mut self, tenant_shard_id: TenantShardId, stripe_size: ShardStripeSize) {
112 0 : match self {
113 0 : ComputeHookTenant::Sharded(sharded) => {
114 0 : if sharded.stripe_size != stripe_size
115 0 : || sharded.shard_count != tenant_shard_id.shard_count
116 : {
117 0 : tracing::warn!("Shard split detected while handling detach")
118 0 : }
119 :
120 0 : let shard_idx = sharded.shards.iter().position(|(shard_number, _node_id)| {
121 0 : *shard_number == tenant_shard_id.shard_number
122 0 : });
123 :
124 0 : if let Some(shard_idx) = shard_idx {
125 0 : sharded.shards.remove(shard_idx);
126 0 : } else {
127 0 : tracing::warn!("Shard not found while handling detach")
128 : }
129 : }
130 : ComputeHookTenant::Unsharded(_) => {
131 0 : unreachable!("Detach of unsharded tenants is handled externally");
132 : }
133 : }
134 0 : }
135 :
136 : /// Set one shard's location. If stripe size or shard count have changed, Self is reset
137 : /// and existing content is dropped.
138 2 : fn update(&mut self, shard_update: ShardUpdate) {
139 2 : let tenant_shard_id = shard_update.tenant_shard_id;
140 2 : let node_id = shard_update.node_id;
141 2 : let stripe_size = shard_update.stripe_size;
142 2 : let preferred_az = shard_update.preferred_az;
143 :
144 1 : match self {
145 1 : Self::Unsharded(unsharded_tenant) if tenant_shard_id.shard_count.count() == 1 => {
146 0 : unsharded_tenant.node_id = node_id;
147 0 : if unsharded_tenant.preferred_az.as_ref()
148 0 : != preferred_az.as_ref().map(|az| az.as_ref())
149 0 : {
150 0 : unsharded_tenant.preferred_az = preferred_az.map(|az| az.as_ref().clone());
151 0 : }
152 : }
153 1 : Self::Sharded(sharded_tenant)
154 1 : if sharded_tenant.stripe_size == stripe_size
155 1 : && sharded_tenant.shard_count == tenant_shard_id.shard_count =>
156 : {
157 1 : if let Some(existing) = sharded_tenant
158 1 : .shards
159 1 : .iter()
160 1 : .position(|s| s.0 == tenant_shard_id.shard_number)
161 0 : {
162 0 : sharded_tenant.shards.get_mut(existing).unwrap().1 = node_id;
163 0 : } else {
164 1 : sharded_tenant
165 1 : .shards
166 1 : .push((tenant_shard_id.shard_number, node_id));
167 2 : sharded_tenant.shards.sort_by_key(|s| s.0)
168 : }
169 :
170 1 : if sharded_tenant.preferred_az.as_ref()
171 1 : != preferred_az.as_ref().map(|az| az.as_ref())
172 0 : {
173 0 : sharded_tenant.preferred_az = preferred_az.map(|az| az.as_ref().clone());
174 1 : }
175 : }
176 1 : _ => {
177 1 : // Shard count or stripe size changed: reset the struct.
178 1 : *self = Self::new(
179 1 : tenant_shard_id,
180 1 : stripe_size,
181 1 : preferred_az.map(|az| az.into_owned()),
182 1 : node_id,
183 1 : );
184 1 : }
185 : }
186 2 : }
187 : }
188 :
189 0 : #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
190 : struct ComputeHookNotifyRequestShard {
191 : node_id: NodeId,
192 : shard_number: ShardNumber,
193 : }
194 :
195 : /// Request body that we send to the control plane to notify it of where a tenant is attached
196 0 : #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
197 : struct ComputeHookNotifyRequest {
198 : tenant_id: TenantId,
199 : preferred_az: Option<String>,
200 : stripe_size: Option<ShardStripeSize>,
201 : shards: Vec<ComputeHookNotifyRequestShard>,
202 : }
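// For illustration only: with the serde derives above, the notification body sent to the control
// plane serializes to JSON roughly like the following (tenant id, AZ name and node ids are
// made-up example values; an unsharded tenant sends a single shard and a null stripe_size):
//
// {
//   "tenant_id": "1f2d3c4b5a69788796a5b4c3d2e1f001",
//   "preferred_az": "us-east-2b",
//   "stripe_size": 32768,
//   "shards": [
//     { "node_id": 1, "shard_number": 0 },
//     { "node_id": 2, "shard_number": 1 }
//   ]
// }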
203 :
204 : /// Error type for attempts to call into the control plane compute notification hook
205 : #[derive(thiserror::Error, Debug)]
206 : pub(crate) enum NotifyError {
207 : // Request was not sent successfully, e.g. transport error
208 0 : #[error("Sending request: {0}{}", .0.source().map(|e| format!(": {e}")).unwrap_or_default())]
209 : Request(#[from] reqwest::Error),
210 : // Request could not be serviced right now due to an ongoing operation in the control plane, but should be possible soon.
211 : #[error("Control plane tenant busy")]
212 : Busy,
213 : // Explicit 429 response asking us to retry less frequently
214 : #[error("Control plane overloaded")]
215 : SlowDown,
216 : // A 503 response indicates the control plane can't handle the request right now
217 : #[error("Control plane unavailable (status {0})")]
218 : Unavailable(StatusCode),
219 : // API returned unexpected non-success status. We will retry, but log a warning.
220 : #[error("Control plane returned unexpected status {0}")]
221 : Unexpected(StatusCode),
222 : // We shut down while sending
223 : #[error("Shutting down")]
224 : ShuttingDown,
225 : // A response that indicates we will never succeed, such as 400 or 404
226 : #[error("Non-retryable error {0}")]
227 : Fatal(StatusCode),
228 :
229 : #[error("neon_local error: {0}")]
230 : NeonLocal(anyhow::Error),
231 : }
232 :
233 : enum MaybeSendResult {
234 : // Please send this request while holding the lock, and if you succeed then write
235 : // the request into the lock.
236 : Transmit(
237 : (
238 : ComputeHookNotifyRequest,
239 : tokio::sync::OwnedMutexGuard<Option<ComputeRemoteState>>,
240 : ),
241 : ),
242 : // Something requires sending, but you must wait for the current sender to finish, then call again
243 : AwaitLock(Arc<tokio::sync::Mutex<Option<ComputeRemoteState>>>),
244 : // Nothing requires sending
245 : Noop,
246 : }
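// Caller protocol for MaybeSendResult, as implemented by notify_execute() below:
//  - Transmit((request, guard)): send `request` to the control plane while holding `guard`, then
//    record the outcome into the guard (applied=true on success, applied=false on a busy/locked
//    response, None on other errors).
//  - AwaitLock(lock): another sender is in flight; await `lock.lock_owned()` and then call
//    maybe_send() again, passing in the acquired guard.
//  - Noop: nothing to send.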
247 :
248 : impl ComputeHookTenant {
249 4 : fn maybe_send(
250 4 : &self,
251 4 : tenant_id: TenantId,
252 4 : lock: Option<tokio::sync::OwnedMutexGuard<Option<ComputeRemoteState>>>,
253 4 : ) -> MaybeSendResult {
254 4 : let locked = match lock {
255 0 : Some(already_locked) => already_locked,
256 : None => {
257 : // Lock order: this _must_ be only a try_lock, because we are called inside of the [`ComputeHook::state`] lock.
258 4 : let Ok(locked) = self.get_send_lock().clone().try_lock_owned() else {
259 0 : return MaybeSendResult::AwaitLock(self.get_send_lock().clone());
260 : };
261 4 : locked
262 : }
263 : };
264 :
265 4 : let request = match self {
266 2 : Self::Unsharded(unsharded_tenant) => Some(ComputeHookNotifyRequest {
267 2 : tenant_id,
268 2 : shards: vec![ComputeHookNotifyRequestShard {
269 2 : shard_number: ShardNumber(0),
270 2 : node_id: unsharded_tenant.node_id,
271 2 : }],
272 2 : stripe_size: None,
273 2 : preferred_az: unsharded_tenant
274 2 : .preferred_az
275 2 : .as_ref()
276 2 : .map(|az| az.0.clone()),
277 2 : }),
278 2 : Self::Sharded(sharded_tenant)
279 2 : if sharded_tenant.shards.len() == sharded_tenant.shard_count.count() as usize =>
280 1 : {
281 1 : Some(ComputeHookNotifyRequest {
282 1 : tenant_id,
283 1 : shards: sharded_tenant
284 1 : .shards
285 1 : .iter()
286 2 : .map(|(shard_number, node_id)| ComputeHookNotifyRequestShard {
287 2 : shard_number: *shard_number,
288 2 : node_id: *node_id,
289 2 : })
290 1 : .collect(),
291 1 : stripe_size: Some(sharded_tenant.stripe_size),
292 1 : preferred_az: sharded_tenant.preferred_az.as_ref().map(|az| az.0.clone()),
293 1 : })
294 : }
295 1 : Self::Sharded(sharded_tenant) => {
296 1 : // Sharded tenant doesn't yet have information for all its shards
297 1 :
298 1 : tracing::info!(
299 0 : "ComputeHookTenant::maybe_send: not enough shards ({}/{})",
300 0 : sharded_tenant.shards.len(),
301 0 : sharded_tenant.shard_count.count()
302 : );
303 1 : None
304 : }
305 : };
306 :
307 3 : match request {
308 : None => {
309 : // Not yet ready to emit a notification
310 1 : tracing::info!("Tenant isn't yet ready to emit a notification");
311 1 : MaybeSendResult::Noop
312 : }
313 1 : Some(request)
314 3 : if Some(&request) == locked.as_ref().map(|s| &s.request)
315 1 : && locked.as_ref().map(|s| s.applied).unwrap_or(false) =>
316 1 : {
317 1 : tracing::info!(
318 0 : "Skipping notification because remote state already matches ({:?})",
319 0 : &request
320 : );
321 : // No change from the last value successfully sent, and our state indicates that the last
322 : // value sent was fully applied on the control plane side.
323 1 : MaybeSendResult::Noop
324 : }
325 2 : Some(request) => {
326 2 : // Our request differs from the last one sent, or the last one sent was not fully applied on the compute side
327 2 : MaybeSendResult::Transmit((request, locked))
328 : }
329 : }
330 4 : }
331 : }
332 :
333 : /// The compute hook is a destination for notifications about changes to tenant:pageserver
334 : /// mapping. It aggregates updates for the shards in a tenant, and when appropriate reconfigures
335 : /// the compute connection string.
336 : pub(super) struct ComputeHook {
337 : config: Config,
338 : state: std::sync::Mutex<HashMap<TenantId, ComputeHookTenant>>,
339 : authorization_header: Option<String>,
340 :
341 : // Concurrency limiter, so that we do not overload the cloud control plane when updating
342 : // large numbers of tenants (e.g. when failing over after a node failure)
343 : api_concurrency: tokio::sync::Semaphore,
344 :
345 : // This lock is only used in testing environments, to serialize calls into neon_local
346 : neon_local_lock: tokio::sync::Mutex<()>,
347 :
348 : // We share a client across all notifications to enable connection re-use etc when
349 : // sending large numbers of notifications
350 : client: reqwest::Client,
351 : }
352 :
353 : /// Callers may give us a list of these when asking us to send a bulk batch
354 : /// of notifications in the background. This is a 'notification' in the sense of
355 : /// other code notifying us of a shard's status, rather than being the final notification
356 : /// that we send upwards to the control plane for the whole tenant.
357 : pub(crate) struct ShardUpdate<'a> {
358 : pub(crate) tenant_shard_id: TenantShardId,
359 : pub(crate) node_id: NodeId,
360 : pub(crate) stripe_size: ShardStripeSize,
361 : pub(crate) preferred_az: Option<Cow<'a, AvailabilityZone>>,
362 : }
363 :
364 : impl ComputeHook {
365 0 : pub(super) fn new(config: Config) -> Self {
366 0 : let authorization_header = config
367 0 : .control_plane_jwt_token
368 0 : .clone()
369 0 : .map(|jwt| format!("Bearer {}", jwt));
370 0 :
371 0 : let client = reqwest::ClientBuilder::new()
372 0 : .timeout(NOTIFY_REQUEST_TIMEOUT)
373 0 : .build()
374 0 : .expect("Failed to construct HTTP client");
375 0 :
376 0 : Self {
377 0 : state: Default::default(),
378 0 : config,
379 0 : authorization_header,
380 0 : neon_local_lock: Default::default(),
381 0 : api_concurrency: tokio::sync::Semaphore::new(API_CONCURRENCY),
382 0 : client,
383 0 : }
384 0 : }
385 :
386 : /// For test environments: use neon_local's LocalEnv to update compute
387 0 : async fn do_notify_local(
388 0 : &self,
389 0 : reconfigure_request: &ComputeHookNotifyRequest,
390 0 : ) -> Result<(), NotifyError> {
391 : // neon_local updates are not safe to call concurrently, use a lock to serialize
392 : // all calls to this function
393 0 : let _locked = self.neon_local_lock.lock().await;
394 :
395 0 : let Some(repo_dir) = self.config.neon_local_repo_dir.as_deref() else {
396 0 : tracing::warn!(
397 0 : "neon_local_repo_dir not set, likely a bug in neon_local; skipping compute update"
398 : );
399 0 : return Ok(());
400 : };
401 0 : let env = match LocalEnv::load_config(repo_dir) {
402 0 : Ok(e) => e,
403 0 : Err(e) => {
404 0 : tracing::warn!("Couldn't load neon_local config, skipping compute update ({e})");
405 0 : return Ok(());
406 : }
407 : };
408 0 : let cplane =
409 0 : ComputeControlPlane::load(env.clone()).expect("Error loading compute control plane");
410 0 : let ComputeHookNotifyRequest {
411 0 : tenant_id,
412 0 : shards,
413 0 : stripe_size,
414 0 : preferred_az: _preferred_az,
415 0 : } = reconfigure_request;
416 0 :
417 0 : let compute_pageservers = shards
418 0 : .iter()
419 0 : .map(|shard| {
420 0 : let ps_conf = env
421 0 : .get_pageserver_conf(shard.node_id)
422 0 : .expect("Unknown pageserver");
423 0 : let (pg_host, pg_port) = parse_host_port(&ps_conf.listen_pg_addr)
424 0 : .expect("Unable to parse listen_pg_addr");
425 0 : (pg_host, pg_port.unwrap_or(5432))
426 0 : })
427 0 : .collect::<Vec<_>>();
428 :
429 0 : for (endpoint_name, endpoint) in &cplane.endpoints {
430 0 : if endpoint.tenant_id == *tenant_id && endpoint.status() == EndpointStatus::Running {
431 0 : tracing::info!("Reconfiguring endpoint {}", endpoint_name,);
432 0 : endpoint
433 0 : .reconfigure(compute_pageservers.clone(), *stripe_size, None)
434 0 : .await
435 0 : .map_err(NotifyError::NeonLocal)?;
436 0 : }
437 : }
438 :
439 0 : Ok(())
440 0 : }
441 :
442 0 : async fn do_notify_iteration(
443 0 : &self,
444 0 : url: &String,
445 0 : reconfigure_request: &ComputeHookNotifyRequest,
446 0 : cancel: &CancellationToken,
447 0 : ) -> Result<(), NotifyError> {
448 0 : let req = self.client.request(reqwest::Method::PUT, url);
449 0 : let req = if let Some(value) = &self.authorization_header {
450 0 : req.header(reqwest::header::AUTHORIZATION, value)
451 : } else {
452 0 : req
453 : };
454 :
455 0 : tracing::info!(
456 0 : "Sending notify request to {} ({:?})",
457 : url,
458 : reconfigure_request
459 : );
460 0 : let send_result = req.json(&reconfigure_request).send().await;
461 0 : let response = match send_result {
462 0 : Ok(r) => r,
463 0 : Err(e) => return Err(e.into()),
464 : };
465 :
466 : // Treat all 2xx responses as success
467 0 : if response.status() >= reqwest::StatusCode::OK
468 0 : && response.status() < reqwest::StatusCode::MULTIPLE_CHOICES
469 : {
470 0 : if response.status() != reqwest::StatusCode::OK {
471 : // Non-200 2xx response: it doesn't make sense to retry, but this is unexpected, so
472 : // log a warning.
473 0 : tracing::warn!(
474 0 : "Unexpected 2xx response code {} from control plane",
475 0 : response.status()
476 : );
477 0 : }
478 :
479 0 : return Ok(());
480 0 : }
481 0 :
482 0 : // Error response codes
483 0 : match response.status() {
484 : reqwest::StatusCode::TOO_MANY_REQUESTS => {
485 : // TODO: 429 handling should be global: set some state visible to other requests
486 : // so that they will delay before starting, rather than all notifications trying
487 : // once before backing off.
488 0 : tokio::time::timeout(SLOWDOWN_DELAY, cancel.cancelled())
489 0 : .await
490 0 : .ok();
491 0 : Err(NotifyError::SlowDown)
492 : }
493 : reqwest::StatusCode::LOCKED => {
494 : // We consider this fatal, because it's possible that the operation holding the lock in the
495 : // control plane is also the one that is waiting for this reconcile. We should let the reconciler
496 : // calling this hook fail, to give the control plane a chance to un-lock.
497 0 : tracing::info!("Control plane reports tenant is locked, dropping out of notify");
498 0 : Err(NotifyError::Busy)
499 : }
500 : reqwest::StatusCode::SERVICE_UNAVAILABLE => {
501 0 : Err(NotifyError::Unavailable(StatusCode::SERVICE_UNAVAILABLE))
502 : }
503 : reqwest::StatusCode::GATEWAY_TIMEOUT => {
504 0 : Err(NotifyError::Unavailable(StatusCode::GATEWAY_TIMEOUT))
505 : }
506 : reqwest::StatusCode::BAD_GATEWAY => {
507 0 : Err(NotifyError::Unavailable(StatusCode::BAD_GATEWAY))
508 : }
509 :
510 0 : reqwest::StatusCode::BAD_REQUEST => Err(NotifyError::Fatal(StatusCode::BAD_REQUEST)),
511 0 : reqwest::StatusCode::UNAUTHORIZED => Err(NotifyError::Fatal(StatusCode::UNAUTHORIZED)),
512 0 : reqwest::StatusCode::FORBIDDEN => Err(NotifyError::Fatal(StatusCode::FORBIDDEN)),
513 0 : status => Err(NotifyError::Unexpected(
514 0 : hyper::StatusCode::from_u16(status.as_u16())
515 0 : .unwrap_or(StatusCode::INTERNAL_SERVER_ERROR),
516 0 : )),
517 : }
518 0 : }
519 :
520 0 : async fn do_notify(
521 0 : &self,
522 0 : url: &String,
523 0 : reconfigure_request: &ComputeHookNotifyRequest,
524 0 : cancel: &CancellationToken,
525 0 : ) -> Result<(), NotifyError> {
526 : // We hold these semaphore units across all retries, rather than only across each
527 : // HTTP request: this is to preserve fairness and avoid a situation where a retry might
528 : // time out waiting for a semaphore.
529 0 : let _units = self
530 0 : .api_concurrency
531 0 : .acquire()
532 0 : .await
533 : // Interpret closed semaphore as shutdown
534 0 : .map_err(|_| NotifyError::ShuttingDown)?;
535 :
536 0 : backoff::retry(
537 0 : || self.do_notify_iteration(url, reconfigure_request, cancel),
538 0 : |e| {
539 0 : matches!(
540 0 : e,
541 : NotifyError::Fatal(_) | NotifyError::Unexpected(_) | NotifyError::Busy
542 : )
543 0 : },
544 0 : 3,
545 0 : 10,
546 0 : "Send compute notification",
547 0 : cancel,
548 0 : )
549 0 : .await
550 0 : .ok_or_else(|| NotifyError::ShuttingDown)
551 0 : .and_then(|x| x)
552 0 : }
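// Note on the retry call above (summary added for clarity): the predicate passed to
// backoff::retry marks Fatal, Unexpected and Busy as non-retryable, so only transport errors,
// SlowDown and Unavailable are retried here; a None result (cancellation) is mapped to
// NotifyError::ShuttingDown by the ok_or_else.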
553 :
554 : /// Synchronous phase: update the per-tenant state for the next intended notification
555 0 : fn notify_prepare(&self, shard_update: ShardUpdate) -> MaybeSendResult {
556 0 : let mut state_locked = self.state.lock().unwrap();
557 :
558 : use std::collections::hash_map::Entry;
559 0 : let tenant_shard_id = shard_update.tenant_shard_id;
560 :
561 0 : let tenant = match state_locked.entry(tenant_shard_id.tenant_id) {
562 0 : Entry::Vacant(e) => {
563 0 : let ShardUpdate {
564 0 : tenant_shard_id,
565 0 : node_id,
566 0 : stripe_size,
567 0 : preferred_az,
568 0 : } = shard_update;
569 0 : e.insert(ComputeHookTenant::new(
570 0 : tenant_shard_id,
571 0 : stripe_size,
572 0 : preferred_az.map(|az| az.into_owned()),
573 0 : node_id,
574 0 : ))
575 : }
576 0 : Entry::Occupied(e) => {
577 0 : let tenant = e.into_mut();
578 0 : tenant.update(shard_update);
579 0 : tenant
580 : }
581 : };
582 0 : tenant.maybe_send(tenant_shard_id.tenant_id, None)
583 0 : }
584 :
585 0 : async fn notify_execute(
586 0 : &self,
587 0 : maybe_send_result: MaybeSendResult,
588 0 : tenant_shard_id: TenantShardId,
589 0 : cancel: &CancellationToken,
590 0 : ) -> Result<(), NotifyError> {
591 : // Process result: we may get an update to send, or we may have to wait for a lock
592 : // before trying again.
593 0 : let (request, mut send_lock_guard) = match maybe_send_result {
594 : MaybeSendResult::Noop => {
595 0 : return Ok(());
596 : }
597 0 : MaybeSendResult::AwaitLock(send_lock) => {
598 0 : let send_locked = tokio::select! {
599 0 : guard = send_lock.lock_owned() => {guard},
600 0 : _ = cancel.cancelled() => {
601 0 : return Err(NotifyError::ShuttingDown)
602 : }
603 : };
604 :
605 : // Lock order: maybe_send is called within the `[Self::state]` lock, and takes the send lock, but here
606 : // we have acquired the send lock and take `[Self::state]` lock. This is safe because maybe_send only uses
607 : // try_lock.
608 0 : let state_locked = self.state.lock().unwrap();
609 0 : let Some(tenant) = state_locked.get(&tenant_shard_id.tenant_id) else {
610 0 : return Ok(());
611 : };
612 0 : match tenant.maybe_send(tenant_shard_id.tenant_id, Some(send_locked)) {
613 : MaybeSendResult::AwaitLock(_) => {
614 0 : unreachable!("We supplied lock guard")
615 : }
616 : MaybeSendResult::Noop => {
617 0 : return Ok(());
618 : }
619 0 : MaybeSendResult::Transmit((request, lock)) => (request, lock),
620 : }
621 : }
622 0 : MaybeSendResult::Transmit((request, lock)) => (request, lock),
623 : };
624 :
625 0 : let result = if let Some(notify_url) = &self.config.compute_hook_url {
626 0 : self.do_notify(notify_url, &request, cancel).await
627 : } else {
628 0 : self.do_notify_local(&request).await.map_err(|e| {
629 0 : // This path is for testing only, so munge the error into our prod-style error type.
630 0 : tracing::error!("neon_local notification hook failed: {e}");
631 0 : NotifyError::Fatal(StatusCode::INTERNAL_SERVER_ERROR)
632 0 : })
633 : };
634 :
635 0 : match result {
636 0 : Ok(_) => {
637 0 : // Before dropping the send lock, stash the request we just sent so that
638 0 : // subsequent callers can avoid redundantly re-sending the same thing.
639 0 : *send_lock_guard = Some(ComputeRemoteState {
640 0 : request,
641 0 : applied: true,
642 0 : });
643 0 : }
644 0 : Err(NotifyError::Busy) => {
645 0 : // Busy result means that the server responded and has stored the new configuration,
646 0 : // but was not able to fully apply it to the compute
647 0 : *send_lock_guard = Some(ComputeRemoteState {
648 0 : request,
649 0 : applied: false,
650 0 : });
651 0 : }
652 0 : Err(_) => {
653 0 : // General error case: we can no longer know the remote state, so clear it. This will result in
654 0 : // the logic in maybe_send recognizing that we should call the hook again.
655 0 : *send_lock_guard = None;
656 0 : }
657 : }
658 0 : result
659 0 : }
660 :
661 : /// Infallible synchronous fire-and-forget version of notify() that sends its results to
662 : /// a channel. Something should consume the channel and arrange to try notifying again
663 : /// if something failed.
664 0 : pub(super) fn notify_background(
665 0 : self: &Arc<Self>,
666 0 : notifications: Vec<ShardUpdate>,
667 0 : result_tx: tokio::sync::mpsc::Sender<Result<(), (TenantShardId, NotifyError)>>,
668 0 : cancel: &CancellationToken,
669 0 : ) {
670 0 : let mut maybe_sends = Vec::new();
671 0 : for shard_update in notifications {
672 0 : let tenant_shard_id = shard_update.tenant_shard_id;
673 0 : let maybe_send_result = self.notify_prepare(shard_update);
674 0 : maybe_sends.push((tenant_shard_id, maybe_send_result))
675 : }
676 :
677 0 : let this = self.clone();
678 0 : let cancel = cancel.clone();
679 0 :
680 0 : tokio::task::spawn(async move {
681 0 : // Construct an async stream of futures to invoke the compute notify function: we do this
682 0 : // in order to subsequently use .buffered() on the stream to execute with bounded parallelism. The
683 0 : // ComputeHook semaphore already limits concurrency, but this way we avoid constructing+polling lots of futures which
684 0 : // would mostly just be waiting on that semaphore.
685 0 : let mut stream = futures::stream::iter(maybe_sends)
686 0 : .map(|(tenant_shard_id, maybe_send_result)| {
687 0 : let this = this.clone();
688 0 : let cancel = cancel.clone();
689 :
690 0 : async move {
691 0 : this
692 0 : .notify_execute(maybe_send_result, tenant_shard_id, &cancel)
693 0 : .await.map_err(|e| (tenant_shard_id, e))
694 0 : }.instrument(info_span!(
695 0 : "notify_background", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()
696 : ))
697 0 : })
698 0 : .buffered(API_CONCURRENCY);
699 :
700 : loop {
701 0 : tokio::select! {
702 0 : next = stream.next() => {
703 0 : match next {
704 0 : Some(r) => {
705 0 : result_tx.send(r).await.ok();
706 : },
707 : None => {
708 0 : tracing::info!("Finished sending background compute notifications");
709 0 : break;
710 : }
711 : }
712 : },
713 0 : _ = cancel.cancelled() => {
714 0 : tracing::info!("Shutdown while running background compute notifications");
715 0 : break;
716 : }
717 : };
718 : }
719 0 : });
720 0 : }
721 :
722 : /// Call this to notify the compute (postgres) tier of new pageservers to use
723 : /// for a tenant. notify() is called by each shard individually, and this function
724 : /// will decide whether an update to the tenant is sent. An update is sent on the
725 : /// condition that:
726 : /// - We know a pageserver for every shard.
727 : /// - All the shards have the same shard_count (i.e. we are not mid-split)
728 : ///
729 : /// Cancellation token enables callers to drop out, e.g. if calling from a Reconciler
730 : /// that is cancelled.
731 : ///
732 : /// This function is fallible, including in the case that the control plane is transiently
733 : /// unavailable. A limited number of retries are done internally to efficiently hide short unavailability
734 : /// periods, but we don't retry forever. The **caller** is responsible for handling failures and
735 : /// ensuring that they eventually call again to ensure that the compute is eventually notified of
736 : /// the proper pageserver nodes for a tenant.
737 0 : #[tracing::instrument(skip_all, fields(tenant_id=%shard_update.tenant_shard_id.tenant_id, shard_id=%shard_update.tenant_shard_id.shard_slug(), node_id))]
738 : pub(super) async fn notify<'a>(
739 : &self,
740 : shard_update: ShardUpdate<'a>,
741 : cancel: &CancellationToken,
742 : ) -> Result<(), NotifyError> {
743 : let tenant_shard_id = shard_update.tenant_shard_id;
744 : let maybe_send_result = self.notify_prepare(shard_update);
745 : self.notify_execute(maybe_send_result, tenant_shard_id, cancel)
746 : .await
747 : }
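// Hypothetical usage sketch (not part of the original source): a reconciler-style caller is
// expected to treat errors from notify() as retryable and to call again later, e.g.:
//
//     let update = ShardUpdate {
//         tenant_shard_id,
//         node_id,
//         stripe_size,
//         preferred_az: preferred_az.as_ref().map(Cow::Borrowed),
//     };
//     if let Err(e) = compute_hook.notify(update, &cancel).await {
//         // The hook only retries a limited number of times internally; the caller must
//         // arrange to notify again eventually (e.g. by scheduling another reconcile).
//         tracing::warn!("Compute notification failed, will retry later: {e}");
//     }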
748 :
749 : /// Reflect a detach for a particular shard in the compute hook state.
750 : ///
751 : /// The goal is to avoid sending compute notifications with stale information (i.e.
752 : /// including detached pageservers).
753 0 : #[tracing::instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
754 : pub(super) fn handle_detach(
755 : &self,
756 : tenant_shard_id: TenantShardId,
757 : stripe_size: ShardStripeSize,
758 : ) {
759 : use std::collections::hash_map::Entry;
760 :
761 : let mut state_locked = self.state.lock().unwrap();
762 : match state_locked.entry(tenant_shard_id.tenant_id) {
763 : Entry::Vacant(_) => {
764 : tracing::warn!("Compute hook tenant not found for detach");
765 : }
766 : Entry::Occupied(mut e) => {
767 : let sharded = e.get().is_sharded();
768 : if !sharded {
769 : e.remove();
770 : } else {
771 : e.get_mut().remove_shard(tenant_shard_id, stripe_size);
772 : }
773 :
774 : tracing::debug!("Compute hook handled shard detach");
775 : }
776 : }
777 : }
778 : }
779 :
780 : #[cfg(test)]
781 : pub(crate) mod tests {
782 : use pageserver_api::shard::{ShardCount, ShardNumber};
783 : use utils::id::TenantId;
784 :
785 : use super::*;
786 :
787 : #[test]
788 1 : fn tenant_updates() -> anyhow::Result<()> {
789 1 : let tenant_id = TenantId::generate();
790 1 : let mut tenant_state = ComputeHookTenant::new(
791 1 : TenantShardId {
792 1 : tenant_id,
793 1 : shard_count: ShardCount::new(0),
794 1 : shard_number: ShardNumber(0),
795 1 : },
796 1 : ShardStripeSize(12345),
797 1 : None,
798 1 : NodeId(1),
799 1 : );
800 1 :
801 1 : // An unsharded tenant is always ready to emit a notification, but won't
802 1 : // send the same one twice
803 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
804 1 : let MaybeSendResult::Transmit((request, mut guard)) = send_result else {
805 0 : anyhow::bail!("Wrong send result");
806 : };
807 1 : assert_eq!(request.shards.len(), 1);
808 1 : assert!(request.stripe_size.is_none());
809 :
810 : // Simulate successful send
811 1 : *guard = Some(ComputeRemoteState {
812 1 : request,
813 1 : applied: true,
814 1 : });
815 1 : drop(guard);
816 1 :
817 1 : // Try asking again: this should be a no-op
818 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
819 1 : assert!(matches!(send_result, MaybeSendResult::Noop));
820 :
821 : // Writing the first shard of a multi-sharded situation (i.e. in a split)
822 : // resets the tenant state and puts it in a non-notifying state (need to
823 : // see all shards)
824 1 : tenant_state.update(ShardUpdate {
825 1 : tenant_shard_id: TenantShardId {
826 1 : tenant_id,
827 1 : shard_count: ShardCount::new(2),
828 1 : shard_number: ShardNumber(1),
829 1 : },
830 1 : stripe_size: ShardStripeSize(32768),
831 1 : preferred_az: None,
832 1 : node_id: NodeId(1),
833 1 : });
834 1 : assert!(matches!(
835 1 : tenant_state.maybe_send(tenant_id, None),
836 : MaybeSendResult::Noop
837 : ));
838 :
839 : // Writing the second shard makes it ready to notify
840 1 : tenant_state.update(ShardUpdate {
841 1 : tenant_shard_id: TenantShardId {
842 1 : tenant_id,
843 1 : shard_count: ShardCount::new(2),
844 1 : shard_number: ShardNumber(0),
845 1 : },
846 1 : stripe_size: ShardStripeSize(32768),
847 1 : preferred_az: None,
848 1 : node_id: NodeId(1),
849 1 : });
850 1 :
851 1 : let send_result = tenant_state.maybe_send(tenant_id, None);
852 1 : let MaybeSendResult::Transmit((request, mut guard)) = send_result else {
853 0 : anyhow::bail!("Wrong send result");
854 : };
855 1 : assert_eq!(request.shards.len(), 2);
856 1 : assert_eq!(request.stripe_size, Some(ShardStripeSize(32768)));
857 :
858 : // Simulate successful send
859 1 : *guard = Some(ComputeRemoteState {
860 1 : request,
861 1 : applied: true,
862 1 : });
863 1 : drop(guard);
864 1 :
865 1 : Ok(())
866 1 : }
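
// A supplementary test sketch (added for illustration, in the style of `tenant_updates` above):
// detaching one shard of a sharded tenant via remove_shard() should leave it unable to notify
// until the missing shard is attached again.
#[test]
fn sharded_detach_blocks_notification() -> anyhow::Result<()> {
    let tenant_id = TenantId::generate();
    let shard_count = ShardCount::new(2);

    // Start with shard 0 attached to node 1
    let mut tenant_state = ComputeHookTenant::new(
        TenantShardId {
            tenant_id,
            shard_count,
            shard_number: ShardNumber(0),
        },
        ShardStripeSize(32768),
        None,
        NodeId(1),
    );

    // Attach shard 1 to node 2: all shards are now known, so a notification would be sent
    tenant_state.update(ShardUpdate {
        tenant_shard_id: TenantShardId {
            tenant_id,
            shard_count,
            shard_number: ShardNumber(1),
        },
        stripe_size: ShardStripeSize(32768),
        preferred_az: None,
        node_id: NodeId(2),
    });
    assert!(matches!(
        tenant_state.maybe_send(tenant_id, None),
        MaybeSendResult::Transmit(_)
    ));

    // Detach shard 1: the tenant no longer has complete information, so no notification
    tenant_state.remove_shard(
        TenantShardId {
            tenant_id,
            shard_count,
            shard_number: ShardNumber(1),
        },
        ShardStripeSize(32768),
    );
    assert!(matches!(
        tenant_state.maybe_send(tenant_id, None),
        MaybeSendResult::Noop
    ));

    Ok(())
}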
867 : }
|