1 : //! Simple pub-sub based on grpc (tonic) and the Tokio broadcast channel for
2 : //! storage node messaging.
3 : //!
4 : //! Subscriptions to 1) a single timeline and 2) all timelines are possible. We
5 : //! could add subscription to a set of timelines to save grpc streams, but
6 : //! testing shows that many individual streams are also ok.
7 : //!
8 : //! A message is dropped if a subscriber can't consume it, without affecting
9 : //! other subscribers.
10 : //!
11 : //! Only safekeeper messages are supported, but it would not be hard to add
12 : //! other message types with generics.
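//!
//! The drop-on-lag behavior relies on the semantics of tokio's broadcast
//! channel: a receiver that falls behind observes `RecvError::Lagged` and
//! skips ahead instead of blocking the sender. A minimal sketch of that
//! behavior (illustrative only, not part of this binary):
//!
//! ```ignore
//! use tokio::sync::broadcast::{self, error::RecvError};
//!
//! #[tokio::main]
//! async fn main() {
//!     // Capacity 2: the third send evicts the oldest queued message.
//!     let (tx, mut rx) = broadcast::channel(2);
//!     for i in 0..3 {
//!         tx.send(i).unwrap();
//!     }
//!     // The receiver learns how many messages it missed, then resumes.
//!     assert!(matches!(rx.recv().await, Err(RecvError::Lagged(1))));
//!     assert_eq!(rx.recv().await.unwrap(), 1);
//!     assert_eq!(rx.recv().await.unwrap(), 2);
//! }
//! ```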
13 : use std::collections::HashMap;
14 : use std::convert::Infallible;
15 : use std::net::SocketAddr;
16 : use std::pin::Pin;
17 : use std::sync::Arc;
18 : use std::time::Duration;
19 :
20 : use clap::{Parser, command};
21 : use futures_core::Stream;
22 : use futures_util::StreamExt;
23 : use http_body_util::Full;
24 : use hyper::body::Incoming;
25 : use hyper::header::CONTENT_TYPE;
26 : use hyper::service::service_fn;
27 : use hyper::{Method, StatusCode};
28 : use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
29 : use metrics::{Encoder, TextEncoder};
30 : use parking_lot::RwLock;
31 : use storage_broker::metrics::{
32 : BROADCAST_DROPPED_MESSAGES_TOTAL, BROADCASTED_MESSAGES_TOTAL, NUM_PUBS, NUM_SUBS_ALL,
33 : NUM_SUBS_TIMELINE, PROCESSED_MESSAGES_TOTAL, PUBLISHED_ONEOFF_MESSAGES_TOTAL,
34 : };
35 : use storage_broker::proto::broker_service_server::{BrokerService, BrokerServiceServer};
36 : use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey;
37 : use storage_broker::proto::{
38 : FilterTenantTimelineId, MessageType, SafekeeperDiscoveryRequest, SafekeeperDiscoveryResponse,
39 : SafekeeperTimelineInfo, SubscribeByFilterRequest, SubscribeSafekeeperInfoRequest, TypedMessage,
40 : };
41 : use storage_broker::{DEFAULT_KEEPALIVE_INTERVAL, DEFAULT_LISTEN_ADDR, parse_proto_ttid};
42 : use tokio::net::TcpListener;
43 : use tokio::sync::broadcast;
44 : use tokio::sync::broadcast::error::RecvError;
45 : use tokio::time;
46 : use tonic::body::{self, BoxBody, empty_body};
47 : use tonic::codegen::Service;
48 : use tonic::{Code, Request, Response, Status};
49 : use tracing::*;
50 : use utils::id::TenantTimelineId;
51 : use utils::logging::{self, LogFormat};
52 : use utils::sentry_init::init_sentry;
53 : use utils::signals::ShutdownSignals;
54 : use utils::{project_build_tag, project_git_version};
55 :
56 : project_git_version!(GIT_VERSION);
57 : project_build_tag!(BUILD_TAG);
58 :
59 : const DEFAULT_CHAN_SIZE: usize = 32;
60 : const DEFAULT_ALL_KEYS_CHAN_SIZE: usize = 16384;
61 :
62 : #[derive(Parser, Debug)]
63 : #[command(version = GIT_VERSION, about = "Broker for Neon storage node communication", long_about = None)]
64 : struct Args {
65 : /// Endpoint to listen on.
66 : #[arg(short, long, default_value = DEFAULT_LISTEN_ADDR)]
67 0 : listen_addr: SocketAddr,
68 : /// Size of the queue to a per-timeline subscriber.
69 0 : #[arg(long, default_value_t = DEFAULT_CHAN_SIZE)]
70 0 : timeline_chan_size: usize,
71 : /// Size of the queue to an all-keys subscriber.
72 0 : #[arg(long, default_value_t = DEFAULT_ALL_KEYS_CHAN_SIZE)]
73 0 : all_keys_chan_size: usize,
74 : /// HTTP/2 keepalive interval.
75 : #[arg(long, value_parser= humantime::parse_duration, default_value = DEFAULT_KEEPALIVE_INTERVAL)]
76 0 : http2_keepalive_interval: Duration,
77 : /// Format for logging, either 'plain' or 'json'.
78 : #[arg(long, default_value = "plain")]
79 0 : log_format: String,
80 : }
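// Example invocation (hypothetical binary name and values; the flags follow
// clap's kebab-case derivation of the fields above):
//
//   storage_broker --listen-addr 127.0.0.1:50051 \
//       --timeline-chan-size 32 --all-keys-chan-size 16384 \
//       --http2-keepalive-interval 5s --log-format json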
81 :
82 : /// Id of a publisher for registering in maps.
83 : type PubId = u64;
84 :
85 : /// Id of subscriber for registering in maps
86 : type SubId = u64;
87 :
88 : /// Single enum type for all messages.
89 : #[derive(Clone, Debug, PartialEq)]
90 : #[allow(clippy::enum_variant_names)]
91 : enum Message {
92 : SafekeeperTimelineInfo(SafekeeperTimelineInfo),
93 : SafekeeperDiscoveryRequest(SafekeeperDiscoveryRequest),
94 : SafekeeperDiscoveryResponse(SafekeeperDiscoveryResponse),
95 : }
96 :
97 : impl Message {
98 : /// Convert proto message to internal message.
99 : #[allow(clippy::result_large_err, reason = "TODO")]
100 0 : pub fn from(proto_msg: TypedMessage) -> Result<Self, Status> {
101 0 : match proto_msg.r#type() {
102 : MessageType::SafekeeperTimelineInfo => Ok(Message::SafekeeperTimelineInfo(
103 0 : proto_msg.safekeeper_timeline_info.ok_or_else(|| {
104 0 : Status::new(Code::InvalidArgument, "missing safekeeper_timeline_info")
105 0 : })?,
106 : )),
107 : MessageType::SafekeeperDiscoveryRequest => Ok(Message::SafekeeperDiscoveryRequest(
108 0 : proto_msg.safekeeper_discovery_request.ok_or_else(|| {
109 0 : Status::new(
110 0 : Code::InvalidArgument,
111 0 : "missing safekeeper_discovery_request",
112 0 : )
113 0 : })?,
114 : )),
115 : MessageType::SafekeeperDiscoveryResponse => Ok(Message::SafekeeperDiscoveryResponse(
116 0 : proto_msg.safekeeper_discovery_response.ok_or_else(|| {
117 0 : Status::new(
118 0 : Code::InvalidArgument,
119 0 : "missing safekeeper_discovery_response",
120 0 : )
121 0 : })?,
122 : )),
123 0 : MessageType::Unknown => Err(Status::new(
124 0 : Code::InvalidArgument,
125 0 : format!("invalid message type: {:?}", proto_msg.r#type),
126 0 : )),
127 : }
128 0 : }
129 :
130 : /// Get the tenant_timeline_id from the message.
131 : #[allow(clippy::result_large_err, reason = "TODO")]
132 2 : pub fn tenant_timeline_id(&self) -> Result<Option<TenantTimelineId>, Status> {
133 2 : match self {
134 2 : Message::SafekeeperTimelineInfo(msg) => Ok(msg
135 2 : .tenant_timeline_id
136 2 : .as_ref()
137 2 : .map(parse_proto_ttid)
138 2 : .transpose()?),
139 0 : Message::SafekeeperDiscoveryRequest(msg) => Ok(msg
140 0 : .tenant_timeline_id
141 0 : .as_ref()
142 0 : .map(parse_proto_ttid)
143 0 : .transpose()?),
144 0 : Message::SafekeeperDiscoveryResponse(msg) => Ok(msg
145 0 : .tenant_timeline_id
146 0 : .as_ref()
147 0 : .map(parse_proto_ttid)
148 0 : .transpose()?),
149 : }
150 2 : }
151 :
152 : /// Convert internal message to the protobuf struct.
153 0 : pub fn as_typed_message(&self) -> TypedMessage {
154 0 : let mut res = TypedMessage {
155 0 : r#type: self.message_type() as i32,
156 0 : ..Default::default()
157 0 : };
158 0 : match self {
159 0 : Message::SafekeeperTimelineInfo(msg) => {
160 0 : res.safekeeper_timeline_info = Some(msg.clone())
161 : }
162 0 : Message::SafekeeperDiscoveryRequest(msg) => {
163 0 : res.safekeeper_discovery_request = Some(msg.clone())
164 : }
165 0 : Message::SafekeeperDiscoveryResponse(msg) => {
166 0 : res.safekeeper_discovery_response = Some(msg.clone())
167 : }
168 : }
169 0 : res
170 0 : }
171 :
172 : /// Get the message type.
173 0 : pub fn message_type(&self) -> MessageType {
174 0 : match self {
175 0 : Message::SafekeeperTimelineInfo(_) => MessageType::SafekeeperTimelineInfo,
176 0 : Message::SafekeeperDiscoveryRequest(_) => MessageType::SafekeeperDiscoveryRequest,
177 0 : Message::SafekeeperDiscoveryResponse(_) => MessageType::SafekeeperDiscoveryResponse,
178 : }
179 0 : }
180 : }
181 :
182 : #[derive(Copy, Clone, Debug)]
183 : enum SubscriptionKey {
184 : All,
185 : Timeline(TenantTimelineId),
186 : }
187 :
188 : impl SubscriptionKey {
189 : /// Parse protobuf subkey (protobuf doesn't have fixed-size bytes, we get vectors).
190 : #[allow(clippy::result_large_err, reason = "TODO")]
191 0 : pub fn from_proto_subscription_key(key: ProtoSubscriptionKey) -> Result<Self, Status> {
192 0 : match key {
193 0 : ProtoSubscriptionKey::All(_) => Ok(SubscriptionKey::All),
194 0 : ProtoSubscriptionKey::TenantTimelineId(proto_ttid) => {
195 0 : Ok(SubscriptionKey::Timeline(parse_proto_ttid(&proto_ttid)?))
196 : }
197 : }
198 0 : }
199 :
200 : /// Parse from FilterTenantTimelineId.
201 : #[allow(clippy::result_large_err, reason = "TODO")]
202 0 : pub fn from_proto_filter_tenant_timeline_id(
203 0 : opt: Option<&FilterTenantTimelineId>,
204 0 : ) -> Result<Self, Status> {
205 0 : let Some(f) = opt else {
206 0 : return Ok(SubscriptionKey::All);
207 0 : };
210 0 : if !f.enabled {
211 0 : return Ok(SubscriptionKey::All);
212 0 : }
213 :
214 0 : let ttid =
215 0 : parse_proto_ttid(f.tenant_timeline_id.as_ref().ok_or_else(|| {
216 0 : Status::new(Code::InvalidArgument, "missing tenant_timeline_id")
217 0 : })?)?;
218 0 : Ok(SubscriptionKey::Timeline(ttid))
219 0 : }
220 : }
221 :
222 : /// Channel to timeline subscribers.
223 : struct ChanToTimelineSub {
224 : chan: broadcast::Sender<Message>,
225 : /// Tracked separately to know when to delete the shmem entry.
226 : /// receiver_count() is unsuitable for that, as unregistering and dropping
227 : /// the receiver side happen at different moments.
228 : num_subscribers: u64,
229 : }
230 :
231 : struct SharedState {
232 : next_pub_id: PubId,
233 : num_pubs: i64,
234 : next_sub_id: SubId,
235 : num_subs_to_timelines: i64,
236 : chans_to_timeline_subs: HashMap<TenantTimelineId, ChanToTimelineSub>,
237 : num_subs_to_all: i64,
238 : chan_to_all_subs: broadcast::Sender<Message>,
239 : }
240 :
241 : impl SharedState {
242 1 : pub fn new(all_keys_chan_size: usize) -> Self {
243 1 : SharedState {
244 1 : next_pub_id: 0,
245 1 : num_pubs: 0,
246 1 : next_sub_id: 0,
247 1 : num_subs_to_timelines: 0,
248 1 : chans_to_timeline_subs: HashMap::new(),
249 1 : num_subs_to_all: 0,
250 1 : chan_to_all_subs: broadcast::channel(all_keys_chan_size).0,
251 1 : }
252 1 : }
253 :
254 : // Register new publisher.
255 1 : pub fn register_publisher(&mut self) -> PubId {
256 1 : let pub_id = self.next_pub_id;
257 1 : self.next_pub_id += 1;
258 1 : self.num_pubs += 1;
259 1 : NUM_PUBS.set(self.num_pubs);
260 1 : pub_id
261 1 : }
262 :
263 : // Unregister publisher.
264 1 : pub fn unregister_publisher(&mut self) {
265 1 : self.num_pubs -= 1;
266 1 : NUM_PUBS.set(self.num_pubs);
267 1 : }
268 :
269 : // Register new subscriber.
270 2 : pub fn register_subscriber(
271 2 : &mut self,
272 2 : sub_key: SubscriptionKey,
273 2 : timeline_chan_size: usize,
274 2 : ) -> (SubId, broadcast::Receiver<Message>) {
275 2 : let sub_id = self.next_sub_id;
276 2 : self.next_sub_id += 1;
277 2 : let sub_rx = match sub_key {
278 : SubscriptionKey::All => {
279 1 : self.num_subs_to_all += 1;
280 1 : NUM_SUBS_ALL.set(self.num_subs_to_all);
281 1 : self.chan_to_all_subs.subscribe()
282 : }
283 1 : SubscriptionKey::Timeline(ttid) => {
284 1 : self.num_subs_to_timelines += 1;
285 1 : NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
286 1 : // Create a new broadcast channel for this key, or subscribe to
287 1 : // the existing one.
288 1 : let chan_to_timeline_sub =
289 1 : self.chans_to_timeline_subs
290 1 : .entry(ttid)
291 1 : .or_insert(ChanToTimelineSub {
292 1 : chan: broadcast::channel(timeline_chan_size).0,
293 1 : num_subscribers: 0,
294 1 : });
295 1 : chan_to_timeline_sub.num_subscribers += 1;
296 1 : chan_to_timeline_sub.chan.subscribe()
297 : }
298 : };
299 2 : (sub_id, sub_rx)
300 2 : }
301 :
302 : // Unregister the subscriber.
303 2 : pub fn unregister_subscriber(&mut self, sub_key: SubscriptionKey) {
304 2 : match sub_key {
305 1 : SubscriptionKey::All => {
306 1 : self.num_subs_to_all -= 1;
307 1 : NUM_SUBS_ALL.set(self.num_subs_to_all);
308 1 : }
309 1 : SubscriptionKey::Timeline(ttid) => {
310 1 : self.num_subs_to_timelines -= 1;
311 1 : NUM_SUBS_TIMELINE.set(self.num_subs_to_timelines);
312 1 :
313 1 : // Remove from the map, destroying the channel, if we are the
314 1 : // last subscriber to this timeline.
315 1 :
316 1 : // Missing entry is a bug; we must have registered.
317 1 : let chan_to_timeline_sub = self
318 1 : .chans_to_timeline_subs
319 1 : .get_mut(&ttid)
320 1 : .expect("failed to find sub entry in shmem during unregister");
321 1 : chan_to_timeline_sub.num_subscribers -= 1;
322 1 : if chan_to_timeline_sub.num_subscribers == 0 {
323 1 : self.chans_to_timeline_subs.remove(&ttid);
324 1 : }
325 : }
326 : }
327 2 : }
328 : }
329 :
330 : // SharedState wrapper.
331 : #[derive(Clone)]
332 : struct Registry {
333 : shared_state: Arc<RwLock<SharedState>>,
334 : timeline_chan_size: usize,
335 : }
336 :
337 : impl Registry {
338 : // Register new publisher in shared state.
339 1 : pub fn register_publisher(&self, remote_addr: SocketAddr) -> Publisher {
340 1 : let pub_id = self.shared_state.write().register_publisher();
341 1 : info!("publication started id={} addr={:?}", pub_id, remote_addr);
342 1 : Publisher {
343 1 : id: pub_id,
344 1 : registry: self.clone(),
345 1 : remote_addr,
346 1 : }
347 1 : }
348 :
349 1 : pub fn unregister_publisher(&self, publisher: &Publisher) {
350 1 : self.shared_state.write().unregister_publisher();
351 1 : info!(
352 0 : "publication ended id={} addr={:?}",
353 : publisher.id, publisher.remote_addr
354 : );
355 1 : }
356 :
357 : // Register new subscriber in shared state.
358 2 : pub fn register_subscriber(
359 2 : &self,
360 2 : sub_key: SubscriptionKey,
361 2 : remote_addr: SocketAddr,
362 2 : ) -> Subscriber {
363 2 : let (sub_id, sub_rx) = self
364 2 : .shared_state
365 2 : .write()
366 2 : .register_subscriber(sub_key, self.timeline_chan_size);
367 2 : info!(
368 0 : "subscription started id={}, key={:?}, addr={:?}",
369 : sub_id, sub_key, remote_addr
370 : );
371 2 : Subscriber {
372 2 : id: sub_id,
373 2 : key: sub_key,
374 2 : sub_rx,
375 2 : registry: self.clone(),
376 2 : remote_addr,
377 2 : }
378 2 : }
379 :
380 : // Unregister the subscriber.
381 2 : pub fn unregister_subscriber(&self, subscriber: &Subscriber) {
382 2 : self.shared_state
383 2 : .write()
384 2 : .unregister_subscriber(subscriber.key);
385 2 : info!(
386 0 : "subscription ended id={}, key={:?}, addr={:?}",
387 : subscriber.id, subscriber.key, subscriber.remote_addr
388 : );
389 2 : }
390 :
391 : /// Send msg to relevant subscribers.
392 : #[allow(clippy::result_large_err, reason = "TODO")]
393 2 : pub fn send_msg(&self, msg: &Message) -> Result<(), Status> {
394 2 : PROCESSED_MESSAGES_TOTAL.inc();
395 2 :
396 2 : // send message to subscribers for everything
397 2 : let shared_state = self.shared_state.read();
398 2 : // Err means there are no subscribers, which is fine.
399 2 : shared_state.chan_to_all_subs.send(msg.clone()).ok();
400 :
401 : // send message to per-timeline subscribers, if there is a ttid
402 2 : let ttid = msg.tenant_timeline_id()?;
403 2 : if let Some(ttid) = ttid {
404 2 : if let Some(subs) = shared_state.chans_to_timeline_subs.get(&ttid) {
405 1 : // Err can't happen here, as the tx is destroyed only when the last
406 1 : // subscriber is removed from the map along with it.
407 1 : subs.chan
408 1 : .send(msg.clone())
409 1 : .expect("rx is still in the map with zero subscribers");
410 1 : }
411 0 : }
412 2 : Ok(())
413 2 : }
414 : }
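// A typical in-process flow, mirroring the test in `mod tests` below
// (sketch only; `addr`, `msg`, and error handling are elided):
//
//   let mut publisher = registry.register_publisher(addr);
//   let mut subscriber = registry.register_subscriber(SubscriptionKey::All, addr);
//   publisher.send_msg(&msg)?;                    // fans out to subscriber
//   let received = subscriber.sub_rx.try_recv()?; // == msg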
415 :
416 : // Private subscriber state.
417 : struct Subscriber {
418 : id: SubId,
419 : key: SubscriptionKey,
420 : // Subscriber receives messages from publishers here.
421 : sub_rx: broadcast::Receiver<Message>,
422 : // to unregister itself from shared state in Drop
423 : registry: Registry,
424 : // for logging
425 : remote_addr: SocketAddr,
426 : }
427 :
428 : impl Drop for Subscriber {
429 2 : fn drop(&mut self) {
430 2 : self.registry.unregister_subscriber(self);
431 2 : }
432 : }
433 :
434 : // Private publisher state
435 : struct Publisher {
436 : id: PubId,
437 : registry: Registry,
438 : // for logging
439 : remote_addr: SocketAddr,
440 : }
441 :
442 : impl Publisher {
443 : /// Send msg to relevant subscribers.
444 : #[allow(clippy::result_large_err, reason = "TODO")]
445 2 : pub fn send_msg(&mut self, msg: &Message) -> Result<(), Status> {
446 2 : self.registry.send_msg(msg)
447 2 : }
448 : }
449 :
450 : impl Drop for Publisher {
451 1 : fn drop(&mut self) {
452 1 : self.registry.unregister_publisher(self);
453 1 : }
454 : }
455 :
456 : struct Broker {
457 : registry: Registry,
458 : }
459 :
460 : #[tonic::async_trait]
461 : impl BrokerService for Broker {
462 0 : async fn publish_safekeeper_info(
463 0 : &self,
464 0 : request: Request<tonic::Streaming<SafekeeperTimelineInfo>>,
465 0 : ) -> Result<Response<()>, Status> {
466 0 : let &RemoteAddr(remote_addr) = request
467 0 : .extensions()
468 0 : .get()
469 0 : .expect("RemoteAddr inserted by handler");
470 0 : let mut publisher = self.registry.register_publisher(remote_addr);
471 0 :
472 0 : let mut stream = request.into_inner();
473 :
474 : loop {
475 0 : match stream.next().await {
476 0 : Some(Ok(msg)) => publisher.send_msg(&Message::SafekeeperTimelineInfo(msg))?,
477 0 : Some(Err(e)) => return Err(e), // grpc error from the stream
478 0 : None => break, // closed stream
479 0 : }
480 0 : }
481 0 :
482 0 : Ok(Response::new(()))
483 0 : }
484 :
485 : type SubscribeSafekeeperInfoStream =
486 : Pin<Box<dyn Stream<Item = Result<SafekeeperTimelineInfo, Status>> + Send + 'static>>;
487 :
488 0 : async fn subscribe_safekeeper_info(
489 0 : &self,
490 0 : request: Request<SubscribeSafekeeperInfoRequest>,
491 0 : ) -> Result<Response<Self::SubscribeSafekeeperInfoStream>, Status> {
492 0 : let &RemoteAddr(remote_addr) = request
493 0 : .extensions()
494 0 : .get()
495 0 : .expect("RemoteAddr inserted by handler");
496 0 : let proto_key = request
497 0 : .into_inner()
498 0 : .subscription_key
499 0 : .ok_or_else(|| Status::new(Code::InvalidArgument, "missing subscription key"))?;
500 0 : let sub_key = SubscriptionKey::from_proto_subscription_key(proto_key)?;
501 0 : let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);
502 0 :
503 0 : // transform rx into a stream with item = Result, as the method signature demands
504 0 : let output = async_stream::try_stream! {
505 0 : let mut warn_interval = time::interval(Duration::from_millis(1000));
506 0 : let mut missed_msgs: u64 = 0;
507 0 : loop {
508 0 : match subscriber.sub_rx.recv().await {
509 0 : Ok(info) => {
510 0 : match info {
511 0 : Message::SafekeeperTimelineInfo(info) => yield info,
512 0 : _ => {},
513 0 : }
514 0 : BROADCASTED_MESSAGES_TOTAL.inc();
515 0 : },
516 0 : Err(RecvError::Lagged(skipped_msg)) => {
517 0 : BROADCAST_DROPPED_MESSAGES_TOTAL.inc_by(skipped_msg);
518 0 : missed_msgs += skipped_msg;
519 0 : if (futures::poll!(Box::pin(warn_interval.tick()))).is_ready() {
520 0 : warn!("subscription id={}, key={:?} addr={:?} dropped {} messages, channel is full",
521 0 : subscriber.id, subscriber.key, subscriber.remote_addr, missed_msgs);
522 0 : missed_msgs = 0;
523 0 : }
524 0 : }
525 0 : Err(RecvError::Closed) => {
526 0 : // can't happen, we never drop the channel while there is a subscriber
527 0 : Err(Status::new(Code::Internal, "channel unexpectantly closed"))?;
528 0 : }
529 0 : }
530 0 : }
531 0 : };
532 0 :
533 0 : Ok(Response::new(
534 0 : Box::pin(output) as Self::SubscribeSafekeeperInfoStream
535 0 : ))
536 0 : }
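// The warn_interval/poll! pattern above rate-limits the lag warning:
// futures::poll! checks the interval's tick without ever blocking the
// stream, so the warning fires at most roughly once per interval. A
// standalone sketch of the same pattern (illustrative only):
//
//   let mut interval = tokio::time::interval(Duration::from_secs(1));
//   loop {
//       // ... hot path ...
//       if futures::poll!(Box::pin(interval.tick())).is_ready() {
//           // reached at most about once per second
//       }
//   }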
537 :
538 : type SubscribeByFilterStream =
539 : Pin<Box<dyn Stream<Item = Result<TypedMessage, Status>> + Send + 'static>>;
540 :
541 : /// Subscribe to all messages, limited by a filter.
542 0 : async fn subscribe_by_filter(
543 0 : &self,
544 0 : request: Request<SubscribeByFilterRequest>,
545 0 : ) -> std::result::Result<Response<Self::SubscribeByFilterStream>, Status> {
546 0 : let &RemoteAddr(remote_addr) = request
547 0 : .extensions()
548 0 : .get()
549 0 : .expect("RemoteAddr inserted by handler");
550 0 : let proto_filter = request.into_inner();
551 0 : let ttid_filter = proto_filter.tenant_timeline_id.as_ref();
552 :
553 0 : let sub_key = SubscriptionKey::from_proto_filter_tenant_timeline_id(ttid_filter)?;
554 0 : let types_set = proto_filter
555 0 : .types
556 0 : .iter()
557 0 : .map(|t| t.r#type)
558 0 : .collect::<std::collections::HashSet<_>>();
559 0 :
560 0 : let mut subscriber = self.registry.register_subscriber(sub_key, remote_addr);
561 0 :
562 0 : // transform rx into stream with item = Result, as method result demands
563 0 : let output = async_stream::try_stream! {
564 0 : let mut warn_interval = time::interval(Duration::from_millis(1000));
565 0 : let mut missed_msgs: u64 = 0;
566 0 : loop {
567 0 : match subscriber.sub_rx.recv().await {
568 0 : Ok(msg) => {
569 0 : let msg_type = msg.message_type() as i32;
570 0 : if types_set.contains(&msg_type) {
571 0 : yield msg.as_typed_message();
572 0 : BROADCASTED_MESSAGES_TOTAL.inc();
573 0 : }
574 0 : },
575 0 : Err(RecvError::Lagged(skipped_msg)) => {
576 0 : BROADCAST_DROPPED_MESSAGES_TOTAL.inc_by(skipped_msg);
577 0 : missed_msgs += skipped_msg;
578 0 : if (futures::poll!(Box::pin(warn_interval.tick()))).is_ready() {
579 0 : warn!("subscription id={}, key={:?} addr={:?} dropped {} messages, channel is full",
580 0 : subscriber.id, subscriber.key, subscriber.remote_addr, missed_msgs);
581 0 : missed_msgs = 0;
582 0 : }
583 0 : }
584 0 : Err(RecvError::Closed) => {
585 0 : // can't happen, we never drop the channel while there is a subscriber
586 0 : Err(Status::new(Code::Internal, "channel unexpectantly closed"))?;
587 0 : }
588 0 : }
589 0 : }
590 0 : };
591 0 :
592 0 : Ok(Response::new(
593 0 : Box::pin(output) as Self::SubscribeByFilterStream
594 0 : ))
595 0 : }
596 :
597 : /// Publish one message.
598 0 : async fn publish_one(
599 0 : &self,
600 0 : request: Request<TypedMessage>,
601 0 : ) -> std::result::Result<Response<()>, Status> {
602 0 : let msg = Message::from(request.into_inner())?;
603 0 : PUBLISHED_ONEOFF_MESSAGES_TOTAL.inc();
604 0 : self.registry.send_msg(&msg)?;
605 0 : Ok(Response::new(()))
606 0 : }
607 : }
608 :
609 : // We serve only metrics and healthcheck through http1.
610 0 : async fn http1_handler(
611 0 : req: hyper::Request<Incoming>,
612 0 : ) -> Result<hyper::Response<BoxBody>, Infallible> {
613 0 : let resp = match (req.method(), req.uri().path()) {
614 0 : (&Method::GET, "/metrics") => {
615 0 : let mut buffer = vec![];
616 0 : let metrics = metrics::gather();
617 0 : let encoder = TextEncoder::new();
618 0 : encoder.encode(&metrics, &mut buffer).unwrap();
619 0 :
620 0 : hyper::Response::builder()
621 0 : .status(StatusCode::OK)
622 0 : .header(CONTENT_TYPE, encoder.format_type())
623 0 : .body(body::boxed(Full::new(bytes::Bytes::from(buffer))))
624 0 : .unwrap()
625 : }
626 0 : (&Method::GET, "/status") => hyper::Response::builder()
627 0 : .status(StatusCode::OK)
628 0 : .body(empty_body())
629 0 : .unwrap(),
630 0 : _ => hyper::Response::builder()
631 0 : .status(StatusCode::NOT_FOUND)
632 0 : .body(empty_body())
633 0 : .unwrap(),
634 : };
635 0 : Ok(resp)
636 0 : }
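// For example, with the broker listening on 127.0.0.1:50051 (hypothetical
// address), the two endpoints can be probed with plain HTTP/1.1:
//
//   curl http://127.0.0.1:50051/metrics   # Prometheus text exposition
//   curl -i http://127.0.0.1:50051/status # 200 OK healthcheck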
637 :
638 : #[derive(Clone, Copy)]
639 : struct RemoteAddr(SocketAddr);
640 :
641 : #[tokio::main]
642 0 : async fn main() -> Result<(), Box<dyn std::error::Error>> {
643 0 : let args = Args::parse();
644 0 :
645 0 : // important to keep the order of:
646 0 : // 1. init logging
647 0 : // 2. tracing panic hook
648 0 : // 3. sentry
649 0 : logging::init(
650 0 : LogFormat::from_config(&args.log_format)?,
651 0 : logging::TracingErrorLayerEnablement::Disabled,
652 0 : logging::Output::Stdout,
653 0 : )?;
654 0 : logging::replace_panic_hook_with_tracing_panic_hook().forget();
655 0 : // initialize sentry if SENTRY_DSN is provided
656 0 : let _sentry_guard = init_sentry(Some(GIT_VERSION.into()), &[]);
657 0 : info!("version: {GIT_VERSION} build_tag: {BUILD_TAG}");
658 0 : metrics::set_build_info_metric(GIT_VERSION, BUILD_TAG);
659 0 :
660 0 : // On any shutdown signal, log its receipt and exit.
661 0 : std::thread::spawn(move || {
662 0 : ShutdownSignals::handle(|signal| {
663 0 : info!("received {}, terminating", signal.name());
664 0 : std::process::exit(0);
665 0 : })
666 0 : });
667 0 :
668 0 : let registry = Registry {
669 0 : shared_state: Arc::new(RwLock::new(SharedState::new(args.all_keys_chan_size))),
670 0 : timeline_chan_size: args.timeline_chan_size,
671 0 : };
672 0 : let storage_broker_impl = Broker {
673 0 : registry: registry.clone(),
674 0 : };
675 0 : let storage_broker_server = BrokerServiceServer::new(storage_broker_impl);
676 0 :
677 0 : // grpc is served along with http1 for metrics on a single port, hence we
678 0 : // don't use tonic's Server.
679 0 : let tcp_listener = TcpListener::bind(&args.listen_addr).await?;
680 0 : info!("listening on {}", &args.listen_addr);
681 0 : loop {
682 0 : let (stream, addr) = match tcp_listener.accept().await {
683 0 : Ok(v) => v,
684 0 : Err(e) => {
685 0 : info!("couldn't accept connection: {e}");
686 0 : continue;
687 0 : }
688 0 : };
689 0 :
690 0 : let mut builder = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
691 0 : builder.http1().timer(TokioTimer::new());
692 0 : builder
693 0 : .http2()
694 0 : .timer(TokioTimer::new())
695 0 : .keep_alive_interval(Some(args.http2_keepalive_interval))
696 0 : // This matches the tonic server default. It allows us to support production-like workloads.
697 0 : .max_concurrent_streams(None);
698 0 :
699 0 : let storage_broker_server_cloned = storage_broker_server.clone();
700 0 : let remote_addr = RemoteAddr(addr);
701 0 : let service_fn_ = async move {
702 0 : service_fn(move |mut req| {
703 0 : // That's what tonic's MakeSvc.call does to pass conninfo to
704 0 : // the request handler (and where its request.remote_addr()
705 0 : // expects it to find).
706 0 : req.extensions_mut().insert(remote_addr);
707 0 :
708 0 : // Technically this second clone is not needed, but consume
709 0 : // by async block is apparently unavoidable. BTW, error
710 0 : // message is enigmatic, see
711 0 : // https://github.com/rust-lang/rust/issues/68119
712 0 : //
713 0 : // We could get away without async block at all, but then we
714 0 : // need to resort to futures::Either to merge the result,
715 0 : // which doesn't caress an eye as well.
716 0 : let mut storage_broker_server_svc = storage_broker_server_cloned.clone();
717 0 : async move {
718 0 : if req.headers().get("content-type").map(|x| x.as_bytes())
719 0 : == Some(b"application/grpc")
720 0 : {
721 0 : let res_resp = storage_broker_server_svc.call(req).await;
722 0 : // Grpc and http1 handlers have slightly different
723 0 : // Response types: it is UnsyncBoxBody for the
724 0 : // former one (not sure why) and plain hyper::Body
725 0 : // for the latter. Both implement HttpBody though,
726 0 : // and `Either` is used to merge them.
727 0 : res_resp.map(|resp| resp.map(http_body_util::Either::Left))
728 0 : } else {
729 0 : let res_resp = http1_handler(req).await;
730 0 : res_resp.map(|resp| resp.map(http_body_util::Either::Right))
731 0 : }
732 0 : }
733 0 : })
734 0 : }
735 0 : .await;
736 0 :
737 0 : tokio::task::spawn(async move {
738 0 : let res = builder
739 0 : .serve_connection(TokioIo::new(stream), service_fn_)
740 0 : .await;
741 0 :
742 0 : if let Err(e) = res {
743 0 : info!("error serving connection from {addr}: {e}");
744 0 : }
745 0 : });
746 0 : }
747 0 : }
748 :
749 : #[cfg(test)]
750 : mod tests {
751 : use storage_broker::proto::TenantTimelineId as ProtoTenantTimelineId;
752 : use tokio::sync::broadcast::error::TryRecvError;
753 : use utils::id::{TenantId, TimelineId};
754 :
755 : use super::*;
756 :
757 2 : fn msg(timeline_id: Vec<u8>) -> Message {
758 2 : Message::SafekeeperTimelineInfo(SafekeeperTimelineInfo {
759 2 : safekeeper_id: 1,
760 2 : tenant_timeline_id: Some(ProtoTenantTimelineId {
761 2 : tenant_id: vec![0x00; 16],
762 2 : timeline_id,
763 2 : }),
764 2 : term: 0,
765 2 : last_log_term: 0,
766 2 : flush_lsn: 1,
767 2 : commit_lsn: 2,
768 2 : backup_lsn: 3,
769 2 : remote_consistent_lsn: 4,
770 2 : peer_horizon_lsn: 5,
771 2 : safekeeper_connstr: "neon-1-sk-1.local:7676".to_owned(),
772 2 : http_connstr: "neon-1-sk-1.local:7677".to_owned(),
773 2 : local_start_lsn: 0,
774 2 : availability_zone: None,
775 2 : standby_horizon: 0,
776 2 : })
777 2 : }
778 :
779 3 : fn tli_from_u64(i: u64) -> Vec<u8> {
780 3 : let mut timeline_id = vec![0xFF; 8];
781 3 : timeline_id.extend_from_slice(&i.to_be_bytes());
782 3 : timeline_id
783 3 : }
784 :
785 3 : fn mock_addr() -> SocketAddr {
786 3 : "127.0.0.1:8080".parse().unwrap()
787 3 : }
788 :
789 : #[tokio::test]
790 1 : async fn test_registry() {
791 1 : let registry = Registry {
792 1 : shared_state: Arc::new(RwLock::new(SharedState::new(16))),
793 1 : timeline_chan_size: 16,
794 1 : };
795 1 :
796 1 : // subscribe to timeline 2
797 1 : let ttid_2 = TenantTimelineId {
798 1 : tenant_id: TenantId::from_slice(&[0x00; 16]).unwrap(),
799 1 : timeline_id: TimelineId::from_slice(&tli_from_u64(2)).unwrap(),
800 1 : };
801 1 : let sub_key_2 = SubscriptionKey::Timeline(ttid_2);
802 1 : let mut subscriber_2 = registry.register_subscriber(sub_key_2, mock_addr());
803 1 : let mut subscriber_all = registry.register_subscriber(SubscriptionKey::All, mock_addr());
804 1 :
805 1 : // send two messages with different keys
806 1 : let msg_1 = msg(tli_from_u64(1));
807 1 : let msg_2 = msg(tli_from_u64(2));
808 1 : let mut publisher = registry.register_publisher(mock_addr());
809 1 : publisher.send_msg(&msg_1).expect("failed to send msg");
810 1 : publisher.send_msg(&msg_2).expect("failed to send msg");
811 1 :
812 1 : // msg with key 2 should arrive to subscriber_2
813 1 : assert_eq!(subscriber_2.sub_rx.try_recv().unwrap(), msg_2);
814 1 :
815 1 : // but nothing more
816 1 : assert_eq!(
817 1 : subscriber_2.sub_rx.try_recv().unwrap_err(),
818 1 : TryRecvError::Empty
819 1 : );
820 1 :
821 1 : // subscriber_all should receive both messages
822 1 : assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_1);
823 1 : assert_eq!(subscriber_all.sub_rx.try_recv().unwrap(), msg_2);
824 1 : assert_eq!(
825 1 : subscriber_all.sub_rx.try_recv().unwrap_err(),
826 1 : TryRecvError::Empty
827 1 : );
828 1 : }
829 : }