//! Communication with the broker, providing safekeeper peers and pageserver coordination.
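//!
//! `task_main` spawns and supervises three long-lived loops: a push loop
//! publishing info about active timelines, a pull loop consuming safekeeper
//! info from the broker, and a discovery loop answering
//! `SafekeeperDiscoveryRequest` messages. A small stats task logs when broker
//! updates stop arriving.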

use anyhow::{anyhow, bail, Context, Error, Result};

use storage_broker::parse_proto_ttid;
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey;
use storage_broker::proto::FilterTenantTimelineId;
use storage_broker::proto::MessageType;
use storage_broker::proto::SafekeeperDiscoveryResponse;
use storage_broker::proto::SubscribeByFilterRequest;
use storage_broker::proto::SubscribeSafekeeperInfoRequest;
use storage_broker::proto::TypeSubscription;
use storage_broker::proto::TypedMessage;
use storage_broker::Request;

use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::UNIX_EPOCH;
use tokio::task::JoinHandle;
use tokio::time::sleep;
use tracing::*;

use crate::metrics::BROKER_ITERATION_TIMELINES;
use crate::metrics::BROKER_PULLED_UPDATES;
use crate::metrics::BROKER_PUSHED_UPDATES;
use crate::metrics::BROKER_PUSH_ALL_UPDATES_SECONDS;
use crate::GlobalTimelines;
use crate::SafeKeeperConf;

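/// How often `task_main` retries spawning broker tasks that have exited or panicked.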
const RETRY_INTERVAL_MSEC: u64 = 1000;
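/// How often `push_loop` publishes information about all active timelines to the broker.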
const PUSH_INTERVAL_MSEC: u64 = 1000;

/// Periodically push data about all active timelines to the broker.
async fn push_loop(
    conf: Arc<SafeKeeperConf>,
    global_timelines: Arc<GlobalTimelines>,
) -> anyhow::Result<()> {
    if conf.disable_periodic_broker_push {
        info!("broker push_loop is disabled, doing nothing...");
        futures::future::pending::<()>().await; // sleep forever
        return Ok(());
    }

    let active_timelines_set = global_timelines.get_global_broker_active_set();

    let mut client =
        storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?;
    let push_interval = Duration::from_millis(PUSH_INTERVAL_MSEC);

    let outbound = async_stream::stream! {
        loop {
            // Note: we lock the runtime here and in timeline methods, as GlobalTimelines
            // is under a plain mutex. That's ok: none of this code is performance
            // sensitive, and there is no risk of deadlock as we don't await while
            // the lock is held.
            let now = Instant::now();
            let all_tlis = active_timelines_set.get_all();
            let mut n_pushed_tlis = 0;
            for tli in &all_tlis {
                let sk_info = tli.get_safekeeper_info(&conf).await;
                yield sk_info;
                BROKER_PUSHED_UPDATES.inc();
                n_pushed_tlis += 1;
            }
            let elapsed = now.elapsed();

            BROKER_PUSH_ALL_UPDATES_SECONDS.observe(elapsed.as_secs_f64());
            BROKER_ITERATION_TIMELINES.observe(n_pushed_tlis as f64);

            if elapsed > push_interval / 2 {
                info!("broker push is taking too long, pushed {} timeline updates to broker in {:?}", n_pushed_tlis, elapsed);
            }

            sleep(push_interval).await;
        }
    };
    client
        .publish_safekeeper_info(Request::new(outbound))
        .await?;
    Ok(())
}

/// Subscribe and fetch all the interesting data from the broker.
#[instrument(name = "broker_pull", skip_all)]
async fn pull_loop(
    conf: Arc<SafeKeeperConf>,
    global_timelines: Arc<GlobalTimelines>,
    stats: Arc<BrokerStats>,
) -> Result<()> {
    let mut client =
        storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?;

    // TODO: subscribe only to local timelines instead of all
    let request = SubscribeSafekeeperInfoRequest {
        subscription_key: Some(ProtoSubscriptionKey::All(())),
    };

    let mut stream = client
        .subscribe_safekeeper_info(request)
        .await
        .context("subscribe_safekeeper_info request failed")?
        .into_inner();

    let ok_counter = BROKER_PULLED_UPDATES.with_label_values(&["ok"]);
    let not_found = BROKER_PULLED_UPDATES.with_label_values(&["not_found"]);
    let err_counter = BROKER_PULLED_UPDATES.with_label_values(&["error"]);

    while let Some(msg) = stream.message().await? {
        stats.update_pulled();

        let proto_ttid = msg
            .tenant_timeline_id
            .as_ref()
            .ok_or_else(|| anyhow!("missing tenant_timeline_id"))?;
        let ttid = parse_proto_ttid(proto_ttid)?;
        if let Ok(tli) = global_timelines.get(ttid) {
            // Note that we also receive *our own* info. That's
            // important, as it is used as an indication of a live
            // connection to the broker.

            // note: there are blocking operations below, but it's considered fine for now
            let res = tli.record_safekeeper_info(msg).await;
            if res.is_ok() {
                ok_counter.inc();
            } else {
                err_counter.inc();
            }
            res?;
        } else {
            not_found.inc();
        }
    }
    bail!("end of stream");
}

/// Process incoming discovery requests. This is done in a separate task to avoid
/// interfering with the normal pull/push loops.
async fn discover_loop(
    conf: Arc<SafeKeeperConf>,
    global_timelines: Arc<GlobalTimelines>,
    stats: Arc<BrokerStats>,
) -> Result<()> {
    let mut client =
        storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?;

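    // The tenant_timeline_id filter is disabled, so discovery requests for all
    // timelines are received; requests for timelines this safekeeper doesn't
    // know about are simply ignored below.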
    let request = SubscribeByFilterRequest {
        types: vec![TypeSubscription {
            r#type: MessageType::SafekeeperDiscoveryRequest as i32,
        }],
        tenant_timeline_id: Some(FilterTenantTimelineId {
            enabled: false,
            tenant_timeline_id: None,
        }),
    };

    let mut stream = client
        .subscribe_by_filter(request)
        .await
        .context("subscribe_by_filter request failed")?
        .into_inner();

    let discover_counter = BROKER_PULLED_UPDATES.with_label_values(&["discover"]);

    while let Some(typed_msg) = stream.message().await? {
        stats.update_pulled();

        match typed_msg.r#type() {
            MessageType::SafekeeperDiscoveryRequest => {
                let msg = typed_msg
                    .safekeeper_discovery_request
                    .expect("proto type mismatch from broker message");

                let proto_ttid = msg
                    .tenant_timeline_id
                    .as_ref()
                    .ok_or_else(|| anyhow!("missing tenant_timeline_id"))?;
                let ttid = parse_proto_ttid(proto_ttid)?;
                if let Ok(tli) = global_timelines.get(ttid) {
                    // we received a discovery request for a timeline we know about
                    discover_counter.inc();

                    // create and reply with a discovery response
                    let sk_info = tli.get_safekeeper_info(&conf).await;
                    let response = SafekeeperDiscoveryResponse {
                        safekeeper_id: sk_info.safekeeper_id,
                        tenant_timeline_id: sk_info.tenant_timeline_id,
                        commit_lsn: sk_info.commit_lsn,
                        safekeeper_connstr: sk_info.safekeeper_connstr,
                        availability_zone: sk_info.availability_zone,
                        standby_horizon: 0,
                    };

                    // note: this awaits the publish, so the discover loop doesn't
                    // process further messages until the response is sent
                    client
                        .publish_one(TypedMessage {
                            r#type: MessageType::SafekeeperDiscoveryResponse as i32,
                            safekeeper_timeline_info: None,
                            safekeeper_discovery_request: None,
                            safekeeper_discovery_response: Some(response),
                        })
                        .await?;
                }
            }

            _ => {
                warn!(
                    "unexpected message type i32 {}, {:?}",
                    typed_msg.r#type,
                    typed_msg.r#type()
                );
            }
        }
    }
    bail!("end of stream");
}

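/// Spawn and supervise the broker push, pull, and discovery tasks, restarting
/// any of them if they fail or panic.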
pub async fn task_main(
    conf: Arc<SafeKeeperConf>,
    global_timelines: Arc<GlobalTimelines>,
) -> anyhow::Result<()> {
    info!("started, broker endpoint {:?}", conf.broker_endpoint);

    let mut ticker = tokio::time::interval(Duration::from_millis(RETRY_INTERVAL_MSEC));
    let mut push_handle: Option<JoinHandle<Result<(), Error>>> = None;
    let mut pull_handle: Option<JoinHandle<Result<(), Error>>> = None;
    let mut discover_handle: Option<JoinHandle<Result<(), Error>>> = None;

    let stats = Arc::new(BrokerStats::new());
    let stats_task = task_stats(stats.clone());
    tokio::pin!(stats_task);

    // Selecting on JoinHandles requires some squats; is there a better way to
    // reap tasks individually?

    // Handling failures inside a task won't catch panics, and in Tokio a task's
    // panic doesn't kill the whole executor, so it is better to do the reaping
    // here.
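    // A task that has exited or panicked is cleared from its slot and respawned
    // on the next ticker tick.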
    loop {
        tokio::select! {
            res = async { push_handle.as_mut().unwrap().await }, if push_handle.is_some() => {
                // was it a panic or a normal error?
                let err = match res {
                    Ok(res_internal) => res_internal.unwrap_err(),
                    Err(err_outer) => err_outer.into(),
                };
                warn!("push task failed: {:?}", err);
                push_handle = None;
            },
            res = async { pull_handle.as_mut().unwrap().await }, if pull_handle.is_some() => {
                // was it a panic or a normal error?
                match res {
                    Ok(res_internal) => if let Err(err_inner) = res_internal {
                        warn!("pull task failed: {:?}", err_inner);
                    }
                    Err(err_outer) => { warn!("pull task panicked: {:?}", err_outer) }
                };
                pull_handle = None;
            },
            res = async { discover_handle.as_mut().unwrap().await }, if discover_handle.is_some() => {
                // was it a panic or a normal error?
                match res {
                    Ok(res_internal) => if let Err(err_inner) = res_internal {
                        warn!("discover task failed: {:?}", err_inner);
                    }
                    Err(err_outer) => { warn!("discover task panicked: {:?}", err_outer) }
                };
                discover_handle = None;
            },
            _ = ticker.tick() => {
                if push_handle.is_none() {
                    push_handle = Some(tokio::spawn(push_loop(conf.clone(), global_timelines.clone())));
                }
                if pull_handle.is_none() {
                    pull_handle = Some(tokio::spawn(pull_loop(conf.clone(), global_timelines.clone(), stats.clone())));
                }
                if discover_handle.is_none() {
                    discover_handle = Some(tokio::spawn(discover_loop(conf.clone(), global_timelines.clone(), stats.clone())));
                }
            },
            _ = &mut stats_task => {}
        }
    }
}

struct BrokerStats {
    /// Timestamp of the last received message from the broker.
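    /// Milliseconds since the Unix epoch; 0 means no message has been received yet.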
    last_pulled_ts: AtomicU64,
}

impl BrokerStats {
    fn new() -> Self {
        BrokerStats {
            last_pulled_ts: AtomicU64::new(0),
        }
    }

    fn now_millis() -> u64 {
        std::time::SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("time is before epoch")
            .as_millis() as u64
    }

    /// Update the last_pulled timestamp to the current time.
    fn update_pulled(&self) {
        self.last_pulled_ts
            .store(Self::now_millis(), std::sync::atomic::Ordering::Relaxed);
    }
}

/// Periodically log if no data has been received from the broker for a while.
async fn task_stats(stats: Arc<BrokerStats>) {
    let warn_duration = Duration::from_secs(10);
    let mut ticker = tokio::time::interval(warn_duration);

    loop {
        tokio::select! {
            _ = ticker.tick() => {
                let last_pulled = stats.last_pulled_ts.load(std::sync::atomic::Ordering::SeqCst);
                if last_pulled == 0 {
                    // no broker updates yet
                    continue;
                }

                let now = BrokerStats::now_millis();
                if now > last_pulled && now - last_pulled > warn_duration.as_millis() as u64 {
                    let ts = chrono::DateTime::from_timestamp_millis(last_pulled as i64).expect("invalid timestamp");
                    info!("no broker updates for some time, last update: {:?}", ts);
                }
            }
        }
    }
}