//! Communication with the broker, providing safekeeper peers and pageserver coordination.
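//!
//! Three long-lived tasks talk to the broker: `push_loop` periodically
//! publishes info about all active timelines, `pull_loop` subscribes to peer
//! safekeeper info, and `discover_loop` replies to safekeeper discovery
//! requests. `task_main` supervises all three, restarting any that fails.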

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Error;
use anyhow::Result;

use storage_broker::parse_proto_ttid;
use storage_broker::proto::subscribe_safekeeper_info_request::SubscriptionKey as ProtoSubscriptionKey;
use storage_broker::proto::FilterTenantTimelineId;
use storage_broker::proto::MessageType;
use storage_broker::proto::SafekeeperDiscoveryResponse;
use storage_broker::proto::SubscribeByFilterRequest;
use storage_broker::proto::SubscribeSafekeeperInfoRequest;
use storage_broker::proto::TypeSubscription;
use storage_broker::proto::TypedMessage;
use storage_broker::Request;

use std::sync::atomic::AtomicU64;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use std::time::UNIX_EPOCH;
use tokio::task::JoinHandle;
use tokio::time::sleep;
use tracing::*;

use crate::metrics::BROKER_ITERATION_TIMELINES;
use crate::metrics::BROKER_PULLED_UPDATES;
use crate::metrics::BROKER_PUSHED_UPDATES;
use crate::metrics::BROKER_PUSH_ALL_UPDATES_SECONDS;
use crate::GlobalTimelines;
use crate::SafeKeeperConf;

/// How often `task_main` checks for failed tasks and respawns them.
const RETRY_INTERVAL_MSEC: u64 = 1000;
/// How often `push_loop` publishes active timeline info to the broker.
const PUSH_INTERVAL_MSEC: u64 = 1000;

/// Periodically push data about all active timelines to the broker.
async fn push_loop(conf: SafeKeeperConf) -> anyhow::Result<()> {
    if conf.disable_periodic_broker_push {
        info!("broker push_loop is disabled, doing nothing...");
        futures::future::pending::<()>().await; // sleep forever
        return Ok(());
    }

    let active_timelines_set = GlobalTimelines::get_global_broker_active_set();

    let mut client =
        storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?;
    let push_interval = Duration::from_millis(PUSH_INTERVAL_MSEC);

    let outbound = async_stream::stream! {
        loop {
            // Note: we lock the runtime here and in timeline methods, as
            // GlobalTimelines is under a plain mutex. That's OK: none of this
            // code is performance sensitive, and there is no deadlock risk
            // because we don't await while the lock is held.
            let now = Instant::now();
            let all_tlis = active_timelines_set.get_all();
            let mut n_pushed_tlis = 0;
            for tli in &all_tlis {
                let sk_info = tli.get_safekeeper_info(&conf).await;
                yield sk_info;
                BROKER_PUSHED_UPDATES.inc();
                n_pushed_tlis += 1;
            }
            let elapsed = now.elapsed();

            BROKER_PUSH_ALL_UPDATES_SECONDS.observe(elapsed.as_secs_f64());
            BROKER_ITERATION_TIMELINES.observe(n_pushed_tlis as f64);

            if elapsed > push_interval / 2 {
                info!("broker push took too long, pushed {} timeline updates to broker in {:?}", n_pushed_tlis, elapsed);
            }

            sleep(push_interval).await;
        }
    };
    client
        .publish_safekeeper_info(Request::new(outbound))
        .await?;
    Ok(())
}

/// Subscribe and fetch all the interesting data from the broker.
#[instrument(name = "broker_pull", skip_all)]
async fn pull_loop(conf: SafeKeeperConf, stats: Arc<BrokerStats>) -> Result<()> {
    let mut client = storage_broker::connect(conf.broker_endpoint, conf.broker_keepalive_interval)?;

    // TODO: subscribe only to local timelines instead of all
    let request = SubscribeSafekeeperInfoRequest {
        subscription_key: Some(ProtoSubscriptionKey::All(())),
    };

    let mut stream = client
        .subscribe_safekeeper_info(request)
        .await
        .context("subscribe_safekeeper_info request failed")?
        .into_inner();

    let ok_counter = BROKER_PULLED_UPDATES.with_label_values(&["ok"]);
    let not_found = BROKER_PULLED_UPDATES.with_label_values(&["not_found"]);
    let err_counter = BROKER_PULLED_UPDATES.with_label_values(&["error"]);

    while let Some(msg) = stream.message().await? {
        stats.update_pulled();

        let proto_ttid = msg
            .tenant_timeline_id
            .as_ref()
            .ok_or_else(|| anyhow!("missing tenant_timeline_id"))?;
        let ttid = parse_proto_ttid(proto_ttid)?;
        if let Ok(tli) = GlobalTimelines::get(ttid) {
            // Note that we also receive *our own* info. That's
            // important, as it is used as an indication of live
            // connection to the broker.

            // note: there are blocking operations below, but it's considered fine for now
            let res = tli.record_safekeeper_info(msg).await;
            if res.is_ok() {
                ok_counter.inc();
            } else {
                err_counter.inc();
            }
            res?;
        } else {
            not_found.inc();
        }
    }
    bail!("end of stream");
}

/// Process incoming discover requests. This is done in a separate task to avoid
/// interfering with the normal pull/push loops.
async fn discover_loop(conf: SafeKeeperConf, stats: Arc<BrokerStats>) -> Result<()> {
    let mut client =
        storage_broker::connect(conf.broker_endpoint.clone(), conf.broker_keepalive_interval)?;

    let request = SubscribeByFilterRequest {
        types: vec![TypeSubscription {
            r#type: MessageType::SafekeeperDiscoveryRequest as i32,
        }],
        tenant_timeline_id: Some(FilterTenantTimelineId {
            enabled: false,
            tenant_timeline_id: None,
        }),
    };

    let mut stream = client
        .subscribe_by_filter(request)
        .await
        .context("subscribe_by_filter request failed")?
        .into_inner();

    let discover_counter = BROKER_PULLED_UPDATES.with_label_values(&["discover"]);

    while let Some(typed_msg) = stream.message().await? {
        stats.update_pulled();

        match typed_msg.r#type() {
            MessageType::SafekeeperDiscoveryRequest => {
                let msg = typed_msg
                    .safekeeper_discovery_request
                    .expect("proto type mismatch from broker message");

                let proto_ttid = msg
                    .tenant_timeline_id
                    .as_ref()
                    .ok_or_else(|| anyhow!("missing tenant_timeline_id"))?;
                let ttid = parse_proto_ttid(proto_ttid)?;
                if let Ok(tli) = GlobalTimelines::get(ttid) {
                    // we received a discovery request for a timeline we know about
                    discover_counter.inc();

                    // create and reply with discovery response
                    let sk_info = tli.get_safekeeper_info(&conf).await;
                    let response = SafekeeperDiscoveryResponse {
                        safekeeper_id: sk_info.safekeeper_id,
                        tenant_timeline_id: sk_info.tenant_timeline_id,
                        commit_lsn: sk_info.commit_lsn,
                        safekeeper_connstr: sk_info.safekeeper_connstr,
                        availability_zone: sk_info.availability_zone,
                        standby_horizon: 0,
                    };

                    // note: this blocks the discover loop until the response is published
                    client
                        .publish_one(TypedMessage {
                            r#type: MessageType::SafekeeperDiscoveryResponse as i32,
                            safekeeper_timeline_info: None,
                            safekeeper_discovery_request: None,
                            safekeeper_discovery_response: Some(response),
                        })
                        .await?;
                }
            }

            _ => {
                warn!(
                    "unexpected message type i32 {}, {:?}",
                    typed_msg.r#type,
                    typed_msg.r#type()
                );
            }
        }
    }
    bail!("end of stream");
}

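/// Supervise the broker tasks: spawn the push, pull and discover loops plus a
/// stats watchdog, and respawn any loop that returns an error or panics.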
pub async fn task_main(conf: SafeKeeperConf) -> anyhow::Result<()> {
    info!("started, broker endpoint {:?}", conf.broker_endpoint);

    let mut ticker = tokio::time::interval(Duration::from_millis(RETRY_INTERVAL_MSEC));
    let mut push_handle: Option<JoinHandle<Result<(), Error>>> = None;
    let mut pull_handle: Option<JoinHandle<Result<(), Error>>> = None;
    let mut discover_handle: Option<JoinHandle<Result<(), Error>>> = None;

    let stats = Arc::new(BrokerStats::new());
    let stats_task = task_stats(stats.clone());
    tokio::pin!(stats_task);

    // Selecting on JoinHandles requires some squats; is there a better way to
    // reap tasks individually?

    // Handling failures inside the task itself won't catch panics, and in
    // Tokio a task's panic doesn't kill the whole executor, so it is better
    // to do the reaping here.
    loop {
        tokio::select! {
            res = async { push_handle.as_mut().unwrap().await }, if push_handle.is_some() => {
                // was it a panic or a normal error?
                let err = match res {
                    Ok(res_internal) => res_internal.unwrap_err(),
                    Err(err_outer) => err_outer.into(),
                };
                warn!("push task failed: {:?}", err);
                push_handle = None;
            },
            res = async { pull_handle.as_mut().unwrap().await }, if pull_handle.is_some() => {
                // was it a panic or a normal error?
                match res {
                    Ok(res_internal) => if let Err(err_inner) = res_internal {
                        warn!("pull task failed: {:?}", err_inner);
                    }
                    Err(err_outer) => { warn!("pull task panicked: {:?}", err_outer) }
                };
                pull_handle = None;
            },
            res = async { discover_handle.as_mut().unwrap().await }, if discover_handle.is_some() => {
                // was it a panic or a normal error?
                match res {
                    Ok(res_internal) => if let Err(err_inner) = res_internal {
                        warn!("discover task failed: {:?}", err_inner);
                    }
                    Err(err_outer) => { warn!("discover task panicked: {:?}", err_outer) }
                };
                discover_handle = None;
            },
            _ = ticker.tick() => {
                if push_handle.is_none() {
                    push_handle = Some(tokio::spawn(push_loop(conf.clone())));
                }
                if pull_handle.is_none() {
                    pull_handle = Some(tokio::spawn(pull_loop(conf.clone(), stats.clone())));
                }
                if discover_handle.is_none() {
                    discover_handle = Some(tokio::spawn(discover_loop(conf.clone(), stats.clone())));
                }
            },
            _ = &mut stats_task => {}
        }
    }
}

struct BrokerStats {
    /// Timestamp of the last received message from the broker.
    last_pulled_ts: AtomicU64,
}

impl BrokerStats {
    fn new() -> Self {
        BrokerStats {
            last_pulled_ts: AtomicU64::new(0),
        }
    }

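    /// Current wall-clock time in milliseconds since the Unix epoch.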
    fn now_millis() -> u64 {
        std::time::SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("time is before epoch")
            .as_millis() as u64
    }

    /// Update the last_pulled timestamp to the current time.
    fn update_pulled(&self) {
        self.last_pulled_ts
            .store(Self::now_millis(), std::sync::atomic::Ordering::Relaxed);
    }
}

/// Periodically write to logs if there are issues with receiving data from the broker.
async fn task_stats(stats: Arc<BrokerStats>) {
    let warn_duration = Duration::from_secs(10);
    let mut ticker = tokio::time::interval(warn_duration);

    loop {
        tokio::select! {
            _ = ticker.tick() => {
                let last_pulled = stats.last_pulled_ts.load(std::sync::atomic::Ordering::SeqCst);
                if last_pulled == 0 {
                    // no broker updates yet
                    continue;
                }

                let now = BrokerStats::now_millis();
                if now > last_pulled && now - last_pulled > warn_duration.as_millis() as u64 {
                    let ts = chrono::DateTime::from_timestamp_millis(last_pulled as i64).expect("invalid timestamp");
                    info!("no broker updates for some time, last update: {:?}", ts);
                }
            }
        }
    }
}
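
// Hypothetical launch sketch (an assumption, not part of this module): the
// safekeeper binary is expected to drive the broker machinery by spawning
// `task_main` on its runtime, roughly like:
//
//     tokio::spawn(task_main(conf.clone()));
//
// where `conf` is the `SafeKeeperConf` loaded at startup.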