use std::error::Error as _;
use std::time::SystemTime;

use chrono::{DateTime, Utc};
use consumption_metrics::{Event, EventChunk, IdempotencyKey, CHUNK_SIZE};
use remote_storage::{GenericRemoteStorage, RemotePath};
use tokio::io::AsyncWriteExt;
use tokio_util::sync::CancellationToken;
use tracing::Instrument;

use super::{metrics::Name, Cache, MetricsKey, NewRawMetric, RawMetric};
use utils::id::{TenantId, TimelineId};

/// How the metrics from pageserver are identified.
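/// These fields appear inline in the serialized event JSON (as exercised by the
/// `metric_image_stability` test below), e.g.
/// `..,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}`.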
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy, PartialEq)]
struct Ids {
    pub(super) tenant_id: TenantId,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub(super) timeline_id: Option<TimelineId>,
}

/// Serialize and write metrics to an HTTP endpoint.
#[tracing::instrument(skip_all, fields(metrics_total = %metrics.len()))]
pub(super) async fn upload_metrics_http(
    client: &reqwest::Client,
    metric_collection_endpoint: &reqwest::Url,
    cancel: &CancellationToken,
    metrics: &[NewRawMetric],
    cached_metrics: &mut Cache,
    idempotency_keys: &[IdempotencyKey<'_>],
) -> anyhow::Result<()> {
    let mut uploaded = 0;
    let mut failed = 0;

    let started_at = std::time::Instant::now();

    let mut iter = serialize_in_chunks(CHUNK_SIZE, metrics, idempotency_keys);

    while let Some(res) = iter.next() {
        let (chunk, body) = res?;

        let event_bytes = body.len();

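        // `serialize_in_chunks` returns an ExactSizeIterator, so we know up front
        // whether this is the final chunk; `upload` forwards that to the receiver
        // in a header for the test verifiers.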
        let is_last = iter.len() == 0;

        let res = upload(client, metric_collection_endpoint, body, cancel, is_last)
            .instrument(tracing::info_span!(
                "upload",
                %event_bytes,
                uploaded,
                total = metrics.len(),
            ))
            .await;

        match res {
            Ok(()) => {
                for item in chunk {
                    cached_metrics.insert(item.key, item.clone());
                }
                uploaded += chunk.len();
            }
            Err(_) => {
                // Failure(s) have already been logged.
                //
                // However, this is an inconsistency: if we crash here, we will
                // restart with these values recorded as uploaded. In practice,
                // the rejections no longer happen.
                failed += chunk.len();
            }
        }
    }

    let elapsed = started_at.elapsed();

    tracing::info!(
        uploaded,
        failed,
        elapsed_ms = elapsed.as_millis(),
        "done sending metrics"
    );

    Ok(())
}

/// Serialize and write metrics to a remote storage object.
#[tracing::instrument(skip_all, fields(metrics_total = %metrics.len()))]
pub(super) async fn upload_metrics_bucket(
    client: &GenericRemoteStorage,
    cancel: &CancellationToken,
    node_id: &str,
    metrics: &[NewRawMetric],
    idempotency_keys: &[IdempotencyKey<'_>],
) -> anyhow::Result<()> {
    if metrics.is_empty() {
        // Skip uploads if we have no metrics, so that readers don't have to handle the
        // edge case of an empty object.
        return Ok(());
    }

    // Compose object path
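    // e.g. "year=2023/month=09/day=15/00:00:00Z_<node_id>.ndjson.gz"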
    let datetime: DateTime<Utc> = SystemTime::now().into();
    let ts_prefix = datetime.format("year=%Y/month=%m/day=%d/%H:%M:%SZ");
    let path = RemotePath::from_string(&format!("{ts_prefix}_{node_id}.ndjson.gz"))?;

    // Set up a gzip writer into a buffer
    let mut compressed_bytes: Vec<u8> = Vec::new();
    let compressed_writer = std::io::Cursor::new(&mut compressed_bytes);
    let mut gzip_writer = async_compression::tokio::write::GzipEncoder::new(compressed_writer);

    // Serialize and write into the compressed buffer
    let started_at = std::time::Instant::now();
    for res in serialize_in_chunks(CHUNK_SIZE, metrics, idempotency_keys) {
        let (_chunk, body) = res?;
        gzip_writer.write_all(&body).await?;
    }
    gzip_writer.flush().await?;
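    // `shutdown` finalizes the gzip stream by writing the trailer; `flush` alone does not.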
    gzip_writer.shutdown().await?;
    let compressed_length = compressed_bytes.len();

    // Write to remote storage
    client
        .upload_storage_object(
            futures::stream::once(futures::future::ready(Ok(compressed_bytes.into()))),
            compressed_length,
            &path,
            cancel,
        )
        .await?;
    let elapsed = started_at.elapsed();

    tracing::info!(
        compressed_length,
        elapsed_ms = elapsed.as_millis(),
        "write metrics bucket at {path}",
    );

    Ok(())
}

/// Serializes the input metrics as JSON in chunks of `chunk_size`. The provided
/// idempotency keys are injected into the corresponding metric events (they are
/// reused across the different metrics sinks) and must have the same length as the input.
fn serialize_in_chunks<'a>(
    chunk_size: usize,
    input: &'a [NewRawMetric],
    idempotency_keys: &'a [IdempotencyKey<'a>],
) -> impl ExactSizeIterator<Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>> + 'a
{
    use bytes::BufMut;

    assert_eq!(input.len(), idempotency_keys.len());

    struct Iter<'a> {
        inner: std::slice::Chunks<'a, NewRawMetric>,
        idempotency_keys: std::slice::Iter<'a, IdempotencyKey<'a>>,
        chunk_size: usize,

        // write to a BytesMut so that we can cheaply clone the frozen Bytes for retries
        buffer: bytes::BytesMut,
        // one chunk's worth of events is reused to produce each serialized document
        scratch: Vec<Event<Ids, Name>>,
    }

    impl<'a> Iterator for Iter<'a> {
        type Item = Result<(&'a [NewRawMetric], bytes::Bytes), serde_json::Error>;

        fn next(&mut self) -> Option<Self::Item> {
            let chunk = self.inner.next()?;

            if self.scratch.is_empty() {
                // first round: create events with N strings
                self.scratch.extend(
                    chunk
                        .iter()
                        .zip(&mut self.idempotency_keys)
                        .map(|(raw_metric, key)| raw_metric.as_event(key)),
                );
            } else {
                // next rounds: update_in_place to reuse allocations
                assert_eq!(self.scratch.len(), self.chunk_size);
                itertools::izip!(self.scratch.iter_mut(), chunk, &mut self.idempotency_keys)
                    .for_each(|(slot, raw_metric, key)| raw_metric.update_in_place(slot, key));
            }

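            // the final chunk may be shorter than chunk_size; serialize only the
            // events that were just (re)filled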
            let res = serde_json::to_writer(
                (&mut self.buffer).writer(),
                &EventChunk {
                    events: (&self.scratch[..chunk.len()]).into(),
                },
            );

            match res {
                Ok(()) => Some(Ok((chunk, self.buffer.split().freeze()))),
                Err(e) => Some(Err(e)),
            }
        }

        fn size_hint(&self) -> (usize, Option<usize>) {
            self.inner.size_hint()
        }
    }

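    // `slice::Chunks` reports an exact length, so the `size_hint` above is exact.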
    impl ExactSizeIterator for Iter<'_> {}

    let buffer = bytes::BytesMut::new();
    let inner = input.chunks(chunk_size);
    let idempotency_keys = idempotency_keys.iter();
    let scratch = Vec::new();

    Iter {
        inner,
        idempotency_keys,
        chunk_size,
        buffer,
        scratch,
    }
}

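/// Conversion of one raw metric into a consumption [`Event`], implemented for both the
/// legacy tuple-based [`RawMetric`] and the current [`NewRawMetric`].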
trait RawMetricExt {
    fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name>;
    fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>);
}

impl RawMetricExt for RawMetric {
    fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name> {
        let MetricsKey {
            metric,
            tenant_id,
            timeline_id,
        } = self.0;

        let (kind, value) = self.1;

        Event {
            kind,
            metric,
            idempotency_key: key.to_string(),
            value,
            extra: Ids {
                tenant_id,
                timeline_id,
            },
        }
    }

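    // Rewrites the event in place, reusing its existing `String` allocation for the
    // idempotency key instead of allocating a fresh one.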
    fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>) {
        use std::fmt::Write;

        let MetricsKey {
            metric,
            tenant_id,
            timeline_id,
        } = self.0;

        let (kind, value) = self.1;

        *event = Event {
            kind,
            metric,
            idempotency_key: {
                event.idempotency_key.clear();
                write!(event.idempotency_key, "{key}").unwrap();
                std::mem::take(&mut event.idempotency_key)
            },
            value,
            extra: Ids {
                tenant_id,
                timeline_id,
            },
        };
    }
}

impl RawMetricExt for NewRawMetric {
    fn as_event(&self, key: &IdempotencyKey<'_>) -> Event<Ids, Name> {
        let MetricsKey {
            metric,
            tenant_id,
            timeline_id,
        } = self.key;

        let kind = self.kind;
        let value = self.value;

        Event {
            kind,
            metric,
            idempotency_key: key.to_string(),
            value,
            extra: Ids {
                tenant_id,
                timeline_id,
            },
        }
    }

    fn update_in_place(&self, event: &mut Event<Ids, Name>, key: &IdempotencyKey<'_>) {
        use std::fmt::Write;

        let MetricsKey {
            metric,
            tenant_id,
            timeline_id,
        } = self.key;

        let kind = self.kind;
        let value = self.value;

        *event = Event {
            kind,
            metric,
            idempotency_key: {
                event.idempotency_key.clear();
                write!(event.idempotency_key, "{key}").unwrap();
                std::mem::take(&mut event.idempotency_key)
            },
            value,
            extra: Ids {
                tenant_id,
                timeline_id,
            },
        };
    }
}

pub(crate) trait KeyGen<'a> {
    fn generate(&self) -> IdempotencyKey<'a>;
}

impl<'a> KeyGen<'a> for &'a str {
    fn generate(&self) -> IdempotencyKey<'a> {
        IdempotencyKey::generate(self)
    }
}

enum UploadError {
    Rejected(reqwest::StatusCode),
    Reqwest(reqwest::Error),
    Cancelled,
}

impl std::fmt::Debug for UploadError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Reuse the Display impl, because backoff::retry logs this type via both.
        std::fmt::Display::fmt(self, f)
    }
}

impl std::fmt::Display for UploadError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        use UploadError::*;

        match self {
            Rejected(code) => write!(f, "server rejected the metrics with {code}"),
            Reqwest(e) => write!(
                f,
                "request failed: {e}{}",
                e.source().map(|e| format!(": {e}")).unwrap_or_default()
            ),
            Cancelled => write!(f, "cancelled"),
        }
    }
}

impl UploadError {
    fn is_reject(&self) -> bool {
        matches!(self, UploadError::Rejected(_))
    }
}

// this is consumed by the test verifiers
static LAST_IN_BATCH: reqwest::header::HeaderName =
    reqwest::header::HeaderName::from_static("pageserver-metrics-last-upload-in-batch");

async fn upload(
    client: &reqwest::Client,
    metric_collection_endpoint: &reqwest::Url,
    body: bytes::Bytes,
    cancel: &CancellationToken,
    is_last: bool,
) -> Result<(), UploadError> {
    let warn_after = 3;
    let max_attempts = 10;

    // this is used only with tests so far
    let last_value = if is_last { "true" } else { "false" };

    let res = utils::backoff::retry(
        || async {
            let res = client
                .post(metric_collection_endpoint.clone())
                .header(reqwest::header::CONTENT_TYPE, "application/json")
                .header(LAST_IN_BATCH.clone(), last_value)
                .body(body.clone())
                .send()
                .await;

            let res = res.and_then(|res| res.error_for_status());

            // 10 redirects are normally allowed, so we don't need to worry about 3xx
            match res {
                Ok(_response) => Ok(()),
                Err(e) => {
                    let status = e.status().filter(|s| s.is_client_error());
                    if let Some(status) = status {
                        // rejection used to be a thing when the server could reject a
                        // whole batch of metrics if one metric was bad
                        Err(UploadError::Rejected(status))
                    } else {
                        Err(UploadError::Reqwest(e))
                    }
                }
            }
        },
        UploadError::is_reject,
        warn_after,
        max_attempts,
        "upload consumption_metrics",
        cancel,
    )
    .await
    .ok_or_else(|| UploadError::Cancelled)
    .and_then(|x| x);

    match &res {
        Ok(_) => {}
        Err(e) if e.is_reject() => {
            // Permanent errors currently do not get logged by backoff::retry.
            // The alternate Display format has no effect here, but keep it for
            // easier pattern matching.
            tracing::error!("failed to upload metrics: {e:#}");
        }
        Err(_) => {
            // these have been logged already
        }
    }

    res
}

#[cfg(test)]
mod tests {
    use crate::consumption_metrics::{
        disk_cache::read_metrics_from_serde_value, NewMetricsRefRoot,
    };

    use super::*;
    use chrono::{DateTime, Utc};
    use once_cell::sync::Lazy;

    #[test]
    fn chunked_serialization() {
        let examples = metric_samples();
        assert!(examples.len() > 1);

        let now = Utc::now();
        let idempotency_keys = (0..examples.len())
            .map(|i| FixedGen::new(now, "1", i as u16).generate())
            .collect::<Vec<_>>();

        // Deserialize into Event rather than serde_json::Value, because Value uses the
        // default (unordered) HashMap, not a linked hash map.
        #[derive(serde::Deserialize)]
        struct EventChunk {
            events: Vec<Event<Ids, Name>>,
        }

        let correct = serialize_in_chunks(examples.len(), &examples, &idempotency_keys)
            .map(|res| res.unwrap().1)
            .flat_map(|body| serde_json::from_slice::<EventChunk>(&body).unwrap().events)
            .collect::<Vec<_>>();

        for chunk_size in 1..examples.len() {
            let actual = serialize_in_chunks(chunk_size, &examples, &idempotency_keys)
                .map(|res| res.unwrap().1)
                .flat_map(|body| serde_json::from_slice::<EventChunk>(&body).unwrap().events)
                .collect::<Vec<_>>();

            // if these are equal, the multi-chunk version works as well
            assert_eq!(correct, actual);
        }
    }

    #[derive(Clone, Copy)]
    struct FixedGen<'a>(chrono::DateTime<chrono::Utc>, &'a str, u16);

    impl<'a> FixedGen<'a> {
        fn new(now: chrono::DateTime<chrono::Utc>, node_id: &'a str, nonce: u16) -> Self {
            FixedGen(now, node_id, nonce)
        }
    }

    impl<'a> KeyGen<'a> for FixedGen<'a> {
        fn generate(&self) -> IdempotencyKey<'a> {
            IdempotencyKey::for_tests(self.0, self.1, self.2)
        }
    }

    static SAMPLES_NOW: Lazy<DateTime<Utc>> = Lazy::new(|| {
        DateTime::parse_from_rfc3339("2023-09-15T00:00:00.123456789Z")
            .unwrap()
            .into()
    });

    #[test]
    fn metric_image_stability() {
        // it is important that these strings stay as they are
        let examples = [
            (
                line!(),
                r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"written_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#,
            ),
            (
                line!(),
                r#"{"type":"incremental","start_time":"2023-09-14T00:00:00.123456789Z","stop_time":"2023-09-15T00:00:00.123456789Z","metric":"written_data_bytes_delta","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#,
            ),
            (
                line!(),
                r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"timeline_logical_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000","timeline_id":"ffffffffffffffffffffffffffffffff"}"#,
            ),
            (
                line!(),
                r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"remote_storage_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000"}"#,
            ),
            (
                line!(),
                r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"resident_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":0,"tenant_id":"00000000000000000000000000000000"}"#,
            ),
            (
                line!(),
                r#"{"type":"absolute","time":"2023-09-15T00:00:00.123456789Z","metric":"synthetic_storage_size","idempotency_key":"2023-09-15 00:00:00.123456789 UTC-1-0000","value":1,"tenant_id":"00000000000000000000000000000000"}"#,
            ),
        ];

        let idempotency_key = consumption_metrics::IdempotencyKey::for_tests(*SAMPLES_NOW, "1", 0);
        let examples = examples.into_iter().zip(metric_samples());

        for ((line, expected), item) in examples {
            let e = consumption_metrics::Event {
                kind: item.kind,
                metric: item.key.metric,
                idempotency_key: idempotency_key.to_string(),
                value: item.value,
                extra: Ids {
                    tenant_id: item.key.tenant_id,
                    timeline_id: item.key.timeline_id,
                },
            };
            let actual = serde_json::to_string(&e).unwrap();
            assert_eq!(
                expected, actual,
                "example for {:?} from line {line}",
                item.kind
            );
        }
    }

    #[test]
    fn disk_format_upgrade() {
        let old_samples_json = serde_json::to_value(metric_samples_old()).unwrap();
        let new_samples =
            serde_json::to_value(NewMetricsRefRoot::new(metric_samples().as_ref())).unwrap();
        let upgraded_samples = read_metrics_from_serde_value(old_samples_json).unwrap();
        let new_samples = read_metrics_from_serde_value(new_samples).unwrap();
        assert_eq!(upgraded_samples, new_samples);
    }

    fn metric_samples_old() -> [RawMetric; 6] {
        let tenant_id = TenantId::from_array([0; 16]);
        let timeline_id = TimelineId::from_array([0xff; 16]);

        let before = DateTime::parse_from_rfc3339("2023-09-14T00:00:00.123456789Z")
            .unwrap()
            .into();
        let [now, before] = [*SAMPLES_NOW, before];

        super::super::metrics::metric_examples_old(tenant_id, timeline_id, now, before)
    }

    fn metric_samples() -> [NewRawMetric; 6] {
        let tenant_id = TenantId::from_array([0; 16]);
        let timeline_id = TimelineId::from_array([0xff; 16]);

        let before = DateTime::parse_from_rfc3339("2023-09-14T00:00:00.123456789Z")
            .unwrap()
            .into();
        let [now, before] = [*SAMPLES_NOW, before];

        super::super::metrics::metric_examples(tenant_id, timeline_id, now, before)
    }
}