LCOV - code coverage report
Current view: top level - proxy/src/rate_limiter - limiter.rs (source / functions)
Test:         b837401fb09d2d9818b70e630fdb67e9799b7b0d.info
Test Date:    2024-04-18 15:32:49

Coverage:     Lines:     84.5 % (185 of 219 hit)
              Functions: 65.5 % (36 of 55 hit)

            Line data    Source code
       1              : use std::{
       2              :     borrow::Cow,
       3              :     collections::hash_map::RandomState,
       4              :     hash::{BuildHasher, Hash},
       5              :     sync::{
       6              :         atomic::{AtomicUsize, Ordering},
       7              :         Mutex,
       8              :     },
       9              : };
      10              : 
      11              : use anyhow::bail;
      12              : use dashmap::DashMap;
      13              : use itertools::Itertools;
      14              : use rand::{rngs::StdRng, Rng, SeedableRng};
      15              : use tokio::time::{Duration, Instant};
      16              : use tracing::info;
      17              : 
      18              : use crate::intern::EndpointIdInt;
      19              : 
      20              : pub struct GlobalRateLimiter {
      21              :     data: Vec<RateBucket>,
      22              :     info: Vec<RateBucketInfo>,
      23              : }
      24              : 
      25              : impl GlobalRateLimiter {
      26            0 :     pub fn new(info: Vec<RateBucketInfo>) -> Self {
      27            0 :         Self {
      28            0 :             data: vec![
      29            0 :                 RateBucket {
      30            0 :                     start: Instant::now(),
      31            0 :                     count: 0,
      32            0 :                 };
      33            0 :                 info.len()
      34            0 :             ],
      35            0 :             info,
      36            0 :         }
      37            0 :     }
      38              : 
       39              :     /// Check that the number of connections is below `max_rps` rps.
      40            0 :     pub fn check(&mut self) -> bool {
      41            0 :         let now = Instant::now();
      42            0 : 
      43            0 :         let should_allow_request = self
      44            0 :             .data
      45            0 :             .iter_mut()
      46            0 :             .zip(&self.info)
      47            0 :             .all(|(bucket, info)| bucket.should_allow_request(info, now, 1));
      48            0 : 
      49            0 :         if should_allow_request {
      50            0 :             // only increment the bucket counts if the request will actually be accepted
      51            0 :             self.data.iter_mut().for_each(|b| b.inc(1));
      52            0 :         }
      53              : 
      54            0 :         should_allow_request
      55            0 :     }
      56              : }
      57              : 
      58              : // Simple per-endpoint rate limiter.
      59              : //
       60              : // Checks that the number of connections to the endpoint is below `max_rps` rps.
       61              : // It purposefully ignores the user name and database name, as clients can
       62              : // reconnect with different names and we would still end up sending some HTTP
       63              : // requests to the control plane.
      64              : pub type EndpointRateLimiter = BucketRateLimiter<EndpointIdInt, StdRng, RandomState>;
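                       : //
                       : // A minimal, hypothetical call-site sketch (not part of this file; `endpoint_id`
                       : // stands for an already-interned `EndpointIdInt`), mirroring how the tests below
                       : // drive the limiter:
                       : //
                       : //     let limiter = EndpointRateLimiter::new(&RateBucketInfo::DEFAULT_ENDPOINT_SET);
                       : //     if !limiter.check(endpoint_id, 1) {
                       : //         // the endpoint exceeded one of its rate buckets; reject the connection
                       : //     }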
      65              : 
      66              : pub struct BucketRateLimiter<Key, Rand = StdRng, Hasher = RandomState> {
      67              :     map: DashMap<Key, Vec<RateBucket>, Hasher>,
      68              :     info: Cow<'static, [RateBucketInfo]>,
      69              :     access_count: AtomicUsize,
      70              :     rand: Mutex<Rand>,
      71              : }
      72              : 
      73              : #[derive(Clone, Copy)]
      74              : struct RateBucket {
      75              :     start: Instant,
      76              :     count: u32,
      77              : }
      78              : 
      79              : impl RateBucket {
      80      6001830 :     fn should_allow_request(&mut self, info: &RateBucketInfo, now: Instant, n: u32) -> bool {
      81      6001830 :         if now - self.start < info.interval {
      82      6001814 :             self.count + n <= info.max_rpi
      83              :         } else {
      84              :             // bucket expired, reset
      85           16 :             self.count = 0;
      86           16 :             self.start = now;
      87           16 : 
      88           16 :             true
      89              :         }
      90      6001830 :     }
      91              : 
      92      6001818 :     fn inc(&mut self, n: u32) {
      93      6001818 :         self.count += n;
      94      6001818 :     }
      95              : }
      96              : 
      97              : #[derive(Clone, Copy, PartialEq)]
      98              : pub struct RateBucketInfo {
      99              :     pub interval: Duration,
     100              :     // requests per interval
     101              :     pub max_rpi: u32,
     102              : }
     103              : 
     104              : impl std::fmt::Display for RateBucketInfo {
     105           32 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     106           32 :         let rps = (self.max_rpi as u64) * 1000 / self.interval.as_millis() as u64;
     107           32 :         write!(f, "{rps}@{}", humantime::format_duration(self.interval))
     108           32 :     }
     109              : }
     110              : 
     111              : impl std::fmt::Debug for RateBucketInfo {
     112            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     113            0 :         write!(f, "{self}")
     114            0 :     }
     115              : }
     116              : 
     117              : impl std::str::FromStr for RateBucketInfo {
     118              :     type Err = anyhow::Error;
     119              : 
     120           52 :     fn from_str(s: &str) -> Result<Self, Self::Err> {
     121           52 :         let Some((max_rps, interval)) = s.split_once('@') else {
     122            0 :             bail!("invalid rate info")
     123              :         };
     124           52 :         let max_rps = max_rps.parse()?;
     125           52 :         let interval = humantime::parse_duration(interval)?;
     126           52 :         Ok(Self::new(max_rps, interval))
     127           52 :     }
     128              : }
     129              : 
     130              : impl RateBucketInfo {
     131              :     pub const DEFAULT_ENDPOINT_SET: [Self; 3] = [
     132              :         Self::new(300, Duration::from_secs(1)),
     133              :         Self::new(200, Duration::from_secs(60)),
     134              :         Self::new(100, Duration::from_secs(600)),
     135              :     ];
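                       :     // With the conversion in `new` below, these defaults work out to 300, 12_000
                       :     // and 60_000 requests per interval, and display as "300@1s", "200@1m" and
                       :     // "100@10m".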
     136              : 
     137            6 :     pub fn validate(info: &mut [Self]) -> anyhow::Result<()> {
     138           16 :         info.sort_unstable_by_key(|info| info.interval);
     139            6 :         let invalid = info
     140            6 :             .iter()
     141            6 :             .tuple_windows()
     142            8 :             .find(|(a, b)| a.max_rpi > b.max_rpi);
     143            6 :         if let Some((a, b)) = invalid {
     144            2 :             bail!(
     145            2 :                 "invalid bucket RPS limits. {b} allows fewer requests per bucket than {a} ({} vs {})",
     146            2 :                 b.max_rpi,
     147            2 :                 a.max_rpi,
     148            2 :             );
     149            4 :         }
     150            4 : 
     151            4 :         Ok(())
     152            6 :     }
     153              : 
     154           60 :     pub const fn new(max_rps: u32, interval: Duration) -> Self {
     155           60 :         Self {
     156           60 :             interval,
     157           60 :             max_rpi: ((max_rps as u64) * (interval.as_millis() as u64) / 1000) as u32,
     158           60 :         }
     159           60 :     }
     160              : }
     161              : 
     162              : impl<K: Hash + Eq> BucketRateLimiter<K> {
     163            8 :     pub fn new(info: impl Into<Cow<'static, [RateBucketInfo]>>) -> Self {
     164            8 :         Self::new_with_rand_and_hasher(info, StdRng::from_entropy(), RandomState::new())
     165            8 :     }
     166              : }
     167              : 
     168              : impl<K: Hash + Eq, R: Rng, S: BuildHasher + Clone> BucketRateLimiter<K, R, S> {
     169           10 :     fn new_with_rand_and_hasher(
     170           10 :         info: impl Into<Cow<'static, [RateBucketInfo]>>,
     171           10 :         rand: R,
     172           10 :         hasher: S,
     173           10 :     ) -> Self {
     174           10 :         let info = info.into();
     175           10 :         info!(buckets = ?info, "endpoint rate limiter");
     176           10 :         Self {
     177           10 :             info,
     178           10 :             map: DashMap::with_hasher_and_shard_amount(hasher, 64),
     179           10 :             access_count: AtomicUsize::new(1), // start from 1 to avoid GC on the first request
     180           10 :             rand: Mutex::new(rand),
     181           10 :         }
     182           10 :     }
     183              : 
      184              :     /// Check that the number of connections to the endpoint is below `max_rps` rps.
     185      2000914 :     pub fn check(&self, key: K, n: u32) -> bool {
     186      2000914 :         // do a partial GC every 2k requests. This cleans up ~ 1/64th of the map.
     187      2000914 :         // worst case memory usage is about:
     188      2000914 :         //    = 2 * 2048 * 64 * (48B + 72B)
     189      2000914 :         //    = 30MB
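                       :         //      (i.e. 2 * 2048 * 64 = 262,144 entries at (48 + 72) B = 120 B each,
                       :         //       which is 31,457,280 B ≈ 30 MiB)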
     190      2000914 :         if self.access_count.fetch_add(1, Ordering::AcqRel) % 2048 == 0 {
     191          976 :             self.do_gc();
     192      1999938 :         }
     193              : 
     194      2000914 :         let now = Instant::now();
     195      2000914 :         let mut entry = self.map.entry(key).or_insert_with(|| {
     196      2000008 :             vec![
     197      2000008 :                 RateBucket {
     198      2000008 :                     start: now,
     199      2000008 :                     count: 0,
     200      2000008 :                 };
     201      2000008 :                 self.info.len()
     202      2000008 :             ]
     203      2000914 :         });
     204      2000914 : 
     205      2000914 :         let should_allow_request = entry
     206      2000914 :             .iter_mut()
     207      2000914 :             .zip(&*self.info)
     208      6001830 :             .all(|(bucket, info)| bucket.should_allow_request(info, now, n));
     209      2000914 : 
     210      2000914 :         if should_allow_request {
     211      2000906 :             // only increment the bucket counts if the request will actually be accepted
     212      6001818 :             entry.iter_mut().for_each(|b| b.inc(n));
     213      2000906 :         }
     214              : 
     215      2000914 :         should_allow_request
     216      2000914 :     }
     217              : 
     218              :     /// Clean the map. Simple strategy: remove all entries in a random shard.
     219              :     /// At worst, we'll double the effective max_rps during the cleanup.
      220              :     /// But that way deletion does not acquire a mutex on each entry access.
     221          976 :     pub fn do_gc(&self) {
     222          976 :         info!(
     223            0 :             "cleaning up bucket rate limiter, current size = {}",
     224            0 :             self.map.len()
     225            0 :         );
     226          976 :         let n = self.map.shards().len();
     227          976 :         // this lock is ok as the periodic cycle of do_gc makes this very unlikely to collide
      228          976 :         // (impossible, in fact, unless we have 2048 threads)
     229          976 :         let shard = self.rand.lock().unwrap().gen_range(0..n);
     230          976 :         self.map.shards()[shard].write().clear();
     231          976 :     }
     232              : }
     233              : 
     234              : #[cfg(test)]
     235              : mod tests {
     236              :     use std::{hash::BuildHasherDefault, time::Duration};
     237              : 
     238              :     use rand::SeedableRng;
     239              :     use rustc_hash::FxHasher;
     240              :     use tokio::time;
     241              : 
     242              :     use super::{BucketRateLimiter, EndpointRateLimiter};
     243              :     use crate::{intern::EndpointIdInt, rate_limiter::RateBucketInfo, EndpointId};
     244              : 
     245              :     #[test]
     246            2 :     fn rate_bucket_rpi() {
     247            2 :         let rate_bucket = RateBucketInfo::new(50, Duration::from_secs(5));
     248            2 :         assert_eq!(rate_bucket.max_rpi, 50 * 5);
     249              : 
     250            2 :         let rate_bucket = RateBucketInfo::new(50, Duration::from_millis(500));
     251            2 :         assert_eq!(rate_bucket.max_rpi, 50 / 2);
     252            2 :     }
     253              : 
     254              :     #[test]
     255            2 :     fn rate_bucket_parse() {
     256            2 :         let rate_bucket: RateBucketInfo = "100@10s".parse().unwrap();
     257            2 :         assert_eq!(rate_bucket.interval, Duration::from_secs(10));
     258            2 :         assert_eq!(rate_bucket.max_rpi, 100 * 10);
     259            2 :         assert_eq!(rate_bucket.to_string(), "100@10s");
     260              : 
     261            2 :         let rate_bucket: RateBucketInfo = "100@1m".parse().unwrap();
     262            2 :         assert_eq!(rate_bucket.interval, Duration::from_secs(60));
     263            2 :         assert_eq!(rate_bucket.max_rpi, 100 * 60);
     264            2 :         assert_eq!(rate_bucket.to_string(), "100@1m");
     265            2 :     }
     266              : 
     267              :     #[test]
     268            2 :     fn default_rate_buckets() {
     269            2 :         let mut defaults = RateBucketInfo::DEFAULT_ENDPOINT_SET;
     270            2 :         RateBucketInfo::validate(&mut defaults[..]).unwrap();
     271            2 :     }
     272              : 
     273              :     #[test]
     274              :     #[should_panic = "invalid bucket RPS limits. 10@10s allows fewer requests per bucket than 300@1s (100 vs 300)"]
     275            2 :     fn rate_buckets_validate() {
     276            2 :         let mut rates: Vec<RateBucketInfo> = ["300@1s", "10@10s"]
     277            2 :             .into_iter()
     278            4 :             .map(|s| s.parse().unwrap())
     279            2 :             .collect();
     280            2 :         RateBucketInfo::validate(&mut rates).unwrap();
     281            2 :     }
     282              : 
     283              :     #[tokio::test]
     284            2 :     async fn test_rate_limits() {
     285            2 :         let mut rates: Vec<RateBucketInfo> = ["100@1s", "20@30s"]
     286            2 :             .into_iter()
     287            4 :             .map(|s| s.parse().unwrap())
     288            2 :             .collect();
     289            2 :         RateBucketInfo::validate(&mut rates).unwrap();
     290            2 :         let limiter = EndpointRateLimiter::new(rates);
     291            2 : 
     292            2 :         let endpoint = EndpointId::from("ep-my-endpoint-1234");
     293            2 :         let endpoint = EndpointIdInt::from(endpoint);
     294            2 : 
     295            2 :         time::pause();
     296            2 : 
     297          202 :         for _ in 0..100 {
     298          200 :             assert!(limiter.check(endpoint, 1));
     299            2 :         }
     300            2 :         // more connections fail
     301            2 :         assert!(!limiter.check(endpoint, 1));
     302            2 : 
     303            2 :         // fail even after 500ms as it's in the same bucket
     304            2 :         time::advance(time::Duration::from_millis(500)).await;
     305            2 :         assert!(!limiter.check(endpoint, 1));
     306            2 : 
     307            2 :         // after a full 1s, 100 requests are allowed again
     308            2 :         time::advance(time::Duration::from_millis(500)).await;
     309           12 :         for _ in 1..6 {
     310          510 :             for _ in 0..50 {
     311          500 :                 assert!(limiter.check(endpoint, 2));
     312            2 :             }
     313           10 :             time::advance(time::Duration::from_millis(1000)).await;
     314            2 :         }
     315            2 : 
      316            2 :         // after 600 requests, more connections will exceed the 20rps@30s limit (600 per bucket)
     317            2 :         assert!(!limiter.check(endpoint, 1));
     318            2 : 
     319            2 :         // will still fail before the 30 second limit
     320            2 :         time::advance(time::Duration::from_millis(30_000 - 6_000 - 1)).await;
     321            2 :         assert!(!limiter.check(endpoint, 1));
     322            2 : 
     323            2 :         // after the full 30 seconds, 100 requests are allowed again
     324            2 :         time::advance(time::Duration::from_millis(1)).await;
     325          202 :         for _ in 0..100 {
     326          200 :             assert!(limiter.check(endpoint, 1));
     327            2 :         }
     328            2 :     }
     329              : 
     330              :     #[tokio::test]
     331            2 :     async fn test_rate_limits_gc() {
     332            2 :         // fixed seeded random/hasher to ensure that the test is not flaky
     333            2 :         let rand = rand::rngs::StdRng::from_seed([1; 32]);
     334            2 :         let hasher = BuildHasherDefault::<FxHasher>::default();
     335            2 : 
     336            2 :         let limiter = BucketRateLimiter::new_with_rand_and_hasher(
     337            2 :             &RateBucketInfo::DEFAULT_ENDPOINT_SET,
     338            2 :             rand,
     339            2 :             hasher,
     340            2 :         );
     341      2000002 :         for i in 0..1_000_000 {
     342      2000000 :             limiter.check(i, 1);
     343      2000000 :         }
     344            2 :         assert!(limiter.map.len() < 150_000);
     345            2 :     }
     346              : }
        

Generated by: LCOV version 2.1-beta