use std::borrow::Cow;
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

use anyhow::bail;
use dashmap::DashMap;
use itertools::Itertools;
use rand::rngs::StdRng;
use rand::{Rng, SeedableRng};
use tokio::time::{Duration, Instant};
use tracing::info;

use crate::intern::EndpointIdInt;

pub struct GlobalRateLimiter {
    data: Vec<RateBucket>,
    info: Vec<RateBucketInfo>,
}

impl GlobalRateLimiter {
    pub fn new(info: Vec<RateBucketInfo>) -> Self {
        Self {
            data: vec![
                RateBucket {
                    start: Instant::now(),
                    count: 0,
                };
                info.len()
            ],
            info,
        }
    }

    /// Check that the number of requests is below the configured `max_rps` limits.
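    ///
    /// A minimal usage sketch (the bucket spec is illustrative; see
    /// `RateBucketInfo::from_str` for the `"rps@interval"` format):
    ///
    /// ```ignore
    /// let mut limiter = GlobalRateLimiter::new(vec!["100@1s".parse().unwrap()]);
    /// if limiter.check() {
    ///     // admit the request
    /// }
    /// ```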
    pub fn check(&mut self) -> bool {
        let now = Instant::now();

        let should_allow_request = self
            .data
            .iter_mut()
            .zip(&self.info)
            .all(|(bucket, info)| bucket.should_allow_request(info, now, 1));

        if should_allow_request {
            // only increment the bucket counts if the request will actually be accepted
            self.data.iter_mut().for_each(|b| b.inc(1));
        }

        should_allow_request
    }
}

// Simple per-endpoint rate limiter.
//
// Checks that the number of connections to the endpoint is below `max_rps`.
// Purposefully ignores the user and database names, as clients can reconnect
// with different names; otherwise we would end up sending excess HTTP
// requests to the control plane.
pub type WakeComputeRateLimiter = BucketRateLimiter<EndpointIdInt, StdRng, RandomState>;

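/// Per-key rate limiter: each key tracks its own set of rate buckets in a
/// sharded concurrent map, cleaned up by periodic partial GC (see `do_gc`).
///
/// A minimal usage sketch, modeled on the tests below:
///
/// ```ignore
/// let limiter = WakeComputeRateLimiter::new(rates); // rates: Vec<RateBucketInfo>
/// assert!(limiter.check(endpoint, 1));              // endpoint: EndpointIdInt
/// ```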
pub struct BucketRateLimiter<Key, Rand = StdRng, Hasher = RandomState> {
    map: DashMap<Key, Vec<RateBucket>, Hasher>,
    info: Cow<'static, [RateBucketInfo]>,
    access_count: AtomicUsize,
    rand: Mutex<Rand>,
}

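/// A fixed-window counter: `count` requests have been admitted since `start`,
/// and the window resets once the configured interval has elapsed.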
#[derive(Clone, Copy)]
struct RateBucket {
    start: Instant,
    count: u32,
}

impl RateBucket {
    fn should_allow_request(&mut self, info: &RateBucketInfo, now: Instant, n: u32) -> bool {
        if now - self.start < info.interval {
            self.count + n <= info.max_rpi
        } else {
            // bucket expired, reset
            self.count = 0;
            self.start = now;

            true
        }
    }

    fn inc(&mut self, n: u32) {
        self.count += n;
    }
}

#[derive(Clone, Copy, PartialEq)]
pub struct RateBucketInfo {
    pub(crate) interval: Duration,
    // requests per interval
    pub(crate) max_rpi: u32,
}

impl std::fmt::Display for RateBucketInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let rps = self.rps().floor() as u64;
        write!(f, "{rps}@{}", humantime::format_duration(self.interval))
    }
}

impl std::fmt::Debug for RateBucketInfo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{self}")
    }
}

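// Parses a bucket spec of the form `<max_rps>@<interval>`,
// e.g. "100@10s" or "300@1m" (see the tests below).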
impl std::str::FromStr for RateBucketInfo {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let Some((max_rps, interval)) = s.split_once('@') else {
            bail!("invalid rate info")
        };
        let max_rps = max_rps.parse()?;
        let interval = humantime::parse_duration(interval)?;
        Ok(Self::new(max_rps, interval))
    }
}

impl RateBucketInfo {
    pub const DEFAULT_SET: [Self; 3] = [
        Self::new(300, Duration::from_secs(1)),
        Self::new(200, Duration::from_secs(60)),
        Self::new(100, Duration::from_secs(600)),
    ];

    pub const DEFAULT_ENDPOINT_SET: [Self; 3] = [
        Self::new(500, Duration::from_secs(1)),
        Self::new(300, Duration::from_secs(60)),
        Self::new(200, Duration::from_secs(600)),
    ];

    /// All of these limits are per (endpoint, masked IP) pair.
    /// Context: 4096 rounds of pbkdf2 take about 1ms of CPU time to execute
    /// (1 milli-cpu-second, or 1 mcpus), so the budget below works out to
    /// roughly `rps = (1000 * 4096) / rounds`.
    ///
    /// First bucket: 1000 mcpus total per endpoint-IP pair
    /// * 4,096,000 requests per second with 1 hash round.
    /// * 1,000 requests per second with 4096 hash rounds.
    /// * 6.8 requests per second with 600,000 hash rounds.
    pub const DEFAULT_AUTH_SET: [Self; 3] = [
        Self::new(1000 * 4096, Duration::from_secs(1)),
        Self::new(600 * 4096, Duration::from_secs(60)),
        Self::new(300 * 4096, Duration::from_secs(600)),
    ];

    pub fn rps(&self) -> f64 {
        (self.max_rpi as f64) / self.interval.as_secs_f64()
    }

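    /// Sorts the buckets by interval and checks that every longer-interval
    /// bucket admits at least as many requests per interval as the shorter
    /// ones; otherwise the longer bucket could never be filled.
    ///
    /// A minimal sketch (values are illustrative):
    ///
    /// ```ignore
    /// // 20@30s allows 600 requests per interval, >= the 100 of 100@1s: valid.
    /// let mut rates: Vec<RateBucketInfo> =
    ///     vec!["100@1s".parse().unwrap(), "20@30s".parse().unwrap()];
    /// RateBucketInfo::validate(&mut rates).unwrap();
    /// ```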
    pub fn validate(info: &mut [Self]) -> anyhow::Result<()> {
        info.sort_unstable_by_key(|info| info.interval);
        let invalid = info
            .iter()
            .tuple_windows()
            .find(|(a, b)| a.max_rpi > b.max_rpi);
        if let Some((a, b)) = invalid {
            bail!(
                "invalid bucket RPS limits. {b} allows fewer requests per bucket than {a} ({} vs {})",
                b.max_rpi,
                a.max_rpi,
            );
        }

        Ok(())
    }

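    /// Converts a per-second rate into a per-interval budget, e.g.
    /// `new(50, Duration::from_secs(5))` gives `max_rpi = 50 * 5000 / 1000 = 250`.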
    pub const fn new(max_rps: u32, interval: Duration) -> Self {
        Self {
            interval,
            max_rpi: ((max_rps as u64) * (interval.as_millis() as u64) / 1000) as u32,
        }
    }
}

impl<K: Hash + Eq> BucketRateLimiter<K> {
    pub fn new(info: impl Into<Cow<'static, [RateBucketInfo]>>) -> Self {
        Self::new_with_rand_and_hasher(info, StdRng::from_entropy(), RandomState::new())
    }
}

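// `R` and `S` are injectable so tests can supply a seeded RNG and a fixed
// hasher, making GC shard selection deterministic (see `test_rate_limits_gc`).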
impl<K: Hash + Eq, R: Rng, S: BuildHasher + Clone> BucketRateLimiter<K, R, S> {
    fn new_with_rand_and_hasher(
        info: impl Into<Cow<'static, [RateBucketInfo]>>,
        rand: R,
        hasher: S,
    ) -> Self {
        let info = info.into();
        info!(buckets = ?info, "endpoint rate limiter");
        Self {
            info,
            map: DashMap::with_hasher_and_shard_amount(hasher, 64),
            access_count: AtomicUsize::new(1), // start from 1 to avoid GC on the first request
            rand: Mutex::new(rand),
        }
    }

    /// Check that the number of connections to the endpoint is below the `max_rps` limit.
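    /// `n` is the weight of the request: the tests pass `n = 2` to have one
    /// check count as two requests.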
    pub(crate) fn check(&self, key: K, n: u32) -> bool {
        // do a partial GC every 2k requests. This cleans up ~1/64th of the map.
        // worst case memory usage is about:
        //   2 * 2048 * 64 * (48B + 72B) = 30MB
        if self.access_count.fetch_add(1, Ordering::AcqRel) % 2048 == 0 {
            self.do_gc();
        }

        let now = Instant::now();
        let mut entry = self.map.entry(key).or_insert_with(|| {
            vec![
                RateBucket {
                    start: now,
                    count: 0,
                };
                self.info.len()
            ]
        });

        let should_allow_request = entry
            .iter_mut()
            .zip(&*self.info)
            .all(|(bucket, info)| bucket.should_allow_request(info, now, n));

        if should_allow_request {
            // only increment the bucket counts if the request will actually be accepted
            entry.iter_mut().for_each(|b| b.inc(n));
        }

        should_allow_request
    }

    /// Clean the map. Simple strategy: remove all entries in a random shard.
    /// At worst, we'll double the effective max_rps during the cleanup.
    /// But that way deletion does not acquire a mutex on each entry access.
    pub(crate) fn do_gc(&self) {
        info!(
            "cleaning up bucket rate limiter, current size = {}",
            self.map.len()
        );
        let n = self.map.shards().len();
        // this lock is ok as the periodic cycle of do_gc makes a collision very
        // unlikely (impossible, in fact, unless we have 2048 threads)
        let shard = self.rand.lock().unwrap().gen_range(0..n);
        self.map.shards()[shard].write().clear();
    }
}

#[cfg(test)]
mod tests {
    use std::hash::BuildHasherDefault;
    use std::time::Duration;

    use rand::SeedableRng;
    use rustc_hash::FxHasher;
    use tokio::time;

    use super::{BucketRateLimiter, WakeComputeRateLimiter};
    use crate::intern::EndpointIdInt;
    use crate::rate_limiter::RateBucketInfo;
    use crate::types::EndpointId;

    #[test]
    fn rate_bucket_rpi() {
        let rate_bucket = RateBucketInfo::new(50, Duration::from_secs(5));
        assert_eq!(rate_bucket.max_rpi, 50 * 5);

        let rate_bucket = RateBucketInfo::new(50, Duration::from_millis(500));
        assert_eq!(rate_bucket.max_rpi, 50 / 2);
    }

    #[test]
    fn rate_bucket_parse() {
        let rate_bucket: RateBucketInfo = "100@10s".parse().unwrap();
        assert_eq!(rate_bucket.interval, Duration::from_secs(10));
        assert_eq!(rate_bucket.max_rpi, 100 * 10);
        assert_eq!(rate_bucket.to_string(), "100@10s");

        let rate_bucket: RateBucketInfo = "100@1m".parse().unwrap();
        assert_eq!(rate_bucket.interval, Duration::from_secs(60));
        assert_eq!(rate_bucket.max_rpi, 100 * 60);
        assert_eq!(rate_bucket.to_string(), "100@1m");
    }

    #[test]
    fn default_rate_buckets() {
        let mut defaults = RateBucketInfo::DEFAULT_SET;
        RateBucketInfo::validate(&mut defaults[..]).unwrap();
    }

    #[test]
    #[should_panic = "invalid bucket RPS limits. 10@10s allows fewer requests per bucket than 300@1s (100 vs 300)"]
    fn rate_buckets_validate() {
        let mut rates: Vec<RateBucketInfo> = ["300@1s", "10@10s"]
            .into_iter()
            .map(|s| s.parse().unwrap())
            .collect();
        RateBucketInfo::validate(&mut rates).unwrap();
    }

    #[tokio::test]
    async fn test_rate_limits() {
        let mut rates: Vec<RateBucketInfo> = ["100@1s", "20@30s"]
            .into_iter()
            .map(|s| s.parse().unwrap())
            .collect();
        RateBucketInfo::validate(&mut rates).unwrap();
        let limiter = WakeComputeRateLimiter::new(rates);

        let endpoint = EndpointId::from("ep-my-endpoint-1234");
        let endpoint = EndpointIdInt::from(endpoint);

        time::pause();

        for _ in 0..100 {
            assert!(limiter.check(endpoint, 1));
        }
        // more connections fail
        assert!(!limiter.check(endpoint, 1));

        // fail even after 500ms as it's in the same bucket
        time::advance(time::Duration::from_millis(500)).await;
        assert!(!limiter.check(endpoint, 1));

        // after a full 1s, 100 requests are allowed again
        time::advance(time::Duration::from_millis(500)).await;
        for _ in 1..6 {
            for _ in 0..50 {
                assert!(limiter.check(endpoint, 2));
            }
            time::advance(time::Duration::from_millis(1000)).await;
        }

        // more connections after 600 requests will exceed the 20rps@30s limit
        assert!(!limiter.check(endpoint, 1));

        // will still fail before the 30 second limit
        time::advance(time::Duration::from_millis(30_000 - 6_000 - 1)).await;
        assert!(!limiter.check(endpoint, 1));

        // after the full 30 seconds, 100 requests are allowed again
        time::advance(time::Duration::from_millis(1)).await;
        for _ in 0..100 {
            assert!(limiter.check(endpoint, 1));
        }
    }

    #[tokio::test]
    async fn test_rate_limits_gc() {
        // fixed seeded random/hasher to ensure that the test is not flaky
        let rand = rand::rngs::StdRng::from_seed([1; 32]);
        let hasher = BuildHasherDefault::<FxHasher>::default();

        let limiter =
            BucketRateLimiter::new_with_rand_and_hasher(&RateBucketInfo::DEFAULT_SET, rand, hasher);
        for i in 0..1_000_000 {
            limiter.check(i, 1);
        }
        assert!(limiter.map.len() < 150_000);
    }
}