From 818cccddec27a4905dbd0e419bd3bde4cd583e13 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 2 Apr 2026 09:35:35 +0200 Subject: [PATCH] Run rate limiter garbage collection before inserting new user Move the GC pass from after insertion to before, so that stale entries are reclaimed before allocating a new bucket. This avoids unnecessary growth of the user map between GC cycles. Additionally, introduce a hard cap on the number of tracked users (MAX_USERS = 10_000): if the map is still at capacity after garbage collection, requests from previously unseen users are rejected. AI tools were used in preparing this commit. --- src/payment/asynchronous/rate_limiter.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/payment/asynchronous/rate_limiter.rs b/src/payment/asynchronous/rate_limiter.rs index 671b1dc72..bf1250892 100644 --- a/src/payment/asynchronous/rate_limiter.rs +++ b/src/payment/asynchronous/rate_limiter.rs @@ -23,6 +23,8 @@ pub(crate) struct RateLimiter { max_idle: Duration, } +const MAX_USERS: usize = 10_000; + struct Bucket { tokens: u32, last_refill: Instant, @@ -36,10 +38,19 @@ impl RateLimiter { pub(crate) fn allow(&mut self, user_id: &[u8]) -> bool { let now = Instant::now(); - let entry = self.users.entry(user_id.to_vec()); - let is_new_user = matches!(entry, std::collections::hash_map::Entry::Vacant(_)); + let is_new_user = !self.users.contains_key(user_id); + + if is_new_user { + self.garbage_collect(self.max_idle); + if self.users.len() >= MAX_USERS { + return false; + } + } - let bucket = entry.or_insert(Bucket { tokens: self.capacity, last_refill: now }); + let bucket = self + .users + .entry(user_id.to_vec()) + .or_insert(Bucket { tokens: self.capacity, last_refill: now }); let elapsed = now.duration_since(bucket.last_refill); let tokens_to_add = (elapsed.as_secs_f64() / self.refill_interval.as_secs_f64()) as u32; @@ -56,11 +67,6 @@ impl RateLimiter { false }; - // Each time a new user is added, we take the opportunity to clean up old rate limits. - if is_new_user { - self.garbage_collect(self.max_idle); - } - allow }