Diffstat (limited to 'HTTP/RateLimit.hs')
-rw-r--r--  HTTP/RateLimit.hs  121
1 file changed, 51 insertions, 70 deletions
diff --git a/HTTP/RateLimit.hs b/HTTP/RateLimit.hs
index 194e798..45c6b9a 100644
--- a/HTTP/RateLimit.hs
+++ b/HTTP/RateLimit.hs
@@ -34,22 +34,19 @@ import Control.Monad.IO.Class
-- successively more difficult proof of work access requirement.
--
-- To guard against DOS attacks that reuse the same proof of work,
--- RandomSalt values are used, and bloom filters keep track of
--- the ones that have been assigned and used.
+-- bloom filters keep track of RequestIDs that have been used before.
data RateLimiter = RateLimiter
{ buckets :: TMVar [Bucket]
, unusedBuckets :: TMVar [Bucket]
, fallbackQueue :: FallbackQueue
- , assignedRandomSalts :: BloomFilter
- , assignedRandomSaltsOld :: BloomFilter
- , usedRandomSalts :: BloomFilter
- , usedRandomSaltsOld :: BloomFilter
- , numRandomSalts :: TMVar Int
- , randomSaltGenerationLimiter :: TokenBucket
+ , usedRequestIDs :: BloomFilter
+ , usedRequestIDsOld :: BloomFilter
+ , numUsedRequestIDs :: TMVar Int
+ , requestIDSecret :: RequestIDSecret
, requestCounter :: TMVar Integer
}
-type BloomFilter = TMVar (BloomFilter.MBloom RealWorld RandomSalt)
+type BloomFilter = TMVar (BloomFilter.MBloom RealWorld RequestID)
-- | Buckets fill up at a fixed rate, and accessing a bucket
-- removes one unit from it.
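
Note (not part of the diff): the change above drops the assigned/used RandomSalt bookkeeping in favour of RequestIDs that the server can validate statelessly against requestIDSecret. The real RequestID, RequestIDSecret, newRequestIDSecret, mkRequestID, and validRequestID are defined in another module of this codebase and are not shown here; purely as a hedged sketch, one plausible shape for them is an HMAC over a random nonce, e.g. using cryptonite:

-- Sketch only: one possible RequestID scheme, assumed for illustration.
-- The actual implementation lives elsewhere in this repository.
module RequestIDSketch where

import Crypto.MAC.HMAC (HMAC, hmac)
import Crypto.Hash.Algorithms (SHA256)
import Crypto.Random (getRandomBytes)
import Data.ByteArray (constEq, convert)
import Data.ByteString (ByteString)

newtype RequestIDSecret = RequestIDSecret ByteString

data RequestID = RequestID
    { ridNonce :: ByteString
    , ridMac :: ByteString
    }
    deriving (Eq)

newRequestIDSecret :: IO RequestIDSecret
newRequestIDSecret = RequestIDSecret <$> getRandomBytes 32

-- The server signs a fresh nonce; it does not need to remember it,
-- which is why the "assigned" bloom filters could be removed.
mkRequestID :: RequestIDSecret -> IO RequestID
mkRequestID secret = do
    nonce <- getRandomBytes 16
    return (RequestID nonce (sign secret nonce))

-- Validity is checked by recomputing the MAC, compared in constant time.
validRequestID :: RequestIDSecret -> RequestID -> Bool
validRequestID secret (RequestID nonce mac) = constEq (sign secret nonce) mac

sign :: RequestIDSecret -> ByteString -> ByteString
sign (RequestIDSecret key) nonce = convert (hmac key nonce :: HMAC SHA256)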
@@ -78,10 +75,8 @@ newRateLimiter cfg storedir logger = do
<*> newFallbackQueue
<*> mkBloomFilter
<*> mkBloomFilter
- <*> mkBloomFilter
- <*> mkBloomFilter
<*> newTMVarIO 0
- <*> newTokenBucket
+ <*> newRequestIDSecret
<*> newTMVarIO 0
_ <- forkIO (adjusterThread cfg storedir rl logger)
return rl
@@ -121,7 +116,7 @@ mkBloomFilter = do
-- of memory.
(bloomhashes, bloomsize) = suggestSizing bloomMaxSize (1/100000)
--- | Maximum number of RandomSalts that can be stored in a bloom filter
+-- | Maximum number of RequestIDs that can be stored in a bloom filter
-- without the false positive rate getting bad.
bloomMaxSize :: Int
bloomMaxSize = 1000000
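
Aside (not part of the module): the trade-off behind bloomMaxSize is the standard bloom-filter false-positive estimate p ≈ (1 - e^(-k*n/m))^k for n stored entries, m bits, and k hash functions. A tiny standalone helper to sanity-check a sizing:

-- Back-of-the-envelope check, independent of this module.
falsePositiveRate :: Int -> Int -> Int -> Double
falsePositiveRate n m k =
    (1 - exp (negate (fromIntegral k * fromIntegral n / fromIntegral m))) ^ k

-- For the sizing chosen by suggestSizing with n = 1000000 entries, this
-- should come out near the 1/100000 target.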
@@ -130,21 +125,21 @@ bloomMaxSize = 1000000
-- access to, until one is found that accepts it.
rateLimit :: POWIdent p => RateLimiter -> Logger -> Maybe ProofOfWork -> p -> Handler a -> Handler (POWGuarded a)
rateLimit ratelimiter logger mpow p a = do
- validsalt <- liftIO $ checkValidSalt ratelimiter mpow
bs <- getBuckets ratelimiter
- if validsalt
+ validrequest <- liftIO $ checkValidRequestID ratelimiter logger mpow
+ if validrequest
then go bs
- else assignWork ratelimiter logger bs
+ else assignWork ratelimiter bs
where
go [] = fallback ratelimiter logger a
go (b:bs) = case mkProofOfWorkRequirement (proofOfWorkRequired b) of
Nothing -> checkbucket b bs
Just mkreq -> case mpow of
- Nothing -> assignWork ratelimiter logger (b:bs)
- Just pow@(ProofOfWork _ salt) ->
- if isValidProofOfWork pow (mkreq salt) p
+ Nothing -> assignWork ratelimiter (b:bs)
+ Just pow@(ProofOfWork _ rid) ->
+ if isValidProofOfWork pow (mkreq rid) p
then checkbucket b bs
- else assignWork ratelimiter logger (b:bs)
+ else assignWork ratelimiter (b:bs)
checkbucket b bs = do
allowed <- liftIO $ tokenBucketTryAlloc (tokenBucket b)
burstSize (fillInterval b) 1
@@ -152,69 +147,55 @@ rateLimit ratelimiter logger mpow p a = do
then allowRequest ratelimiter a
else go bs
-checkValidSalt :: RateLimiter -> Maybe ProofOfWork -> IO Bool
-checkValidSalt _ Nothing = return True
-checkValidSalt rl (Just (ProofOfWork _ salt)) = do
- assigned <- iselem assignedRandomSalts
- oldassigned <- iselem assignedRandomSaltsOld
- used <- iselem usedRandomSalts
- oldused <- iselem usedRandomSaltsOld
- if assigned && not oldassigned && not used && not oldused
- then do
- withBloomFilter rl usedRandomSalts
- (`BloomFilter.insert` salt)
- return True
- else return False
+checkValidRequestID :: RateLimiter -> Logger -> Maybe ProofOfWork -> IO Bool
+checkValidRequestID _ _ Nothing = return True
+checkValidRequestID rl logger (Just (ProofOfWork _ rid))
+ | validRequestID (requestIDSecret rl) rid = do
+ used <- iselem usedRequestIDs
+ oldused <- iselem usedRequestIDsOld
+ if not used && not oldused
+ then do
+ withBloomFilter rl usedRequestIDs
+ (`BloomFilter.insert` rid)
+ checkbloomsize
+ return True
+ else return False
+ | otherwise = return False
where
- iselem f = withBloomFilter rl f (BloomFilter.elem salt)
-
-assignWork :: RateLimiter -> Logger -> [Bucket] -> Handler (POWGuarded a)
-assignWork ratelimiter logger bs = case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of
- [] -> throwError err404
- (mkreq:_) -> liftIO $ do
- -- This prevents an attacker flooding requests that
- -- cause new random salts to be assigned, in order
- -- to fill up the bloom table and cause salts assigned
- -- to other clients to be rejected.
- -- Since the bloom filters hold 1 million salts,
- -- the attacker would need to send requests for over 10
- -- hours to force a bloom filter rotation, so would not
- -- impact many users.
- tokenBucketWait (randomSaltGenerationLimiter ratelimiter)
- 100 -- burst
- 1000000 -- refill 1 token per second
-
- salt <- liftIO mkRandomSalt
- withBloomFilter ratelimiter assignedRandomSalts
- (`BloomFilter.insert` salt)
+ iselem f = withBloomFilter rl f (BloomFilter.elem rid)
+
+ checkbloomsize = do
needrot <- atomically $ do
- n <- takeTMVar (numRandomSalts ratelimiter)
+ n <- takeTMVar (numUsedRequestIDs rl)
if n > bloomMaxSize `div` 2
- then return Nothing
+ then return (Just n)
else do
- putTMVar (numRandomSalts ratelimiter) (n+1)
- return (Just n)
+ putTMVar (numUsedRequestIDs rl) (n+1)
+ return Nothing
handlerotation needrot
- return $ NeedProofOfWork $ mkreq salt
- where
+
handlerotation Nothing = return ()
handlerotation (Just n) = do
logStderr logger $ "rotating bloom filters after processing " ++ show n ++ " requests"
- newassigned <- mkBloomFilter
newused <- mkBloomFilter
atomically $ do
- oldassigned <- takeTMVar (assignedRandomSalts ratelimiter)
- oldused <- takeTMVar (usedRandomSalts ratelimiter)
- putTMVar (assignedRandomSaltsOld ratelimiter) oldassigned
- putTMVar (usedRandomSaltsOld ratelimiter) oldused
- putTMVar (assignedRandomSalts ratelimiter) =<< takeTMVar newassigned
- putTMVar (usedRandomSalts ratelimiter) =<< takeTMVar newused
- putTMVar (numRandomSalts ratelimiter) 0
+ oldused <- takeTMVar (usedRequestIDs rl)
+ putTMVar (usedRequestIDsOld rl) oldused
+ putTMVar (usedRequestIDs rl) =<< takeTMVar newused
+ putTMVar (numUsedRequestIDs rl) 0
+
+assignWork :: RateLimiter -> [Bucket] -> Handler (POWGuarded a)
+assignWork ratelimiter bs =
+ case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of
+ [] -> throwError err404
+ (mkreq:_) -> do
+ rid <- liftIO $ mkRequestID $ requestIDSecret ratelimiter
+ return $ NeedProofOfWork $ mkreq rid
withBloomFilter
:: RateLimiter
-> (RateLimiter -> BloomFilter)
- -> (BloomFilter.MBloom RealWorld RandomSalt -> ST RealWorld a)
+ -> (BloomFilter.MBloom RealWorld RequestID -> ST RealWorld a)
-> IO a
withBloomFilter rl field a = do
b <- atomically $ readTMVar (field rl)
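
Note (illustration only): the rotation logic in the hunk above keeps two bloom-filter generations, usedRequestIDs and usedRequestIDsOld, and swaps them once the current one is half full, so a RequestID is rejected as a replay for roughly two generations. A self-contained sketch of that pattern, using exact Sets in place of bloom filters and hypothetical names, might look like:

import Control.Concurrent.STM
import qualified Data.Set as S

-- Two-generation replay guard: a stand-in for the current/old bloom
-- filter pair, kept exact here for clarity.
data ReplayGuard a = ReplayGuard
    { currentGen :: TVar (S.Set a)
    , oldGen :: TVar (S.Set a)
    , genLimit :: Int
    }

newReplayGuard :: Int -> IO (ReplayGuard a)
newReplayGuard n =
    ReplayGuard <$> newTVarIO S.empty <*> newTVarIO S.empty <*> pure n

-- True if the id has not been seen in the last two generations (and
-- records it); False if it is a replay.
checkAndRecord :: Ord a => ReplayGuard a -> a -> IO Bool
checkAndRecord g x = atomically $ do
    cur <- readTVar (currentGen g)
    old <- readTVar (oldGen g)
    if x `S.member` cur || x `S.member` old
        then return False
        else do
            let cur' = S.insert x cur
            if S.size cur' >= genLimit g
                -- rotate: the current generation becomes the old one,
                -- and a fresh, empty current generation is started
                then do
                    writeTVar (oldGen g) cur'
                    writeTVar (currentGen g) S.empty
                else writeTVar (currentGen g) cur'
            return True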
@@ -294,7 +275,7 @@ fallback ratelimiter logger a =
giveup = do
liftIO $ logStderr logger "** warning: All token buckets are empty and request queue is large; possible DOS attack? Rejected request.."
- assignWork ratelimiter logger =<< getBuckets ratelimiter
+ assignWork ratelimiter =<< getBuckets ratelimiter
-- | How much data could be stored, in bytes per second, assuming all
-- buckets in the rate limiter being constantly drained by requests,