author    Joey Hess <joeyh@joeyh.name>    2016-09-13 16:10:55 -0400
committer Joey Hess <joeyh@joeyh.name>    2016-09-13 16:10:55 -0400
commit    4d69e01dea8515d9cbccfbf2f793c98a1a752539 (patch)
tree      6cfd1215dc392f98b177cbe6429fcd1d7ec5450b /HTTP
parent    fa6de9c9e51f271910ed5fd589372700f1b3fe6f (diff)
download  keysafe-4d69e01dea8515d9cbccfbf2f793c98a1a752539.tar.gz
improved rate limiter
Now caps total request rate even if attacker is willing to burn infinite CPU on PoW.
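The cap comes from bounding how many requests may block waiting on the last
token bucket to refill; anything beyond that bound is turned away up front,
no matter how much proof of work the client presents. A minimal standalone
sketch of that idea in plain IO (the names capBlocked and maxBlocked are
illustrative, not identifiers from this patch, which does the equivalent
inside its Handler monad using Control.Exception.Lifted):

import Control.Concurrent.STM
import Control.Exception (bracket)

maxBlocked :: Int
maxBlocked = 100

-- Run an action only if fewer than maxBlocked requests are already
-- blocked; the queue slot is always released, even if the action throws.
capBlocked :: TMVar [()] -> IO a -> IO (Maybe a)
capBlocked q action = bracket acquire release run
  where
    acquire = atomically $ do
        l <- takeTMVar q
        if length l >= maxBlocked
            then putTMVar q l >> return False
            else putTMVar q (() : l) >> return True
    release False = return ()
    release True = atomically $ do
        l <- takeTMVar q
        putTMVar q (drop 1 l)
    run False = return Nothing
    run True = Just <$> action

Using bracket guarantees the queue slot is released even when the action
throws, which is why the patch below imports bracket from
Control.Exception.Lifted.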
Diffstat (limited to 'HTTP')
-rw-r--r--    HTTP/RateLimit.hs    229
1 file changed, 184 insertions(+), 45 deletions(-)
diff --git a/HTTP/RateLimit.hs b/HTTP/RateLimit.hs
index 46cbcd8..da22b92 100644
--- a/HTTP/RateLimit.hs
+++ b/HTTP/RateLimit.hs
@@ -16,6 +16,7 @@ import qualified Data.BloomFilter.Mutable as BloomFilter
import qualified Data.BloomFilter.Hash as BloomFilter
import Data.BloomFilter.Easy (suggestSizing)
import Control.Monad.ST
+import Control.Exception.Lifted (bracket)
import System.IO
import Data.Maybe
import Data.Word
@@ -29,32 +30,48 @@ import Control.Monad.IO.Class
-- the ones that have been assigned and used.
data RateLimiter = RateLimiter
    { buckets :: TMVar [Bucket]
+   , unusedBuckets :: TMVar [Bucket]
    , assignedRandomSalts :: BloomFilter
    , assignedRandomSaltsOld :: BloomFilter
    , usedRandomSalts :: BloomFilter
    , usedRandomSaltsOld :: BloomFilter
    , numRandomSalts :: TMVar Int
    , randomSaltGenerationLimiter :: TokenBucket
+   , blockedRequestQueue :: TMVar [()]
    }
type BloomFilter = TMVar (BloomFilter.MBloom RealWorld RandomSalt)
--- | Buckets fill up at a fixed rate (which can be tuned by the server
--- operator), and accessing a bucket removes one unit from it.
+-- | Buckets fill up at a fixed rate, and accessing a bucket
+-- removes one unit from it.
data Bucket = Bucket
    { tokenBucket :: TokenBucket
-   , mkProofReq :: Maybe (RandomSalt -> ProofOfWorkRequirement)
+   , proofOfWorkRequired :: Seconds
+   , fillInterval :: Word64
    }
+minFillInterval :: Word64
+minFillInterval = 2 * 60 * 1000000 -- 1 token every other minute
+
+-- | Size of the bucket. This allows a burst of accesses after an idle
+-- period, which is especially useful when retrieving keys that were
+-- split into multiple chunks. However, setting this too high lets clients
+-- cheaply store lots of data on a server that has been idle for a while,
+-- which could be an attractive way to abuse keysafe servers.
+burstSize :: Word64
+burstSize = 4 -- 256 kb immediate storage
+
newRateLimiter :: IO RateLimiter
newRateLimiter = RateLimiter
    <$> (newTMVarIO =<< mkbuckets (sdiv maxProofOfWork 2) [])
+   <*> newTMVarIO []
    <*> mkBloomFilter
    <*> mkBloomFilter
    <*> mkBloomFilter
    <*> mkBloomFilter
    <*> newTMVarIO 0
    <*> newTokenBucket
+   <*> newTMVarIO []
  where
    -- The last bucket takes half of maxProofOfWork to access, and
    -- each earlier bucket quarters that time, down to the first bucket,
@@ -62,15 +79,23 @@ newRateLimiter = RateLimiter
    -- where a client keeps getting bumped up to more and more expensive
    -- buckets, it doesn't need to do more than maxProofOfWork total work.
    mkbuckets s@(Seconds n) bs
-       | n <= 0 = return bs
+       | n <= 0 = finalbucket bs
        | otherwise = do
-           let mkreq = mkProofOfWorkRequirement s
-           b <- Bucket
-               <$> newTokenBucket
-               <*> pure mkreq
-           case mkreq of
-               Nothing -> return (b:bs)
-               Just _ -> mkbuckets (sdiv s 4) (b:bs)
+           case mkProofOfWorkRequirement s of
+               Nothing -> finalbucket bs
+               Just _ -> do
+                   b <- Bucket
+                       <$> newTokenBucket
+                       <*> pure s
+                       <*> pure minFillInterval
+                   mkbuckets (sdiv s 4) (b:bs)
+   finalbucket bs = do
+       b <- Bucket
+           <$> newTokenBucket
+           <*> pure (Seconds 0)
+           <*> pure minFillInterval
+       return (b:bs)
+
    sdiv (Seconds n) d = Seconds (n `div` d)
mkBloomFilter :: IO BloomFilter
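A note on the escalation bound claimed in the comment above: the per-bucket
proof-of-work costs form a geometric series, maxProofOfWork/2 +
maxProofOfWork/8 + maxProofOfWork/32 + ..., which converges to two thirds of
maxProofOfWork. A quick sanity check, assuming for illustration a
maxProofOfWork of 60 seconds (the real value is defined elsewhere in
keysafe):

-- Total work done by a client bumped through the first n non-free buckets.
totalWork :: Double -> Int -> Double
totalWork maxPoW n = sum (take n (iterate (/ 4) (maxPoW / 2)))

-- >>> totalWork 60 10
-- ~40 seconds, comfortably under the 60 second ceiling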
@@ -88,46 +113,18 @@ mkBloomFilter = do
bloomMaxSize :: Int
bloomMaxSize = 1000000
--- | Size of the bucket. This allows a burst of accesses after an idle
--- period, which is especially useful when retrieving keys that were
--- split into multiple chunks. However, setting this too high lets clients
--- cheaply store lots of data on a server that has been idle for a while,
--- which could be an attractive way to abuse keysafe servers.
-burstSize :: Word64
-burstSize = 4 -- 256 kb immediate storage
-
--- | Rate that the bucket is filled.
-fillRate :: Word64
-fillRate = 2 * 60 * 1000000 -- 1 token ever other minute
-
--- | How much data could be stored, in bytes per second, assuming all
--- buckets in the rate limiter are kept drained, and all requests are
--- stores.
-maximumStorageRate :: RateLimiter -> IO Int
-maximumStorageRate ratelimiter = do
-   let storesize = maximum knownObjectSizes
-   bs <- liftIO $ atomically $ readTMVar (buckets ratelimiter)
-   return $ (length bs * storesize * 1000000) `div` fromIntegral fillRate
-
-- A request is tried in each bucket in turn which its proof of work allows
--- access to. If all accessible token buckets are empty, generate a
--- new ProofOfWorkRequirement for the client.
---
--- If all buckets are tried and are empty, we must be very overloaded.
--- In this case, the request is still processed, since the client has done
--- quite a lot of work.
+-- access to, until one is found that accepts it.
rateLimit :: POWIdent p => RateLimiter -> Maybe ProofOfWork -> p -> Handler a -> Handler (POWGuarded a)
rateLimit ratelimiter mpow p a = do
    validsalt <- liftIO $ checkValidSalt ratelimiter mpow
-   bs <- liftIO $ atomically $ readTMVar (buckets ratelimiter)
+   bs <- getBuckets ratelimiter
    if validsalt
        then go bs
        else assignWork ratelimiter bs
  where
-   go [] = do
-       liftIO $ hPutStrLn stderr "** warning: all token buckets are empty; possible DOS attack?"
-       Result <$> a
-   go (b:bs) = case mkProofReq b of
+   go [] = allBucketsEmpty ratelimiter a
+   go (b:bs) = case mkProofOfWorkRequirement (proofOfWorkRequired b) of
        Nothing -> checkbucket b bs
        Just mkreq -> case mpow of
            Nothing -> assignWork ratelimiter (b:bs)
@@ -137,7 +134,7 @@ rateLimit ratelimiter mpow p a = do
                else assignWork ratelimiter (b:bs)
    checkbucket b bs = do
        allowed <- liftIO $ tokenBucketTryAlloc (tokenBucket b)
-           burstSize fillRate 1
+           burstSize (fillInterval b) 1
        if allowed
            then Result <$> a
            else go bs
@@ -159,7 +156,7 @@ checkValidSalt rl (Just (ProofOfWork _ salt)) = do
    iselem f = withBloomFilter rl f (BloomFilter.elem salt)

assignWork :: RateLimiter -> [Bucket] -> Handler (POWGuarded a)
-assignWork ratelimiter bs = case mapMaybe mkProofReq bs of
+assignWork ratelimiter bs = case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of
    [] -> throwError err404
    (mkreq:_) -> liftIO $ do
        -- This prevents an attacker flooding requests that
@@ -209,3 +206,145 @@ withBloomFilter
withBloomFilter rl field a = do
    b <- atomically $ readTMVar (field rl)
    stToIO (a b)
+
+getBuckets :: MonadIO m => RateLimiter -> m [Bucket]
+getBuckets = liftIO . atomically . readTMVar . buckets
+
+putBuckets :: MonadIO m => RateLimiter -> [Bucket] -> m ()
+putBuckets rl bs = liftIO $ atomically $ do
+   _ <- takeTMVar (buckets rl)
+   putTMVar (buckets rl) bs
+
+-- When all buckets are empty, and the client has provided a good enough
+-- proof of work to access the last bucket, the request is still processed,
+-- but blocked until the last token bucket refills.
+--
+-- Only 100 requests are allowed to block this way at a time, since the
+-- requests are taking up server memory while blocked, and because we don't
+-- want to stall legitimate clients too long.
+allBucketsEmpty :: RateLimiter -> Handler a -> Handler (POWGuarded a)
+allBucketsEmpty ratelimiter a = bracket (liftIO addq) (liftIO . removeq) go
+  where
+   q = blockedRequestQueue ratelimiter
+
+   addq = liftIO $ atomically $ do
+       l <- takeTMVar q
+       if length l >= 100
+           then do
+               putTMVar q l
+               return False
+           else do
+               putTMVar q (():l)
+               return True
+
+   removeq False = return ()
+   removeq True = liftIO $ atomically $ do
+       l <- takeTMVar q
+       putTMVar q (drop 1 l)
+
+   waitlast = do
+       bs <- getBuckets ratelimiter
+       case reverse bs of
+           (lastb:_) -> do
+               hPutStrLn stderr "** warning: All token buckets are empty. Delaying request.."
+               tokenBucketWait (tokenBucket lastb) burstSize (fillInterval lastb)
+               return True
+           [] -> return False
+
+   go False = giveup
+   go True = do
+       ok <- liftIO waitlast
+       if ok
+           then Result <$> a
+           else giveup
+
+   giveup = do
+       liftIO $ hPutStrLn stderr "** warning: All token buckets are empty and request queue is large; possible DOS attack? Rejected request.."
+       assignWork ratelimiter =<< getBuckets ratelimiter
+
+-- | How much data could be stored, in bytes per second, assuming all
+-- buckets in the rate limiter are being constantly drained by requests,
+-- and all requests store objects.
+maximumStorageRate :: RateLimiter -> IO Int
+maximumStorageRate ratelimiter = do
+   bs <- getBuckets ratelimiter
+   return $ sum $ map calc bs
+  where
+   storesize = maximum knownObjectSizes
+   calc b = (storesize * 1000000) `div` fromIntegral (fillInterval b)
+
+describeRateLimiter :: RateLimiter -> IO String
+describeRateLimiter ratelimiter = do
+   storerate <- maximumStorageRate ratelimiter
+   bs <- getBuckets ratelimiter
+   return $ concat
+       [ "rate limiter buckets: " ++ show bs
+       , " ; maximum allowed storage rate: "
+       , showrate (storerate * 60 * 60 * 24 * 31) ++ "/month"
+       ]
+  where
+   showrate n
+       | n < 1024*1024 = show (n `div` 1024) ++ " KiB"
+       | n < 1024*1024*1024 = show (n `div` (1024 * 1024)) ++ " MiB"
+       | otherwise = show (n `div` (1024 * 1024 * 1024)) ++ " GiB"
+
+instance Show Bucket where
+   show b = show (fillInterval b `div` (60 * 1000000)) ++ " Minutes/Request"
+       ++ " (PoW=" ++ show (proofOfWorkRequired b) ++ ")"
+
+increaseDifficulty :: RateLimiter -> IO ()
+increaseDifficulty ratelimiter = do
+   bs <- getBuckets ratelimiter
+   case bs of
+       [] -> unable
+       (b:[]) -> do
+           -- Make the remaining bucket take longer to fill.
+           let b' = b { fillInterval = fillInterval b * 2 }
+           putBuckets ratelimiter [b']
+           done
+       (b:rest) -> do
+           -- Remove the least expensive to access bucket,
+           -- so that clients have to do some work.
+           -- This is done first to cut off any freeloaders
+           -- that may be abusing the keysafe server.
+           atomically $ do
+               unused <- takeTMVar (unusedBuckets ratelimiter)
+               putTMVar (unusedBuckets ratelimiter) (b:unused)
+           putBuckets ratelimiter rest
+           done
+  where
+   unable = putStrLn "unable to increase difficulty; out of buckets"
+   done = do
+       desc <- describeRateLimiter ratelimiter
+       putStrLn $ "increased difficulty -- " ++ desc
+
+-- Should undo the effect of increaseDifficulty.
+reduceDifficulty :: RateLimiter -> IO ()
+reduceDifficulty ratelimiter = do
+   bs <- getBuckets ratelimiter
+   case bs of
+       (b:[]) | fillInterval b > minFillInterval -> do
+           let b' = b { fillInterval = fillInterval b `div` 2 }
+           putBuckets ratelimiter [b']
+           done
+       _ -> do
+           mb <- getunused
+           case mb of
+               Nothing -> unable
+               Just b -> do
+                   putBuckets ratelimiter (b:bs)
+                   done
+  where
+   getunused = atomically $ do
+       unused <- takeTMVar (unusedBuckets ratelimiter)
+       case unused of
+           (b:bs) -> do
+               putTMVar (unusedBuckets ratelimiter) bs
+               return (Just b)
+           [] -> do
+               putTMVar (unusedBuckets ratelimiter) []
+               return Nothing
+   unable = return ()
+   done = do
+       desc <- describeRateLimiter ratelimiter
+       putStrLn $ "reduced difficulty -- " ++ desc
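
For a feel of the numbers maximumStorageRate reports: at the minimum fill
interval of one token every two minutes, and assuming a 64 KiB maximum
object size (the real figure comes from knownObjectSizes), a single bucket
admits roughly half a kilobyte per second. Back-of-the-envelope arithmetic
mirroring calc above, with the object size an assumption:

-- Assumed maximum object size in bytes; keysafe's actual value comes
-- from knownObjectSizes.
assumedStoreSize :: Integer
assumedStoreSize = 64 * 1024

-- Bytes per second one bucket admits at minFillInterval (2 minutes),
-- mirroring calc in maximumStorageRate above.
perBucketRate :: Integer
perBucketRate = (assumedStoreSize * 1000000) `div` (2 * 60 * 1000000)

-- >>> perBucketRate
-- 546   -- about 1.4 GiB per month per bucket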