author    Joey Hess <joeyh@joeyh.name>  2016-09-13 21:10:16 -0400
committer Joey Hess <joeyh@joeyh.name>  2016-09-13 21:10:16 -0400
commit    27aef01ba665a14924ece95d5ef4674e3945ef7e (patch)
tree      b63b58436ac4686e25b0397430fea22ebf316022 /HTTP
parent    768773ca27e34790bb9ece08d30a3974f12626f0 (diff)
download  keysafe-27aef01ba665a14924ece95d5ef4674e3945ef7e.tar.gz
eliminate half the bloom filters, using HMAC to verify RequestIDs
Simplifies the code, uses less memory, and removes the need to protect against flooding of RequestID generation, since the server does not store RequestIDs at all. Note that the RequestIDSecret is only stored in RAM, so restarting the server will invalidate any RequestIDs given out before. It would be possible now to store that secret on disk to avoid that problem, but it's probably not worth it.
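The patch below boils down to stateless token verification: the server hands out a (salt, HMAC(secret, salt)) pair and later accepts any pair whose MAC it can recompute, so no per-ID state is kept. A minimal sketch of that idea, using the cryptonite package for HMAC-SHA256 rather than keysafe's Raaz-based helpers; the names Secret, issue and verify are illustrative only, the real functions are mkRequestID and validRequestID in the diff:

import           Crypto.Hash.Algorithms (SHA256)
import           Crypto.MAC.HMAC        (HMAC, hmac)
import qualified Data.ByteArray         as BA
import           Data.ByteString        (ByteString)

-- The server keeps one secret key in memory; restarting loses it,
-- which invalidates any previously issued IDs (as the commit message notes).
type Secret = ByteString

-- Issue an ID: pair a random salt with an HMAC over that salt.
issue :: Secret -> ByteString -> (ByteString, ByteString)
issue secret salt = (salt, mac secret salt)

-- Verify an ID: recompute the HMAC and compare; no lookup table needed.
verify :: Secret -> (ByteString, ByteString) -> Bool
verify secret (salt, tag) = mac secret salt == tag

mac :: Secret -> ByteString -> ByteString
mac secret msg = BA.convert (hmac secret msg :: HMAC SHA256)

Reuse of a valid ID still has to be caught, which is what the remaining bloom filters in HTTP/RateLimit.hs do: a verified RequestID is inserted into usedRequestIDs and rejected if seen again.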
Diffstat (limited to 'HTTP')
-rw-r--r--  HTTP/ProofOfWork.hs   57
-rw-r--r--  HTTP/RateLimit.hs    121
2 files changed, 96 insertions, 82 deletions
diff --git a/HTTP/ProofOfWork.hs b/HTTP/ProofOfWork.hs
index ef6ecfb..476ba87 100644
--- a/HTTP/ProofOfWork.hs
+++ b/HTTP/ProofOfWork.hs
@@ -3,7 +3,7 @@
- Licensed under the GNU AGPL version 3 or higher.
-}
-{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE DeriveGeneric, OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module HTTP.ProofOfWork where
@@ -27,24 +27,36 @@ import Data.Monoid
import Prelude
-- | A value that the client has to do some work to calculate.
-data ProofOfWork = ProofOfWork B.ByteString RandomSalt
+data ProofOfWork = ProofOfWork B.ByteString RequestID
deriving (Show)
data ProofOfWorkRequirement = ProofOfWorkRequirement
{ leadingZeros :: Int
, addedArgon2Iterations :: Word32
- , randomSalt :: RandomSalt
+ , requestID :: RequestID
}
deriving (Generic, Show)
+-- | A request ID has two parts, a RandomSalt and a HMAC.
+-- The server can verify if a request ID is one it generated.
+data RequestID = RequestID
+ { randomSalt :: RandomSalt
+ , requestHMAC :: T.Text
+ }
+ deriving (Generic, Show, Eq)
+
-- | Using Text and not ByteString so that ProofOfWorkRequirement can have a
-- JSON instance.
newtype RandomSalt = RandomSalt { fromRandomSalt :: T.Text }
deriving (Generic, Show, Eq)
-instance Hashable RandomSalt where
- hashIO32 = hashIO32 . encodeUtf8 . fromRandomSalt
- hashIO64 = hashIO64 . encodeUtf8 . fromRandomSalt
+instance Hashable RequestID where
+ hashIO32 = hashIO32 . hashRequestID
+ hashIO64 = hashIO64 . hashRequestID
+
+hashRequestID :: RequestID -> B.ByteString
+hashRequestID rid = encodeUtf8 (fromRandomSalt (randomSalt rid))
+ <> ":" <> encodeUtf8 (requestHMAC rid)
-- | Servers should never demand a proof of work that takes longer than
-- this to generate. Note that if a server changes its mind and doubles
@@ -65,7 +77,7 @@ generationTime req =
let UseArgon2 (CPUCost (Seconds s) _) _ = proofOfWorkHashTunable (addedArgon2Iterations req)
in Seconds ((2^(leadingZeros req)) * s)
-mkProofOfWorkRequirement :: Seconds -> Maybe (RandomSalt -> ProofOfWorkRequirement)
+mkProofOfWorkRequirement :: Seconds -> Maybe (RequestID -> ProofOfWorkRequirement)
mkProofOfWorkRequirement (Seconds n)
| lz < 1 = Nothing
| otherwise = Just $ ProofOfWorkRequirement lz its
@@ -74,6 +86,26 @@ mkProofOfWorkRequirement (Seconds n)
UseArgon2 (CPUCost (Seconds s) _) _ = proofOfWorkHashTunable its
its = 0
+newtype RequestIDSecret = RequestIDSecret (Raaz.Key (Raaz.HMAC Raaz.SHA256))
+
+newRequestIDSecret :: IO RequestIDSecret
+newRequestIDSecret = do
+ prg <- Raaz.newPRG () :: IO Raaz.SystemPRG
+ RequestIDSecret <$> Raaz.random prg
+
+mkRequestID :: RequestIDSecret -> IO RequestID
+mkRequestID secret = mkRequeestID' secret <$> mkRandomSalt
+
+mkRequeestID' :: RequestIDSecret -> RandomSalt -> RequestID
+mkRequeestID' (RequestIDSecret key) salt =
+ let hmac = Raaz.hmacSha256 key (encodeUtf8 $ fromRandomSalt salt)
+ in RequestID salt (T.pack (showBase16 hmac))
+
+validRequestID :: RequestIDSecret -> RequestID -> Bool
+validRequestID secret rid =
+ let rid' = mkRequeestID' secret (randomSalt rid)
+ in requestHMAC rid == requestHMAC rid'
+
mkRandomSalt :: IO RandomSalt
mkRandomSalt = do
prg <- Raaz.newPRG () :: IO Raaz.SystemPRG
@@ -93,15 +125,16 @@ data NoPOWIdent = NoPOWIdent
instance POWIdent NoPOWIdent where
getPOWIdent NoPOWIdent = B.empty
+-- Note that this does not check validRequestID.
isValidProofOfWork :: POWIdent p => ProofOfWork -> ProofOfWorkRequirement -> p -> Bool
-isValidProofOfWork (ProofOfWork pow rsalt) req p =
- samesalts && enoughzeros
+isValidProofOfWork (ProofOfWork pow rid) req p =
+ samerequestids && enoughzeros
where
- samesalts = rsalt == randomSalt req
+ samerequestids = rid == requestID req
enoughzeros = all (== False) (take (leadingZeros req) (setBits b))
tunable = proofOfWorkHashTunable (addedArgon2Iterations req)
salt = Salt $ POWSalt $
- encodeUtf8 (fromRandomSalt (randomSalt req)) <> pow
+ encodeUtf8 (fromRandomSalt (randomSalt (requestID req))) <> pow
ExpensiveHash _ hash = expensiveHash tunable salt (getPOWIdent p)
-- Since expensiveHash generates an ascii encoded hash that
-- includes the parameters, take the sha256 of it to get the
@@ -128,4 +161,4 @@ genProofOfWork req p = go allByteStrings
| isValidProofOfWork candidate req p = candidate
| otherwise = go bs
where
- candidate = ProofOfWork b (randomSalt req)
+ candidate = ProofOfWork b (requestID req)
diff --git a/HTTP/RateLimit.hs b/HTTP/RateLimit.hs
index 194e798..45c6b9a 100644
--- a/HTTP/RateLimit.hs
+++ b/HTTP/RateLimit.hs
@@ -34,22 +34,19 @@ import Control.Monad.IO.Class
-- successively more difficult proof of work access requirement.
--
-- To guard against DOS attacks that reuse the same proof of work,
--- RandomSalt values are used, and bloom filters keep track of
--- the ones that have been assigned and used.
+-- bloom filters keep track of RequestIDs that have been used before.
data RateLimiter = RateLimiter
{ buckets :: TMVar [Bucket]
, unusedBuckets :: TMVar [Bucket]
, fallbackQueue :: FallbackQueue
- , assignedRandomSalts :: BloomFilter
- , assignedRandomSaltsOld :: BloomFilter
- , usedRandomSalts :: BloomFilter
- , usedRandomSaltsOld :: BloomFilter
- , numRandomSalts :: TMVar Int
- , randomSaltGenerationLimiter :: TokenBucket
+ , usedRequestIDs :: BloomFilter
+ , usedRequestIDsOld :: BloomFilter
+ , numUsedRequestIDs :: TMVar Int
+ , requestIDSecret :: RequestIDSecret
, requestCounter :: TMVar Integer
}
-type BloomFilter = TMVar (BloomFilter.MBloom RealWorld RandomSalt)
+type BloomFilter = TMVar (BloomFilter.MBloom RealWorld RequestID)
-- | Buckets fill up at a fixed rate, and accessing a bucket
-- removes one unit from it.
@@ -78,10 +75,8 @@ newRateLimiter cfg storedir logger = do
<*> newFallbackQueue
<*> mkBloomFilter
<*> mkBloomFilter
- <*> mkBloomFilter
- <*> mkBloomFilter
<*> newTMVarIO 0
- <*> newTokenBucket
+ <*> newRequestIDSecret
<*> newTMVarIO 0
_ <- forkIO (adjusterThread cfg storedir rl logger)
return rl
@@ -121,7 +116,7 @@ mkBloomFilter = do
-- of memory.
(bloomhashes, bloomsize) = suggestSizing bloomMaxSize (1/100000)
--- | Maximum number of RandomSalts that can be stored in a bloom filter
+-- | Maximum number of RequestIDs that can be stored in a bloom filter
-- without the false positive rate getting bad.
bloomMaxSize :: Int
bloomMaxSize = 1000000
@@ -130,21 +125,21 @@ bloomMaxSize = 1000000
-- access to, until one is found that accepts it.
rateLimit :: POWIdent p => RateLimiter -> Logger -> Maybe ProofOfWork -> p -> Handler a -> Handler (POWGuarded a)
rateLimit ratelimiter logger mpow p a = do
- validsalt <- liftIO $ checkValidSalt ratelimiter mpow
bs <- getBuckets ratelimiter
- if validsalt
+ validrequest <- liftIO $ checkValidRequestID ratelimiter logger mpow
+ if validrequest
then go bs
- else assignWork ratelimiter logger bs
+ else assignWork ratelimiter bs
where
go [] = fallback ratelimiter logger a
go (b:bs) = case mkProofOfWorkRequirement (proofOfWorkRequired b) of
Nothing -> checkbucket b bs
Just mkreq -> case mpow of
- Nothing -> assignWork ratelimiter logger (b:bs)
- Just pow@(ProofOfWork _ salt) ->
- if isValidProofOfWork pow (mkreq salt) p
+ Nothing -> assignWork ratelimiter (b:bs)
+ Just pow@(ProofOfWork _ rid) ->
+ if isValidProofOfWork pow (mkreq rid) p
then checkbucket b bs
- else assignWork ratelimiter logger (b:bs)
+ else assignWork ratelimiter (b:bs)
checkbucket b bs = do
allowed <- liftIO $ tokenBucketTryAlloc (tokenBucket b)
burstSize (fillInterval b) 1
@@ -152,69 +147,55 @@ rateLimit ratelimiter logger mpow p a = do
then allowRequest ratelimiter a
else go bs
-checkValidSalt :: RateLimiter -> Maybe ProofOfWork -> IO Bool
-checkValidSalt _ Nothing = return True
-checkValidSalt rl (Just (ProofOfWork _ salt)) = do
- assigned <- iselem assignedRandomSalts
- oldassigned <- iselem assignedRandomSaltsOld
- used <- iselem usedRandomSalts
- oldused <- iselem usedRandomSaltsOld
- if assigned && not oldassigned && not used && not oldused
- then do
- withBloomFilter rl usedRandomSalts
- (`BloomFilter.insert` salt)
- return True
- else return False
+checkValidRequestID :: RateLimiter -> Logger -> Maybe ProofOfWork -> IO Bool
+checkValidRequestID _ _ Nothing = return True
+checkValidRequestID rl logger (Just (ProofOfWork _ rid))
+ | validRequestID (requestIDSecret rl) rid = do
+ used <- iselem usedRequestIDs
+ oldused <- iselem usedRequestIDsOld
+ if not used && not oldused
+ then do
+ withBloomFilter rl usedRequestIDs
+ (`BloomFilter.insert` rid)
+ checkbloomsize
+ return True
+ else return False
+ | otherwise = return False
where
- iselem f = withBloomFilter rl f (BloomFilter.elem salt)
-
-assignWork :: RateLimiter -> Logger -> [Bucket] -> Handler (POWGuarded a)
-assignWork ratelimiter logger bs = case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of
- [] -> throwError err404
- (mkreq:_) -> liftIO $ do
- -- This prevents an attacker flooding requests that
- -- cause new random salts to be assigned, in order
- -- to fill up the bloom table and cause salts assigned
- -- to other clients to be rejected.
- -- Since the bloom filters hold 1 million salts,
- -- the attacker would need to send requests for over 10
- -- hours to force a bloom filter rotation, so would not
- -- impact many users.
- tokenBucketWait (randomSaltGenerationLimiter ratelimiter)
- 100 -- burst
- 1000000 -- refill 1 token per second
-
- salt <- liftIO mkRandomSalt
- withBloomFilter ratelimiter assignedRandomSalts
- (`BloomFilter.insert` salt)
+ iselem f = withBloomFilter rl f (BloomFilter.elem rid)
+
+ checkbloomsize = do
needrot <- atomically $ do
- n <- takeTMVar (numRandomSalts ratelimiter)
+ n <- takeTMVar (numUsedRequestIDs rl)
if n > bloomMaxSize `div` 2
- then return Nothing
+ then return (Just n)
else do
- putTMVar (numRandomSalts ratelimiter) (n+1)
- return (Just n)
+ putTMVar (numUsedRequestIDs rl) (n+1)
+ return Nothing
handlerotation needrot
- return $ NeedProofOfWork $ mkreq salt
- where
+
handlerotation Nothing = return ()
handlerotation (Just n) = do
logStderr logger $ "rotating bloom filters after processing " ++ show n ++ " requests"
- newassigned <- mkBloomFilter
newused <- mkBloomFilter
atomically $ do
- oldassigned <- takeTMVar (assignedRandomSalts ratelimiter)
- oldused <- takeTMVar (usedRandomSalts ratelimiter)
- putTMVar (assignedRandomSaltsOld ratelimiter) oldassigned
- putTMVar (usedRandomSaltsOld ratelimiter) oldused
- putTMVar (assignedRandomSalts ratelimiter) =<< takeTMVar newassigned
- putTMVar (usedRandomSalts ratelimiter) =<< takeTMVar newused
- putTMVar (numRandomSalts ratelimiter) 0
+ oldused <- takeTMVar (usedRequestIDs rl)
+ putTMVar (usedRequestIDsOld rl) oldused
+ putTMVar (usedRequestIDs rl) =<< takeTMVar newused
+ putTMVar (numUsedRequestIDs rl) 0
+
+assignWork :: RateLimiter -> [Bucket] -> Handler (POWGuarded a)
+assignWork ratelimiter bs =
+ case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of
+ [] -> throwError err404
+ (mkreq:_) -> do
+ rid <- liftIO $ mkRequestID $ requestIDSecret ratelimiter
+ return $ NeedProofOfWork $ mkreq rid
withBloomFilter
:: RateLimiter
-> (RateLimiter -> BloomFilter)
- -> (BloomFilter.MBloom RealWorld RandomSalt -> ST RealWorld a)
+ -> (BloomFilter.MBloom RealWorld RequestID -> ST RealWorld a)
-> IO a
withBloomFilter rl field a = do
b <- atomically $ readTMVar (field rl)
@@ -294,7 +275,7 @@ fallback ratelimiter logger a =
giveup = do
liftIO $ logStderr logger "** warning: All token buckets are empty and request queue is large; possible DOS attack? Rejected request.."
- assignWork ratelimiter logger =<< getBuckets ratelimiter
+ assignWork ratelimiter =<< getBuckets ratelimiter
-- | How much data could be stored, in bytes per second, assuming all
-- buckets in the rate limiter being constantly drained by requests,