diff options
author | Joey Hess <joeyh@joeyh.name> | 2016-09-13 17:36:30 -0400 |
---|---|---|
committer | Joey Hess <joeyh@joeyh.name> | 2016-09-13 17:41:00 -0400 |
commit | 86cb38c936da30910700c58353a5e716fa94e83c (patch) | |
tree | 61aca15d699c7b158fa943cadf9e13f032cd2855 /HTTP/RateLimit.hs | |
parent | 675c405aa53868cd1246857138f91ecb51d01985 (diff) | |
download | keysafe-86cb38c936da30910700c58353a5e716fa94e83c.tar.gz |
use fast-logger for better logging
Diffstat (limited to 'HTTP/RateLimit.hs')
-rw-r--r-- | HTTP/RateLimit.hs | 67 |
1 file changed, 34 insertions, 33 deletions
diff --git a/HTTP/RateLimit.hs b/HTTP/RateLimit.hs index d9ec752..e09da6e 100644 --- a/HTTP/RateLimit.hs +++ b/HTTP/RateLimit.hs @@ -8,6 +8,7 @@ module HTTP.RateLimit where import Types.Cost import HTTP import HTTP.ProofOfWork +import HTTP.Logger import Tunables import CmdLine (ServerConfig(..)) import Types.Storage @@ -23,7 +24,6 @@ import Data.BloomFilter.Easy (suggestSizing) import Control.Monad import Control.Monad.ST import Control.Exception.Lifted (bracket) -import System.IO import System.DiskSpace import Data.Maybe import Data.Word @@ -69,8 +69,8 @@ minFillInterval = 2 * 60 * 1000000 -- 1 token every other minute burstSize :: Word64 burstSize = 4 -- 256 kb immediate storage -newRateLimiter :: ServerConfig -> Maybe LocalStorageDirectory -> IO RateLimiter -newRateLimiter cfg storedir = do +newRateLimiter :: ServerConfig -> Maybe LocalStorageDirectory -> Logger -> IO RateLimiter +newRateLimiter cfg storedir logger = do rl <- RateLimiter <$> (newTMVarIO =<< mkbuckets (sdiv maxProofOfWork 2) []) <*> newTMVarIO [] @@ -82,7 +82,7 @@ newRateLimiter cfg storedir = do <*> newTokenBucket <*> newTMVarIO [] <*> newTMVarIO 0 - _ <- forkIO (adjusterThread cfg storedir rl) + _ <- forkIO (adjusterThread cfg storedir rl logger) return rl where -- The last bucket takes half of maxProofOfWork to access, and @@ -127,23 +127,23 @@ bloomMaxSize = 1000000 -- A request is tried in each bucket in turn which its proof of work allows -- access to, until one is found that accepts it. 
-rateLimit :: POWIdent p => RateLimiter -> Maybe ProofOfWork -> p -> Handler a -> Handler (POWGuarded a) -rateLimit ratelimiter mpow p a = do +rateLimit :: POWIdent p => RateLimiter -> Logger -> Maybe ProofOfWork -> p -> Handler a -> Handler (POWGuarded a) +rateLimit ratelimiter logger mpow p a = do validsalt <- liftIO $ checkValidSalt ratelimiter mpow bs <- getBuckets ratelimiter if validsalt then go bs - else assignWork ratelimiter bs + else assignWork ratelimiter logger bs where - go [] = allBucketsEmpty ratelimiter a + go [] = allBucketsEmpty ratelimiter logger a go (b:bs) = case mkProofOfWorkRequirement (proofOfWorkRequired b) of Nothing -> checkbucket b bs Just mkreq -> case mpow of - Nothing -> assignWork ratelimiter (b:bs) + Nothing -> assignWork ratelimiter logger (b:bs) Just pow@(ProofOfWork _ salt) -> if isValidProofOfWork pow (mkreq salt) p then checkbucket b bs - else assignWork ratelimiter (b:bs) + else assignWork ratelimiter logger (b:bs) checkbucket b bs = do allowed <- liftIO $ tokenBucketTryAlloc (tokenBucket b) burstSize (fillInterval b) 1 @@ -167,8 +167,8 @@ checkValidSalt rl (Just (ProofOfWork _ salt)) = do where iselem f = withBloomFilter rl f (BloomFilter.elem salt) -assignWork :: RateLimiter -> [Bucket] -> Handler (POWGuarded a) -assignWork ratelimiter bs = case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of +assignWork :: RateLimiter -> Logger -> [Bucket] -> Handler (POWGuarded a) +assignWork ratelimiter logger bs = case mapMaybe (mkProofOfWorkRequirement . proofOfWorkRequired) bs of [] -> throwError err404 (mkreq:_) -> liftIO $ do -- This prevents an attacker flooding requests that @@ -198,7 +198,7 @@ assignWork ratelimiter bs = case mapMaybe (mkProofOfWorkRequirement . 
proofOfWor where handlerotation Nothing = return () handlerotation (Just n) = do - hPutStrLn stderr $ "rotating bloom filters after processing " ++ show n ++ " requests" + logStderr logger $ "rotating bloom filters after processing " ++ show n ++ " requests" newassigned <- mkBloomFilter newused <- mkBloomFilter atomically $ do @@ -234,8 +234,9 @@ putBuckets rl bs = liftIO $ atomically $ do -- Only 100 requests are allowed to block this way at a time, since the -- requests are taking up server memory while blocked, and because we don't -- want to stall legitimate clients too long. -allBucketsEmpty :: RateLimiter -> Handler a -> Handler (POWGuarded a) -allBucketsEmpty ratelimiter a = bracket (liftIO addq) (liftIO . removeq) go +allBucketsEmpty :: RateLimiter -> Logger -> Handler a -> Handler (POWGuarded a) +allBucketsEmpty ratelimiter logger a = + bracket (liftIO addq) (liftIO . removeq) go where q = blockedRequestQueue ratelimiter @@ -258,7 +259,7 @@ allBucketsEmpty ratelimiter a = bracket (liftIO addq) (liftIO . removeq) go bs <- getBuckets ratelimiter case reverse bs of (lastb:_) -> do - hPutStrLn stderr "** warning: All token buckets are empty. Delaying request.." + logStderr logger "** warning: All token buckets are empty. Delaying request.." tokenBucketWait (tokenBucket lastb) burstSize (fillInterval lastb) return True [] -> return False @@ -271,8 +272,8 @@ allBucketsEmpty ratelimiter a = bracket (liftIO addq) (liftIO . removeq) go else giveup giveup = do - liftIO $ hPutStrLn stderr "** warning: All token buckets are empty and request queue is large; possible DOS attack? Rejected request.." - assignWork ratelimiter =<< getBuckets ratelimiter + liftIO $ logStderr logger "** warning: All token buckets are empty and request queue is large; possible DOS attack? Rejected request.." 
+ assignWork ratelimiter logger =<< getBuckets ratelimiter -- | How much data could be stored, in bytes per second, assuming all -- buckets in the rate limiter being constantly drained by requests, @@ -306,8 +307,8 @@ instance Show Bucket where show b = show (fillInterval b `div` (60 * 1000000)) ++ " Second/Request" ++ " (PoW=" ++ show (proofOfWorkRequired b) ++ ")" -increaseDifficulty :: RateLimiter -> IO () -increaseDifficulty ratelimiter = do +increaseDifficulty :: Logger -> RateLimiter -> IO () +increaseDifficulty logger ratelimiter = do bs <- getBuckets ratelimiter case bs of [] -> unable @@ -329,14 +330,14 @@ increaseDifficulty ratelimiter = do putBuckets ratelimiter rest done where - unable = putStrLn "Unable to increase difficulty any further!" + unable = logStderr logger "Unable to increase difficulty any further!" done = do desc <- describeRateLimiter ratelimiter - putStrLn $ "increased difficulty -- " ++ desc + logStdout logger $ "increased difficulty -- " ++ desc -- Should undo the effect of increaseDifficulty. -reduceDifficulty :: RateLimiter -> IO () -reduceDifficulty ratelimiter = do +reduceDifficulty :: Logger -> RateLimiter -> IO () +reduceDifficulty logger ratelimiter = do bs <- getBuckets ratelimiter case bs of (b:[]) | fillInterval b > minFillInterval -> do @@ -363,7 +364,7 @@ reduceDifficulty ratelimiter = do unable = return () done = do desc <- describeRateLimiter ratelimiter - putStrLn $ "reduced difficulty -- " ++ desc + logStdout logger $ "reduced difficulty -- " ++ desc allowRequest :: RateLimiter -> Handler a -> Handler (POWGuarded a) allowRequest ratelimiter a = do @@ -380,15 +381,15 @@ addRequest ratelimiter n = liftIO $ atomically $ do -- Thread that wakes up periodically and checks the request rate -- against the available disk space. If the disk is filling too quickly, -- the difficulty is increased. 
-adjusterThread :: ServerConfig -> Maybe LocalStorageDirectory -> RateLimiter -> IO () -adjusterThread cfg storedir ratelimiter = forever $ do +adjusterThread :: ServerConfig -> Maybe LocalStorageDirectory -> RateLimiter -> Logger -> IO () +adjusterThread cfg storedir ratelimiter logger = forever $ do delay (1000000 * intervalsecs) - checkRequestRate cfg storedir ratelimiter intervalsecs + checkRequestRate cfg storedir ratelimiter logger intervalsecs where intervalsecs = 60*60 -checkRequestRate :: ServerConfig -> Maybe LocalStorageDirectory -> RateLimiter -> Integer -> IO () -checkRequestRate cfg storedir ratelimiter intervalsecs = do +checkRequestRate :: ServerConfig -> Maybe LocalStorageDirectory -> RateLimiter -> Logger -> Integer -> IO () +checkRequestRate cfg storedir ratelimiter logger intervalsecs = do let storesize = maximum knownObjectSizes n <- liftIO $ atomically $ swapTMVar (requestCounter ratelimiter) 0 let maxstoredinterval = n * fromIntegral storesize @@ -398,7 +399,7 @@ checkRequestRate cfg storedir ratelimiter intervalsecs = do let estimate = if maxstoredthismonth <= 0 then 10000 else freespace `div` maxstoredthismonth `div` 2 - putStrLn $ unlines + logStdout logger $ unlines [ "rate limit check" , " free disk space:" ++ showBytes freespace , " number of requests since last check: " ++ show n @@ -406,7 +407,7 @@ checkRequestRate cfg storedir ratelimiter intervalsecs = do , " estimate min " ++ show estimate ++ " months to fill half of disk" ] if estimate > target * 2 - then reduceDifficulty ratelimiter + then reduceDifficulty logger ratelimiter else if estimate < target - then increaseDifficulty ratelimiter + then increaseDifficulty logger ratelimiter else return () |