path: root/Benchmark.hs
author     Joey Hess <joeyh@joeyh.name>  2016-09-13 22:15:18 -0400
committer  Joey Hess <joeyh@joeyh.name>  2016-09-13 22:32:13 -0400
commit     68eb14fdf6debf1e26921a1b2dddf34dbd031471 (patch)
tree       b179ae7f113fd89c674862d5c9619282d545a17d /Benchmark.hs
parent     27aef01ba665a14924ece95d5ef4674e3945ef7e (diff)
download   keysafe-68eb14fdf6debf1e26921a1b2dddf34dbd031471.tar.gz
use less expensive hash for proof of work
The server has to run the hash once to verify a request, so a hash that took 4 seconds could make the server do too much work if it's being flooded with requests. So the hash was made much less expensive.

This required keeping track of fractional seconds. I used Rational for them, to avoid most rounding problems; that turned out nicely.

I've only tuned the proofOfWorkHashTunable on my fanless, overheating laptop so far, but it seems fairly reasonably tuned.
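As a quick sketch of the Rational point (assumed shape only: the real Seconds lives in keysafe's Cost module, which this page only shows indirectly through the diff's pattern match on Seconds s):

    -- Hedged sketch, assuming Seconds wraps a Rational as the message says.
    newtype Seconds = Seconds Rational
        deriving (Eq, Ord, Show)

    -- Rational arithmetic is exact, so dividing a cost across cores and
    -- scaling it back (as the benchmark code below now does with / and *)
    -- cannot accumulate rounding error the way Double would.
    perCore :: Seconds
    perCore = Seconds (1 / 3)        -- exactly 1 % 3, not 0.3333...

    recombined :: Seconds
    recombined = let Seconds s = perCore in Seconds (s * 3)  -- exactly 1 % 1

Only at display time does the value become a Double, as the new Show instance in the diff does with (fromRational s :: Double).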
Diffstat (limited to 'Benchmark.hs')
-rw-r--r--  Benchmark.hs  |  55
1 file changed, 46 insertions(+), 9 deletions(-)
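To make the flooding argument concrete, here is a generic proof-of-work sketch. It is hedged: keysafe's real scheme lives in HTTP.ProofOfWork and uses a tuned Argon2 hash (proofOfWorkHashTunable), not SHA256, and genProof/verifyProof are hypothetical names for illustration, not that module's API.

    import Crypto.Hash (SHA256 (..), hashWith)  -- cryptonite; stand-in hash
    import qualified Data.ByteArray as BA
    import qualified Data.ByteString.Char8 as B8
    import Data.Bits (testBit)
    import Data.Word (Word8)

    -- Count leading zero bits of a hash, most significant bit first.
    leadingZeroBits :: [Word8] -> Int
    leadingZeroBits [] = 0
    leadingZeroBits (w:ws)
        | w == 0 = 8 + leadingZeroBits ws
        | otherwise = length (takeWhile not [testBit w i | i <- [7,6..0]])

    meetsDifficulty :: Int -> B8.ByteString -> Bool
    meetsDifficulty bits input =
        leadingZeroBits (BA.unpack (hashWith SHA256 input)) >= bits

    -- Client: hash many candidate nonces until one meets the difficulty.
    genProof :: Int -> B8.ByteString -> B8.ByteString
    genProof bits rid = head
        [ nonce
        | n <- [0 :: Integer ..]
        , let nonce = B8.pack (show n)
        , meetsDifficulty bits (rid <> nonce)
        ]

    -- Server: exactly one hash per incoming request to verify it.
    verifyProof :: Int -> B8.ByteString -> B8.ByteString -> Bool
    verifyProof bits rid nonce = meetsDifficulty bits (rid <> nonce)

The asymmetry is the whole point: the client pays many hashes per request while the server pays one. That single hash sits on the flood path, which is why the commit message above rejects a 4-second verification hash.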
diff --git a/Benchmark.hs b/Benchmark.hs
index 21b7ce3..a46e9f4 100644
--- a/Benchmark.hs
+++ b/Benchmark.hs
@@ -1,4 +1,4 @@
-{-# LANGUAGE OverloadedStrings #-}
+{-# LANGUAGE OverloadedStrings, FlexibleInstances #-}
{- Copyright 2016 Joey Hess <id@joeyh.name>
-
@@ -10,6 +10,7 @@ module Benchmark where
import Types
import Tunables
import ExpensiveHash
+import HTTP.ProofOfWork
import Cost
import Serialization ()
import qualified Data.ByteString.UTF8 as BU8
@@ -20,6 +21,15 @@ import Control.Monad
import Data.Monoid
import Data.Maybe
+data BenchmarkResult t = BenchmarkResult { expectedBenchmark :: t, actualBenchmark :: t }
+
+instance Show (BenchmarkResult (Cost op)) where
+ show br = " expected: " ++ showtime (expectedBenchmark br) ++ "s"
+ ++ "\tactual: " ++ showtime (actualBenchmark br) ++ "s"
+ where
+ showtime (CPUCost (Seconds s) _) =
+ show (fromRational s :: Double)
+
benchmarkExpensiveHash :: Int -> ExpensiveHashTunable -> IO (BenchmarkResult (Cost CreationOp))
benchmarkExpensiveHash rounds tunables =
benchmarkExpensiveHash' rounds tunables (getexpected tunables)
@@ -40,36 +50,63 @@ benchmarkExpensiveHash' rounds tunables@(UseArgon2 _ hashopts) expected = do
(base <> "himom")
t `deepseq` return ()
end <- getCurrentTime
- let diff = floor $ end `diffUTCTime` start
+ let diff = floor (end `diffUTCTime` start) :: Integer
let maxthreads = Argon2.hashParallelism hashopts
- let actual = CPUCost (Seconds diff) (Divisibility $ fromIntegral maxthreads)
+ let actual = CPUCost (Seconds (fromIntegral diff))
+ (Divisibility $ fromIntegral maxthreads)
-- The expected cost is for a single core, so adjust it
-- based on the number of cores, up to a maximum of the number
-- of threads that the hash is configured to use.
let usedcores = min maxthreads numcores
- let adjustedexpected = mapCost (`div` fromIntegral usedcores) expected
+ let adjustedexpected = mapCost (/ fromIntegral usedcores) expected
return $ BenchmarkResult
{ expectedBenchmark = adjustedexpected
, actualBenchmark = actual
}
+benchmark :: NFData t => Int -> Cost CreationOp -> (Int -> IO t) -> IO (BenchmarkResult (Cost CreationOp))
+benchmark rounds expected a = do
+ start <- getCurrentTime
+ forM_ [1..rounds] $ \n -> do
+ v <- a n
+ v `deepseq` return ()
+ end <- getCurrentTime
+ let diff = floor (end `diffUTCTime` start) :: Integer
+ return $ BenchmarkResult
+ { expectedBenchmark = expected
+ , actualBenchmark = CPUCost (Seconds (fromIntegral diff)) (Divisibility 1)
+ }
+
+benchmarkPoW :: Int -> Seconds -> IO (BenchmarkResult (Cost CreationOp))
+benchmarkPoW rounds seconds = do
+ let Just mk = mkProofOfWorkRequirement seconds
+ s <- newRequestIDSecret
+ rid <- mkRequestID s
+ benchmark rounds (CPUCost (seconds * Seconds (fromIntegral rounds)) (Divisibility 1))
+ (return . genProofOfWork (mk rid))
+
benchmarkTunables :: Tunables -> IO ()
benchmarkTunables tunables = do
putStrLn "/proc/cpuinfo:"
putStrLn =<< readFile "/proc/cpuinfo"
- putStrLn $ "Benchmarking 16 rounds of proof of work hash..."
- print =<< benchmarkExpensiveHash 16 (proofOfWorkHashTunable 0)
+ putStrLn "Benchmarking 1000 rounds of proof of work hash..."
+ print =<< benchmarkExpensiveHash 1000 (proofOfWorkHashTunable 0)
+
+ putStrLn "Benchmarking 60 rounds of 1 second proofs of work..."
+ print =<< benchmarkPoW 60 (Seconds 1)
+
+ putStrLn "Benchmarking 10 rounds of 8 second proofs of work..."
+ print =<< benchmarkPoW 10 (Seconds 8)
-- Rather than run all 256 rounds of this hash, which would
-- probably take on the order of 1 hour, run only 16, and scale
-- the expected cost accordingly.
- let normalrounds = fromIntegral $
- 256 * randomSaltBytes (keyEncryptionKeyTunable tunables)
+ let normalrounds = 256 * randomSaltBytes (keyEncryptionKeyTunable tunables)
putStrLn $ "Benchmarking 16/" ++ show normalrounds ++ " rounds of key encryption key hash..."
r <- benchmarkExpensiveHash' 16
(keyEncryptionKeyHash $ keyEncryptionKeyTunable tunables)
- (mapCost (`div` (normalrounds `div` 16)) $ randomSaltBytesBruteForceCost $ keyEncryptionKeyTunable tunables)
+ (mapCost (/ (fromIntegral normalrounds / 16)) $ randomSaltBytesBruteForceCost $ keyEncryptionKeyTunable tunables)
print r
putStrLn $ "Estimated time for " ++ show normalrounds ++ " rounds of key encryption key hash..."
print $ BenchmarkResult