path: root/Benchmark.hs
{-# LANGUAGE OverloadedStrings, FlexibleInstances #-}

{- Copyright 2016 Joey Hess <id@joeyh.name>
 -
 - Licensed under the GNU AGPL version 3 or higher.
 -}

module Benchmark where

import Types
import Tunables
import ExpensiveHash
import HTTP.ProofOfWork
import Cost
import Serialization ()
import qualified Data.ByteString.UTF8 as BU8
import qualified Crypto.Argon2 as Argon2
import Data.Time.Clock
import Control.DeepSeq
import Control.Monad
import Data.Monoid
import Data.Maybe

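-- A benchmark result pairs the cost that was expected with the cost
-- that was actually measured.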
data BenchmarkResult t = BenchmarkResult { expectedBenchmark :: t, actualBenchmark :: t }

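-- Displays the expected and actual CPU time, in seconds.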
instance Show (BenchmarkResult (Cost op)) where
	show br = "   expected: " ++ showtime (expectedBenchmark br) ++ "s"
		++ "\tactual: " ++ showtime (actualBenchmark br) ++ "s"
	  where
		showtime (CPUCost (Seconds s) _) = 
			show (fromRational s :: Double)

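-- Benchmarks the given number of rounds of an expensive hash,
-- scaling its expected creation cost by the number of rounds.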
benchmarkExpensiveHash :: Int -> ExpensiveHashTunable -> IO (BenchmarkResult (Cost CreationOp))
benchmarkExpensiveHash rounds tunables =
	benchmarkExpensiveHash' rounds tunables (getexpected tunables)
  where
	getexpected (UseArgon2 cost _) = mapCost (* fromIntegral rounds) cost

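-- Benchmarks an expensive hash against an explicitly provided
-- expected cost.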
benchmarkExpensiveHash' :: Int -> ExpensiveHashTunable -> Cost op -> IO (BenchmarkResult (Cost op))
benchmarkExpensiveHash' rounds tunables@(UseArgon2 _ hashopts) expected = do
	numcores <- fromIntegral . fromMaybe (error "Unknown number of physical cores.") 
		<$> getNumCores
	start <- getCurrentTime
	forM_ [1..rounds] $ \n -> do
		-- Must vary the data being hashed to avoid laziness
		-- caching hash results.
		let base = BU8.fromString (show n)
		let ExpensiveHash _ t = expensiveHash tunables
			(Salt (GpgKey (KeyId (base <> "dummy"))))
			(base <> "himom")
		t `deepseq` return ()
	end <- getCurrentTime
	let diff = floor (end `diffUTCTime` start) :: Integer
	let maxthreads = Argon2.hashParallelism hashopts
	let actual = CPUCost (Seconds (fromIntegral diff)) 
		(Divisibility $ fromIntegral maxthreads)
	-- The expected cost is for a single core, so adjust it
	-- based on the number of cores, up to a maximum of the number
	-- of threads that the hash is configured to use.
	let usedcores = min maxthreads numcores
	let adjustedexpected = mapCost (/ fromIntegral usedcores) expected
	return $ BenchmarkResult
		{ expectedBenchmark = adjustedexpected
		, actualBenchmark = actual
		}

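-- Runs an action the given number of times, forcing each result to
-- normal form, and compares the elapsed wall clock time with the
-- expected cost.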
benchmark :: NFData t => Int -> Cost CreationOp -> (Int -> IO t) -> IO (BenchmarkResult (Cost CreationOp))
benchmark rounds expected a = do
	start <- getCurrentTime
	forM_ [1..rounds] $ \n -> do
		v <- a n
		v `deepseq` return ()
	end <- getCurrentTime
	let diff = floor (end `diffUTCTime` start) :: Integer
	return $ BenchmarkResult
		{ expectedBenchmark = expected
		, actualBenchmark = CPUCost (Seconds (fromIntegral diff)) (Divisibility 1)
		}

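-- Benchmarks generating proofs of work, each tuned to take
-- approximately the given number of seconds.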
benchmarkPoW :: Int -> Seconds -> IO (BenchmarkResult (Cost CreationOp))
benchmarkPoW rounds seconds = do
	let Just mk = mkProofOfWorkRequirement seconds
	s <- newRequestIDSecret
	rid <- mkRequestID s
	benchmark rounds (CPUCost (seconds * fromIntegral rounds) (Divisibility 1))
		(return . genProofOfWork (mk rid))

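-- Benchmarks the proof of work and the expensive hashes configured
-- by the Tunables, printing the results.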
benchmarkTunables :: Tunables -> IO ()
benchmarkTunables tunables = do
	putStrLn "/proc/cpuinfo:"
	putStrLn =<< readFile "/proc/cpuinfo"
	
	putStrLn "Benchmarking 1000 rounds of proof of work hash..."
	print =<< benchmarkExpensiveHash 1000 (proofOfWorkHashTunable 0)

	putStrLn "Benchmarking 60 rounds of 1 second proofs of work..."
	print =<< benchmarkPoW 60 (Seconds 1)
	
	putStrLn "Benchmarking 10 rounds of 8 second proofs of work..."
	print =<< benchmarkPoW 10 (Seconds 8)

	-- Rather than run all 256 rounds of this hash, which would
	-- probably take on the order of 1 hour, run only 16, and scale
	-- the expected cost accordingly.
	let normalrounds = 256 * randomSaltBytes (keyEncryptionKeyTunable tunables)
	putStrLn $ "Benchmarking 16/" ++ show normalrounds ++ " rounds of key encryption key hash..."
	r <- benchmarkExpensiveHash' 16
		(keyEncryptionKeyHash $ keyEncryptionKeyTunable tunables)
		(mapCost (/ (fromIntegral normalrounds / 16)) $ randomSaltBytesBruteForceCost $ keyEncryptionKeyTunable tunables)
	print r
	putStrLn $ "Estimated time for " ++ show normalrounds ++ " rounds of key encryption key hash..."
	print $ BenchmarkResult
		{ expectedBenchmark = mapCost (* 16) (expectedBenchmark r)
		, actualBenchmark = mapCost (* 16) (actualBenchmark r)
		}

	putStrLn "Benchmarking 1 round of name generation hash..."
	print =<< benchmarkExpensiveHash 1
		(nameGenerationHash $ nameGenerationTunable tunables)
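
-- A minimal usage sketch; it assumes a defaultTunables value is
-- exported by the Tunables module:
--
--   main :: IO ()
--   main = benchmarkTunables defaultTunables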