path: root/serialise-speed
author     Lars Wirzenius <liw@liw.fi>    2015-11-01 15:10:11 +0200
committer  Lars Wirzenius <liw@liw.fi>    2015-11-01 15:10:11 +0200
commit     310ce5cd9a8e49826f306a21856d4f33a393ac1f (patch)
tree       80c0a6bcf8e5a517c2c90e6038e226ceda078afd /serialise-speed
parent     a0a793977eb7da818ee0d3249785ae3c101dddea (diff)
download   obnam-310ce5cd9a8e49826f306a21856d4f33a393ac1f.tar.gz
Allow user to give object to serialise-speed
Also, make output slightly more useful.
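For illustration only: with this change the script takes an iteration count and, optionally, the name of a YAML file holding the object to serialise; without a second argument it falls back to the built-in test object. A hypothetical invocation (the filename and file contents below are made up, not part of the commit):

    ./serialise-speed 1000
    ./serialise-speed 1000 my-object.yaml

where my-object.yaml could be any YAML mapping, for example:

    foo: bar
    big: xyzzy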
Diffstat (limited to 'serialise-speed')
-rwxr-xr-x   serialise-speed   32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/serialise-speed b/serialise-speed
index d4530126..4b4335c3 100755
--- a/serialise-speed
+++ b/serialise-speed
@@ -19,6 +19,7 @@ import sys
 import time
 import cliapp
+import yaml
 import obnamlib
@@ -35,7 +36,24 @@ class MicroBenchmark(cliapp.Application):
     def process_args(self, args):
         n = int(args[0])
-        obj = {
+        if len(args) > 1:
+            obj = self.read_object(args[1])
+        else:
+            obj = self.get_builtin_object()
+        encoded = obnamlib.serialise_object(obj)
+
+        calibrate = measure(n, lambda: None)
+        encode = measure(n, lambda: obnamlib.serialise_object(obj))
+        self.report('encode', n, encode - calibrate)
+        decode = measure(n, lambda: obnamlib.deserialise_object(encoded))
+        self.report('decode', n, decode - calibrate)
+
+    def read_object(self, filename):
+        with open(filename) as f:
+            return yaml.safe_load(f)
+
+    def get_builtin_object(self):
+        return {
             'foo': 'bar',
             'big': 'x' * 1024**2,
             'dict': {
@@ -44,14 +62,10 @@ class MicroBenchmark(cliapp.Application):
             }
         }
-        encoded = obnamlib.serialise_object(obj)
-
-        calibrate = measure(n, lambda: None)
-        encode = measure(n, lambda: obnamlib.serialise_object(obj))
-        decode = measure(n, lambda: obnamlib.deserialise_object(encoded))
-        print 'encode: %.1f/s' % (n/(encode - calibrate))
-        print 'decode: %.1f/s' % (n/(decode - calibrate))
-
+    def report(self, what, num_iters, duration):
+        self.output.write(
+            '%s: %s ms/iter (%.1f/s)\n' %
+            (what, 1000.0 * duration/num_iters, num_iters/duration))
 if __name__ == '__main__':
     MicroBenchmark().run()
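The measure() helper used above is defined elsewhere in serialise-speed and is not touched by this commit. As a minimal sketch, assuming measure(n, callable) returns the total wall-clock time for n calls, it could look like this (an assumption, not the actual implementation):

    import time

    def measure(n, func):
        # Call func() n times and return the elapsed wall-clock seconds.
        # The benchmark subtracts a run with a no-op lambda (the
        # 'calibrate' value) to cancel loop and call overhead before
        # reporting per-iteration cost.
        started = time.time()
        for i in range(n):
            func()
        return time.time() - started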