#!/usr/bin/python
# Copyright 2015 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-3+ =*=
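
# The benchmark spec is a YAML file. Judging from the keys this script
# reads, a minimal spec might look like this (all values are
# illustrative, not taken from a real project):
#
#     git: git://example.com/obnam.git
#     reports-dir: reports
#     benchmarks:
#     -   name: basic
#         steps:
#         -   live: make check
#
# Each 'live' value is a shell snippet, run with `sh -euc` inside the
# checked-out tree.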

import os
import shutil
import tempfile
import time

import cliapp
import yaml


class ObnamBenchmarker(cliapp.Application):

def process_args(self, args):
if not args:
raise cliapp.AppException('Need benchmark spec filename')
spec = self.read_benchmark_spec(args[0])
tempdir = tempfile.mkdtemp()
for treeish in args[1:]:
self.run_all_benchmarks(spec, treeish, tempdir)
self.generate_html(spec)
shutil.rmtree(tempdir)
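
    # Usage sketch (invocation name and arguments are hypothetical):
    # the first argument is the benchmark spec file, and any further
    # arguments are git treeishes (branches, tags, commits) to
    # benchmark:
    #
    #     ./obnam-benchmarker benchmarks.yaml master v1.8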

    def read_benchmark_spec(self, filename):
with open(filename) as f:
return yaml.safe_load(f)

    def run_all_benchmarks(self, spec, treeish, tempdir):
checkout = self.get_treeish(spec, treeish, tempdir)
for benchmark in spec.get('benchmarks', []):
result = self.run_one_benchmark(benchmark, checkout)
self.save_result(spec, result)

    def get_treeish(self, spec, treeish, tempdir):
        # Clone the repository only once per temporary directory;
        # later calls re-use the clone, check out the requested
        # treeish, and clean out any leftover files.
checkout = os.path.join(tempdir, 'git')
if not os.path.exists(checkout):
cliapp.runcmd(['git', 'clone', spec['git'], checkout])
cliapp.runcmd(['git', 'checkout', treeish], cwd=checkout)
cliapp.runcmd(['git', 'clean', '-fdxq'], cwd=checkout)
return checkout

    def run_one_benchmark(self, benchmark, checkout):
result = BenchmarkResult()
result.collect_info_from_spec(benchmark)
result.collect_info_from_checkout(checkout)
for step in benchmark.get('steps', []):
self.run_benchmark_step(step, checkout, result)
return result

    def run_benchmark_step(self, step, checkout, result):
        # Time the step's shell snippet; sh -euc makes the snippet
        # fail fast on errors and undefined variables. Start the
        # clock before running the command, not after.
        step_info = dict(step)
        started = time.time()
        if 'live' in step:
            cliapp.runcmd(['sh', '-euc', step['live']], cwd=checkout)
        ended = time.time()
        step_info['duration'] = ended - started
        result.add_step(step_info)

    def save_result(self, spec, result):
obj = result.as_dict()
pathname = self.get_report_pathname(spec, result)
with open(pathname, 'w') as f:
yaml.safe_dump(obj, stream=f, default_flow_style=False, indent=4)

    def get_report_pathname(self, spec, result):
return os.path.join(
spec['reports-dir'],
result.get_commit_id() + '.yaml')
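
    # A saved report ends up in <reports-dir>/<commit id>.yaml. Based
    # on what BenchmarkResult collects below, its contents look roughly
    # like this (values are illustrative):
    #
    #     commit_id: 1234abcd...
    #     name: basic
    #     steps:
    #     -   duration: 12.3
    #         live: make check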

    def generate_html(self, spec):
        # Not implemented yet: eventually this should render the
        # saved reports as HTML.
        pass


class BenchmarkResult(object):

def __init__(self):
self._dict = {}

    def as_dict(self):
return self._dict

    def collect_info_from_spec(self, spec):
self._dict['name'] = spec['name']

    def collect_info_from_checkout(self, checkout):
output = cliapp.runcmd(['git', 'rev-parse', 'HEAD'], cwd=checkout)
self._dict['commit_id'] = output.strip()

    def add_step(self, step_info):
self._dict['steps'] = self._dict.get('steps', []) + [step_info]

    def get_commit_id(self):
return self._dict['commit_id']


if __name__ == '__main__':
ObnamBenchmarker().run()