summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLars Wirzenius <liw@liw.fi>2015-12-25 13:06:49 +0100
committerLars Wirzenius <liw@liw.fi>2015-12-26 16:44:58 +0100
commitf097c61b1c1435d4849a33930cb75332dc7158dc (patch)
treef7632248ce5bb584ac11fe9de64dfba7e67a366f
parentda859589e5295e5d050abff73773c006cf81ead1 (diff)
downloadobnam-benchmarks-f097c61b1c1435d4849a33930cb75332dc7158dc.tar.gz
Rewrite obbench, adding yarns and Debian packaging
-rw-r--r--benchmark-specs/e2obbench-6.yaml (renamed from e2obbench-6.yaml)0
-rw-r--r--benchmark-specs/e2obbench.yaml (renamed from e2obbench.yaml)0
-rw-r--r--benchmark-specs/production.yaml (renamed from production.yaml)0
-rwxr-xr-xcheck5
-rw-r--r--debian/changelog6
-rw-r--r--debian/compat1
-rw-r--r--debian/control13
-rw-r--r--debian/copyright23
-rwxr-xr-xdebian/rules4
-rw-r--r--debian/source/format1
-rwxr-xr-xobbench491
-rw-r--r--obbenchlib/__init__.py3
-rw-r--r--obbenchlib/benchmarker.py172
-rw-r--r--obbenchlib/htmlgen.py232
-rw-r--r--obbenchlib/obbench.css (renamed from benchmark.css)0
-rw-r--r--obbenchlib/result.py57
-rw-r--r--obbenchlib/templates/benchmark.j238
-rw-r--r--obbenchlib/templates/index.j228
-rw-r--r--setup.py31
-rwxr-xr-xtest-run10
-rw-r--r--test.yaml25
-rw-r--r--yarns/000.yarn6
-rw-r--r--yarns/100-intro.yarn26
-rw-r--r--yarns/200-config.yarn74
-rw-r--r--yarns/300-running.yarn70
-rw-r--r--yarns/900-implements.yarn58
-rw-r--r--yarns/Makefile30
27 files changed, 923 insertions, 481 deletions
diff --git a/e2obbench-6.yaml b/benchmark-specs/e2obbench-6.yaml
index bd67a8c..bd67a8c 100644
--- a/e2obbench-6.yaml
+++ b/benchmark-specs/e2obbench-6.yaml
diff --git a/e2obbench.yaml b/benchmark-specs/e2obbench.yaml
index c3a0734..c3a0734 100644
--- a/e2obbench.yaml
+++ b/benchmark-specs/e2obbench.yaml
diff --git a/production.yaml b/benchmark-specs/production.yaml
index 8a2c88c..8a2c88c 100644
--- a/production.yaml
+++ b/benchmark-specs/production.yaml
diff --git a/check b/check
new file mode 100755
index 0000000..dcbadda
--- /dev/null
+++ b/check
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+set -e
+
+yarn --cd-datadir --shell python2 --shell-arg '' yarns/*.yarn "$@"
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..69f0cc8
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,6 @@
+obbench (0.1-1) UNRELEASED; urgency=low
+
+ * Initial packaging. This is not intended to be uploaded to Debian, so
+ no closing of an ITP bug.
+
+ -- Lars Wirzenius <liw@liw.fi> Sat, 08 May 2010 11:10:24 +1200
diff --git a/debian/compat b/debian/compat
new file mode 100644
index 0000000..ec63514
--- /dev/null
+++ b/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..9af89fe
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,13 @@
+Source: obbench
+Maintainer: Lars Wirzenius <liw@liw.fi>
+Section: python
+Priority: optional
+Standards-Version: 3.9.6
+Build-Depends: debhelper (>= 9~), python-all (>= 2.7~), dh-python
+X-Python-Version: >= 2.7
+
+Package: obbench
+Architecture: all
+Depends: ${python:Depends}, ${misc:Depends}, python (>= 2.7)
+Description: benchmarking tool for Obnam
+ Run benchmarks for the Obnam backup program.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..ae6205b
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,23 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: obbench
+Upstream-Contact: Lars Wirzenius <liw@liw.fi>
+Source: http://git.liw.fi/obnam-benchmarks
+
+Files: *
+Copyright: 2015, Lars Wirzenius
+License: GPL-3+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+ .
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+ .
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ .
+ On a Debian system, you can find a copy of GPL version 3 at
+ /usr/share/common-licenses/GPL-3 .
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..2d33f6a
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,4 @@
+#!/usr/bin/make -f
+
+%:
+ dh $@
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/obbench b/obbench
index be11190..32418a8 100755
--- a/obbench
+++ b/obbench
@@ -17,471 +17,70 @@
# =*= License: GPL-3+ =*=
-import glob
import os
-import shutil
-import sys
-import tempfile
-import time
import cliapp
-import jinja2
import yaml
-
-summary_j2 = '''\
-{% autoescape true %}
-<html>
- <head>
- <title>Obnam benchmark: summary</title>
- <link rel="stylesheet" href="benchmark.css" type="text/css" />
- </head>
- <body>
- <h1>Obnam benchmark: summary</h1>
-
- <h2>Benchmark results</h2>
-
- <table>
- <tr>
- <th>date</th>
- <th>commit</th>
- <th>commit msg</th>
- {% for name in benchmark_names %}
- <th>{{ name }} (seconds) <br/>(% of goal)</th>
- {% endfor %}
- </tr>
-
- {% for run in runs %}
- <tr>
- <td class="date">{{ run.date }}</td>
- <td class="commitid">{{ run.commit_id }}</td>
- <td class="commitmsg">{{ run.commit_msg }}</td>
-
- {% for name in benchmark_names %}
- <td class="duration">
- <a href="{{ run.links[name] }}">
- {{ run.durations[name] }}</a>
- ({{ run.references[name] }})
- </td>
- {% endfor %}
- </tr>
- {% endfor %}
-
- </table>
-
- <h2>Benchmark spec</h2>
- <p><pre>{{ spec }}</pre></p>
-
- </body>
-</html>
-
-{% endautoescape %}
-'''
-
-
-benchmark_j2 = '''\
-{% autoescape true %}
-<html>
- <head>
- <title>Obnam benchmark: {{ obj.commit_id }} {{ obj.name }}</title>
- <link rel="stylesheet" href="benchmark.css" type="text/css" />
- </head>
- <body>
-
- <h1>Obnam benchmark: {{ obj.commit_id }} {{ obj.name }}</h1>
-
- <table>
- <tr>
- {% for step in obj.steps %}
- {% if 'obnam' in step %}
- <th>{{ step.obnam }} seconds<br/> (% of goal)</th>
- {% endif %}
- {% endfor %}
- </tr>
-
- <tr>
- {% for step in obj.steps %}
- {% if 'obnam' in step %}
- <td><a href="{{ step.profile_filename }}">
- {{ step.duration_fmt }}</a> ({{step.reference_fmt }})</td>
- {% endif %}
- {% endfor %}
- </tr>
- </table>
- </body>
-</html>
-{% endautoescape %}
-'''
+import obbenchlib
class ObnamBenchmarker(cliapp.Application):
+ def add_settings(self):
+ self.settings.string(
+ ['state'],
+ 'keep state in DIR between runs',
+ metavar='DIR',
+ default='.')
+
def process_args(self, args):
- if not args:
- raise cliapp.AppException('Need benchmark spec filename')
spec = self.read_benchmark_spec(args[0])
- state = self.read_state(spec)
- self.logger = IndentedLogger()
-
- tempdir = tempfile.mkdtemp()
- for treeish in args[1:]:
- self.logger.msg('Benchmarking treeish %s' % treeish)
- with self.logger:
- self.run_all_benchmarks(spec, state, treeish, tempdir)
- self.save_state(spec, state)
-
- self.logger.msg('Generating HTML')
- self.generate_html(spec)
-
- self.logger.msg('Cleaning up')
- shutil.rmtree(tempdir)
+ statedir = self.create_state_directory()
+ self.clone_or_update_git(statedir, spec)
+ self.run_benchmarks(statedir, spec, args[1:])
+ self.produce_html(statedir, spec)
def read_benchmark_spec(self, filename):
with open(filename) as f:
return yaml.safe_load(f)
- def read_state(self, spec):
- try:
- with open(spec['state']) as f:
- return yaml.safe_load(f)
- except EnvironmentError:
- return { 'commit_id': None }
-
- def save_state(self, spec, state):
- with open(spec['state'], 'w') as f:
- return yaml.safe_dump(state, stream=f)
-
- def run_all_benchmarks(self, spec, state, treeish, tempdir):
- checkout = self.get_treeish(spec, treeish, tempdir)
- commit_id = self.get_commit_id(checkout)
- if commit_id == state['commit_id']:
- self.logger.msg('Already benchmarked')
- else:
- self.prepare_obnam(checkout)
- for benchmark in spec.get('benchmarks', []):
- result = self.run_one_benchmark(
- spec, benchmark, tempdir, checkout)
- self.save_result(spec, benchmark, result)
- state['commit_id'] = commit_id
-
- def get_treeish(self, spec, treeish, tempdir):
- checkout = os.path.join(tempdir, 'git')
- if not os.path.exists(checkout):
- cliapp.runcmd(['git', 'clone', spec['git'], checkout])
- cliapp.runcmd(['git', 'checkout', treeish], cwd=checkout)
- cliapp.runcmd(['git', 'clean', '-fdxq'], cwd=checkout)
- return checkout
-
- def get_commit_id(self, checkout):
- output = cliapp.runcmd(['git', 'rev-parse', 'HEAD'], cwd=checkout)
- return output.strip()
-
- def prepare_obnam(self, checkout):
- cliapp.runcmd(['python', 'setup.py', 'build_ext', '-i'], cwd=checkout)
-
- def run_one_benchmark(self, spec, benchmark, tempdir, checkout):
- self.logger.msg('Running benchmark %s' % benchmark['name'])
- with self.logger:
- result = BenchmarkResult()
- result.collect_info_from_spec(benchmark)
- result.collect_info_from_checkout(checkout)
-
- config = self.create_obnam_config(spec, benchmark, tempdir)
-
- live = self.create_live_dir(tempdir)
- for step in benchmark.get('steps', []):
- self.run_benchmark_step(
- step, tempdir, checkout, config, live, result)
- return result
-
- def create_obnam_config(self, spec, benchmark, tempdir):
- config = os.path.join(tempdir, 'obnam.conf')
- with open(config, 'w') as f:
- f.write('[config]\n')
- f.write('quiet = yes\n')
- f.write('repository = %s\n' % os.path.join(tempdir, 'repo'))
- f.write('root = %s\n' % self.get_live_data(tempdir))
- f.write('log = %s\n' % os.path.join(tempdir, 'obnam.log'))
- for key, value in spec.get('obnam_config', {}).items():
- f.write('%s = %s\n' % (key, value))
- for key, value in benchmark.get('obnam_config', {}).items():
- f.write('%s = %s\n' % (key, value))
- return config
-
- def get_live_data(self, tempdir):
- return os.path.join(tempdir, 'live')
-
- def create_live_dir(self, tempdir):
- live = self.get_live_data(tempdir)
- if os.path.exists(live):
- shutil.rmtree(live)
- os.mkdir(live)
- return live
-
- def run_benchmark_step(self,
- step, tempdir, checkout, config, live, result):
- step_info = dict(step)
-
- if 'live' in step:
- self.logger.msg('Creating live data: %s' % step['live'])
- cliapp.runcmd(['sh', '-euc', step['live']], cwd=live)
-
- action = step['obnam']
- self.logger.msg('Obnam %s' % action)
- func = funcs = {
- 'backup': self.run_backup,
- 'restore': self.run_restore,
- }
- started = time.time()
- funcs[action](tempdir, checkout, config, step_info)
- ended = time.time()
- step_info['duration'] = ended - started
-
- result.add_step(step_info)
-
- def run_backup(self, tempdir, checkout, config, step_info):
- self.run_obnam(step_info, checkout, ['backup', '--config', config])
-
- def run_restore(self, tempdir, checkout, config, step_info):
- restored = os.path.join(tempdir, 'restored')
- if os.path.exists(restored):
- shutil.rmtree(restored)
- self.run_obnam(
- step_info, checkout,
- ['restore', '--config', config, '--to', restored])
-
- def run_obnam(self, step_info, checkout, args):
- env = dict(os.environ)
- env['OBNAM_PROFILE'] = 'obnam.prof'
- cliapp.runcmd(
- ['./obnam', '--no-default-config'] + args,
- env=env,
- cwd=checkout)
- step_info['profile'] = cliapp.runcmd(
- ['./obnam-viewprof', 'obnam.prof'],
- cwd=checkout)
-
- def save_result(self, spec, benchmark, result):
- obj = result.as_dict()
- pathname = self.get_report_pathname(spec, benchmark, result)
- with open(pathname, 'w') as f:
- yaml.safe_dump(obj, stream=f, default_flow_style=False, indent=4)
-
- def get_report_pathname(self, spec, benchmark, result):
- return os.path.join(
- spec['reports_dir'],
- '%s_%s.yaml' % (result.get_commit_id(), benchmark['name']))
-
- def generate_html(self, spec):
- objs = self.read_results_files(spec)
- for obj in objs:
- self.write_benchmark_page(spec, obj)
- self.write_summary_page(spec, objs)
- self.copy_css_file(spec)
- self.publish_html(spec)
+ def create_state_directory(self):
+ statedir = self.settings['state']
+ if not os.path.exists(statedir):
+ os.mkdir(statedir)
+ return statedir
- def read_results_files(self, spec):
- objs = []
- for filename in glob.glob(os.path.join(spec['reports_dir'], '*.yaml')):
- with open(filename) as f:
- objs.append(yaml.safe_load(f))
- return objs
-
- def write_benchmark_page(self, spec, obj):
- for benchmark in spec['benchmarks']:
- if benchmark['name'] == obj['name']:
- break
+ def clone_or_update_git(self, statedir, spec):
+ gitdir = self.gitdir(statedir)
+ if os.path.exists(gitdir):
+            cliapp.runcmd(['git', 'pull'], cwd=gitdir)
else:
- benchmark = {}
-
- filename = os.path.join(
- spec['html_dir'],
- '{}_{}.html'.format(obj['commit_id'], obj['name']))
- with open(filename, 'w') as f:
- for index, step in enumerate(obj['steps']):
- if 'obnam' not in step:
- continue
-
- basename = '{commit}_{name}_{index}.txt'.format(
- commit=obj['commit_id'],
- name=obj['name'],
- index=index)
-
- filename = os.path.join(spec['html_dir'], basename)
- with open(filename, 'w') as profile:
- profile.write(step['profile'])
- step['profile_filename'] = basename
-
- reference = 'unknown'
- if benchmark is not None:
- spec_step = benchmark['steps'][index]
- if 'reference' in spec_step:
- reference = '%.1f %%' % (
- 100.0 * step['duration'] / spec_step['reference'])
- step['reference_fmt'] = reference
-
- step['duration_fmt'] = '%.1f' % step['duration']
-
- vars = {
- 'obj': obj,
- }
-
- env = jinja2.Environment(
- autoescape=lambda foo: True,
- extensions=['jinja2.ext.autoescape'])
- template = env.from_string(benchmark_j2)
- f.write(template.render(**vars))
-
- def q(self, text):
- '''Quote for HTML'''
- text = str(text)
- text = '&amp;'.join(text.split('&'))
- text = '&lt;'.join(text.split('<'))
- text = '&gt;'.join(text.split('>'))
- return text
-
- def write_summary_page(self, spec, objs):
- benchmark_names = self.find_benchmark_names(objs)
- runs = self.create_table_of_benchmark_runs(benchmark_names, objs)
-
- filename = os.path.join(spec['html_dir'], 'index.html')
- with open(filename, 'w') as f:
- for run in runs:
- run['links'] = {}
- run['references'] = {}
- for name in benchmark_names:
- reference = sum(
- sum(x.get('reference', 0) for x in b['steps'])
- for b in spec['benchmarks']
- if b['name'] == name)
- if reference > 0:
- reference = '%.1f %%' % (
- 100.0 * run['durations'][name] / reference)
- else:
- reference = 'unknown'
- run['references'][name] = reference
-
- run['links'][name] = '{commit}_{name}.html'.format(
- commit=self.q(run['commit_id']),
- name=self.q(name))
- run['durations'][name] = '%.1f' % run['durations'][name]
-
- vars = {
- 'benchmark_names': benchmark_names,
- 'runs': runs,
- 'spec': yaml.safe_dump(
- spec, default_flow_style=False, indent=4),
- }
-
- env = jinja2.Environment(
- autoescape=lambda foo: True,
- extensions=['jinja2.ext.autoescape'])
- template = env.from_string(summary_j2)
- f.write(template.render(**vars))
-
-
- def find_benchmark_names(self, objs):
- return list(sorted(set(o['name'] for o in objs)))
-
- def create_table_of_benchmark_runs(self, names, objs):
-
- def make_key(obj):
- return (obj['date'], obj['commit_id'])
-
- def total(obj, field):
- return sum(step.get(field, 0) for step in obj['steps'])
-
- sorted_objs = []
- for obj in objs:
- sorted_objs.append((make_key(obj), obj))
- sorted_objs.sort()
-
- runs = []
- for key, obj in sorted_objs:
- if not runs or make_key(runs[-1]) != key:
- runs.append({
- 'date': obj['date'],
- 'commit_id': obj['commit_id'],
- 'commit_msg': obj['commit_msg'],
- 'durations': { obj['name']: total(obj, 'duration') },
- })
- else:
- runs[-1]['durations'][obj['name']] = total(obj, 'duration')
-
- return runs
-
- def copy_css_file(self, spec):
- filename = os.path.join(spec['html_dir'], 'benchmark.css')
- shutil.copy('benchmark.css', filename)
-
- def publish_html(self, spec):
- if 'publish_html' in spec:
- self.logger.msg('Publishing HTML')
- cliapp.runcmd(
- ['sh', '-euc', spec['publish_html']],
- cwd=spec['html_dir'])
-
-
-class BenchmarkResult(object):
-
- def __init__(self):
- self._dict = {}
-
- def as_dict(self):
- return self._dict
-
- def collect_info_from_spec(self, spec):
- self._dict['name'] = spec['name']
-
- def collect_info_from_checkout(self, checkout):
- self.collect_checkout_commit_id(checkout)
- self.collect_checkout_commit_date(checkout)
- self.collect_checkout_commit_first_line(checkout)
-
- def collect_checkout_commit_id(self, checkout):
- output = cliapp.runcmd(['git', 'rev-parse', 'HEAD'], cwd=checkout)
- self._dict['commit_id'] = output.strip()[:7]
-
- def collect_checkout_commit_date(self, checkout):
- self._dict['date'] = 'unknown'
- output = cliapp.runcmd(
- ['git', 'show', '--date=iso', 'HEAD'],
- cwd=checkout)
- for line in output.splitlines():
- if line.startswith('Date:'):
- self._dict['date'] = line[len('Date:'):].strip()
- break
-
- def collect_checkout_commit_first_line(self, checkout):
- output = cliapp.runcmd(
- ['git', 'show', '--pretty=oneline', 'HEAD'],
- cwd=checkout)
- line1 = output.splitlines()[0].split(' ', 1)[1]
- self._dict['commit_msg'] = line1
-
- def add_step(self, step_info):
- self._dict['steps'] = self._dict.get('steps', []) + [step_info]
-
- def get_commit_id(self):
- return self._dict['commit_id']
-
-
-class IndentedLogger(object):
-
- def __init__(self):
- self._level = 0
- self._indent = 2
-
- def msg(self, text):
- sys.stdout.write(' ' * (self._level * self._indent))
- sys.stdout.write(text + '\n')
- sys.stdout.flush()
-
- def __enter__(self):
- self._level += 1
+ cliapp.runcmd(['git', 'clone', spec['git'], gitdir])
+
+ def gitdir(self, statedir):
+ return os.path.join(statedir, 'git')
+
+ def run_benchmarks(self, statedir, spec, refs):
+ benchmarker = obbenchlib.Benchmarker()
+ benchmarker.statedir = statedir
+ benchmarker.gitdir = self.gitdir(statedir)
+ benchmarker.resultdir = self.resultdir(statedir)
+ benchmarker.spec = spec
+ for ref in refs:
+ benchmarker.run_benchmarks(ref)
+
+ def resultdir(self, statedir):
+ return os.path.join(statedir, 'results')
+
+ def produce_html(self, statedir, spec):
+ gen = obbenchlib.HtmlGenerator()
+ gen.statedir = statedir
+ gen.resultdir = self.resultdir(statedir)
+ gen.gitdir = self.gitdir(statedir)
+ gen.spec = spec
+ gen.generate_html()
- def __exit__(self, *args):
- self._level -= 1
if __name__ == '__main__':
ObnamBenchmarker().run()
diff --git a/obbenchlib/__init__.py b/obbenchlib/__init__.py
new file mode 100644
index 0000000..9e52759
--- /dev/null
+++ b/obbenchlib/__init__.py
@@ -0,0 +1,3 @@
+from .benchmarker import Benchmarker
+from .result import Result
+from .htmlgen import HtmlGenerator
diff --git a/obbenchlib/benchmarker.py b/obbenchlib/benchmarker.py
new file mode 100644
index 0000000..cba86a3
--- /dev/null
+++ b/obbenchlib/benchmarker.py
@@ -0,0 +1,172 @@
+# Copyright 2015 Lars Wirzenius
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-3+ =*=
+
+
+import os
+import pstats
+import shutil
+import StringIO
+import tempfile
+import time
+
+import cliapp
+
+import obbenchlib
+
+
+class Benchmarker(object):
+
+ profile_name = 'obnam.prof'
+
+ def __init__(self):
+ self.statedir = None
+ self.gitdir = None
+ self.resultdir = None
+ self._livedir = None
+ self._repodir = None
+ self._srcdir = None
+ self._config = None
+ self._restored = None
+ self._timestamp = None
+ self.spec = None
+
+ def run_benchmarks(self, ref):
+ tempdir = self.create_temp_dir()
+ self._livedir = self.create_subdir(tempdir, 'live')
+ self._repodir = self.create_subdir(tempdir, 'repo')
+ self._srcdir = self.create_subdir(tempdir, 'src')
+ self._restored = self.create_subdir(tempdir, 'restored')
+ self._config = self.prepare_obnam_config(tempdir)
+ self._timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
+
+ self.prepare_obnam(ref)
+ if not os.path.exists(self.resultdir):
+ os.mkdir(self.resultdir)
+ for benchmark in self.spec['benchmarks']:
+ result = self.run_benchmark(benchmark)
+ result.save_in_dir(self.resultdir)
+ self.remove_temp_dir(tempdir)
+
+ def create_temp_dir(self):
+ return tempfile.mkdtemp()
+
+ def create_subdir(self, parent, child):
+ pathname = os.path.join(parent, child)
+ os.mkdir(pathname)
+ return pathname
+
+ def remove_temp_dir(self, tempdir):
+ shutil.rmtree(tempdir)
+
+ def prepare_obnam_config(self, tempdir):
+ config = os.path.join(tempdir, 'obnam.conf')
+ with open(config, 'w') as f:
+ f.write('[config]\n')
+ f.write('quiet = yes\n')
+ f.write('repository = %s\n' % self._repodir)
+ f.write('root = %s\n' % self._livedir)
+ return config
+
+ def prepare_obnam(self, ref):
+ cliapp.runcmd(['git', 'clone', self.gitdir, self._srcdir])
+ cliapp.runcmd(['git', 'checkout', ref], cwd=self._srcdir)
+ cliapp.runcmd(
+ ['python', 'setup.py', 'build_ext', '-i'],
+ cwd=self._srcdir)
+
+ def run_benchmark(self, benchmark):
+ result = obbenchlib.Result()
+ result.benchmark_name = benchmark['name']
+ result.run_timestamp = self._timestamp
+ result.commit_date = self.get_commit_date()
+ result.commit_timestamp = self.get_commit_timestamp()
+ result.commit_id = self.get_commit_id()
+ for step in benchmark['steps']:
+ result.start_step()
+ self.run_step(result, step)
+ return result
+
+ def get_commit_date(self):
+ timestamp = self.get_commit_timestamp()
+ return timestamp.split()[0]
+
+ def get_commit_timestamp(self):
+ output = cliapp.runcmd(
+ ['git', 'show', '--date=iso', 'HEAD'],
+ cwd=self._srcdir)
+ for line in output.splitlines():
+ if line.startswith('Date:'):
+ return line[len('Date:'):].strip()
+ raise Exception('commit has no Date:')
+
+ def get_commit_id(self):
+ output = cliapp.runcmd(['git', 'rev-parse', 'HEAD'], cwd=self._srcdir)
+ return output.strip()
+
+ def run_step(self, result, step):
+ if 'live' in step:
+ self.run_step_live(result, step['live'])
+ self.run_step_obnam(result, step['obnam'])
+
+ def run_step_live(self, result, shell_command):
+ started = time.time()
+ cliapp.runcmd(['sh', '-euc', shell_command], cwd=self._livedir)
+ duration = time.time() - started
+ result.set_value('live', 'duration', duration)
+
+ def run_step_obnam(self, result, obnam_subcommand):
+ funcs = {
+ 'backup': self.run_obnam_backup,
+ 'restore': self.run_obnam_restore,
+ }
+ started = time.time()
+ funcs[obnam_subcommand]()
+ duration = time.time() - started
+ result.set_value(obnam_subcommand, 'duration', duration)
+ result.set_value(obnam_subcommand, 'profile', self.read_profile())
+ result.set_value(
+ obnam_subcommand, 'profile-text', self.read_profile_text())
+
+ def run_obnam_backup(self):
+ self.run_obnam(['backup'])
+
+ def run_obnam_restore(self):
+        self.run_obnam(['restore', '--to', self._restored])
+
+ def run_obnam(self, args):
+ env = dict(os.environ)
+ env['OBNAM_PROFILE'] = self.profile_name
+ opts = ['--no-default-config', '--config', self._config]
+ cliapp.runcmd(
+ ['./obnam'] + opts + args,
+ env=env,
+ cwd=self._srcdir)
+
+ def read_profile(self):
+ filename = os.path.join(self._srcdir, self.profile_name)
+ with open(filename) as f:
+ return f.read()
+
+ def read_profile_text(self):
+ f = StringIO.StringIO()
+ filename = os.path.join(self._srcdir, self.profile_name)
+ p = pstats.Stats(filename, stream=f)
+ p.strip_dirs()
+ p.sort_stats('cumulative')
+ p.print_stats()
+ p.print_callees()
+ return f.getvalue()
diff --git a/obbenchlib/htmlgen.py b/obbenchlib/htmlgen.py
new file mode 100644
index 0000000..195f2c1
--- /dev/null
+++ b/obbenchlib/htmlgen.py
@@ -0,0 +1,232 @@
+# Copyright 2015 Lars Wirzenius
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-3+ =*=
+
+
+import glob
+import os
+
+import jinja2
+import markdown
+import yaml
+
+import obbenchlib
+
+
+class HtmlGenerator(object):
+
+ def __init__(self):
+ self.statedir = None
+ self.resultdir = None
+ self.spec = None
+
+ def generate_html(self):
+ results = self.load_results()
+
+ env = jinja2.Environment(
+ loader=jinja2.PackageLoader('obbenchlib'),
+ autoescape=lambda foo: True,
+ extensions=['jinja2.ext.autoescape'])
+
+ self.create_html_dir()
+ page_classes = [FrontPage, BenchmarkPage, ProfileData, CssFile]
+ for page_class in page_classes:
+ page = page_class()
+ page.env = env
+ page.results = results
+ page.spec = self.spec
+
+ for filename, data in page.generate():
+ self.write_file(filename, data)
+
+ @property
+ def htmldir(self):
+ return os.path.join(self.statedir, 'html')
+
+ def create_html_dir(self):
+ if not os.path.exists(self.htmldir):
+ os.mkdir(self.htmldir)
+
+ def load_results(self):
+ results = []
+ for filename in glob.glob(os.path.join(self.resultdir, '*.yaml')):
+ with open(filename) as f:
+ results.append(yaml.safe_load(f))
+ return results
+
+ def write_file(self, relative_path, text):
+ filename = os.path.join(self.htmldir, relative_path)
+ with open(filename, 'w') as f:
+ f.write(text)
+
+
+class HtmlPage(object):
+
+ def __init__(self):
+ self.env = None
+ self.results = None
+ self.spec = None
+
+ def format_markdown(self, text):
+ return markdown.markdown(text)
+
+ def get_step_names(self, benchmark):
+ return [step['obnam'] for step in benchmark['steps']]
+
+ def generate(self):
+ raise NotImplementedError()
+
+ def render(self, template_name, variables):
+ template = self.env.get_template(template_name)
+ return template.render(**variables)
+
+
+class FrontPage(HtmlPage):
+
+ def generate(self):
+ variables = {
+ 'description': self.format_markdown(self.spec['description']),
+ 'benchmark_names': [
+ benchmark['name']
+ for benchmark in sorted(self.spec['benchmarks'])
+ ],
+ 'results_table': self.results_table(),
+ }
+ yield 'index.html', self.render('index.j2', variables)
+
+ def results_table(self):
+ table = {}
+ for result in self.results:
+ key = '{commit_timestamp} {commit_id} {run_timestamp}'.format(
+ **result)
+ if key not in table:
+ table[key] = {
+ 'commit_id': result['commit_id'],
+ 'commit_date': result['commit_date'],
+ }
+ table[key][result['benchmark_name']] = self.duration(result)
+
+ return [table[key] for key in sorted(table.keys())]
+
+ def duration(self, result):
+ total = 0
+ for step in result['steps']:
+ for key in step:
+ if key != 'live':
+ total += step[key].get('duration', 0)
+ return total
+
+
+class BenchmarkPage(HtmlPage):
+
+ def generate(self):
+ benchmark_names = [
+ benchmark['name']
+ for benchmark in self.spec['benchmarks']
+ ]
+
+ for benchmark_name in benchmark_names:
+ yield self.generate_benchmark_page(benchmark_name)
+
+ def generate_benchmark_page(self, benchmark_name):
+ benchmark = self.find_benchmark(benchmark_name)
+ table_rows = self.table_rows(benchmark)
+
+ variables = {
+ 'benchmark_name': benchmark_name,
+ 'description': self.format_markdown(
+ benchmark.get('description', '')),
+ 'table_rows': table_rows,
+ 'step_names': self.get_step_names(benchmark),
+ }
+
+ return (
+ '{}.html'.format(benchmark_name),
+ self.render('benchmark.j2', variables)
+ )
+
+ def find_benchmark(self, benchmark_name):
+ for benchmark in self.spec['benchmarks']:
+ if benchmark['name'] == benchmark_name:
+ return benchmark
+ return {}
+
+ def table_rows(self, benchmark):
+ results = self.get_results_for_benchmark(benchmark)
+ step_names = self.get_step_names(benchmark)
+ rows = []
+ for result in results:
+ rows.append(self.table_row(result, step_names))
+ return sorted(rows, key=lambda row: row['commit_timestamp'])
+
+ def get_results_for_benchmark(self, benchmark):
+ return [
+ result
+ for result in self.results
+ if result['benchmark_name'] == benchmark['name']
+ ]
+
+ def table_row(self, result, step_names):
+ row = {
+ 'result_id': result['result_id'],
+ 'commit_timestamp': result['commit_timestamp'],
+ 'commit_date': result['commit_date'],
+ 'commit_id': result['commit_id'],
+ 'total': 0,
+ 'steps': [],
+ }
+ for i, step in enumerate(result['steps']):
+ for step_name in step_names:
+ if step_name in step:
+ row['steps'].append({
+ 'filename_txt': '{}_{}.txt'.format(
+ result['result_id'], i),
+ 'duration': step[step_name]['duration'],
+ })
+ row['total'] += row['steps'][-1]['duration']
+ break
+ return row
+
+
+class ProfileData(HtmlPage):
+
+ def generate(self):
+ for result in self.results:
+ for i, step in enumerate(result['steps']):
+ for operation in step:
+ if 'profile' in step[operation]:
+ yield self.generate_profile_data(
+ result, step, i, operation)
+ yield self.generate_profile_text(
+ result, step, i, operation)
+
+ def generate_profile_data(self, result, step, i, operation):
+ filename = '{}_{}.prof'.format(result['result_id'], i)
+ return filename, step[operation]['profile']
+
+ def generate_profile_text(self, result, step, i, operation):
+ filename = '{}_{}.txt'.format(result['result_id'], i)
+ return filename, step[operation]['profile-text']
+
+
+class CssFile(object):
+
+ def generate(self):
+ filename = os.path.join(
+ os.path.dirname(obbenchlib.__file__), 'obbench.css')
+ with open(filename) as f:
+ data = f.read()
+ yield 'obbench.css', data
diff --git a/benchmark.css b/obbenchlib/obbench.css
index 4e35607..4e35607 100644
--- a/benchmark.css
+++ b/obbenchlib/obbench.css
diff --git a/obbenchlib/result.py b/obbenchlib/result.py
new file mode 100644
index 0000000..8ac24b8
--- /dev/null
+++ b/obbenchlib/result.py
@@ -0,0 +1,57 @@
+# Copyright 2015 Lars Wirzenius
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-3+ =*=
+
+
+import os
+import random
+
+import yaml
+
+
+class Result(object):
+
+ def __init__(self):
+ self.benchmark_name = None
+ self.run_timestamp = None
+ self.commit_date = None
+ self.commit_timestamp = None
+ self.commit_id = None
+ self._result_id = random.randint(0, 2**64-1)
+ self._steps = []
+
+ def start_step(self):
+ self._steps.append({})
+
+ def set_value(self, operation, kind, value):
+ step = self._steps[-1]
+ if operation not in step:
+ step[operation] = {}
+ step[operation][kind] = value
+
+ def save_in_dir(self, dirname):
+ o = {
+ 'result_id': self._result_id,
+ 'benchmark_name': self.benchmark_name,
+ 'run_timestamp': self.run_timestamp,
+ 'commit_date': self.commit_date,
+ 'commit_timestamp': self.commit_timestamp,
+ 'commit_id': self.commit_id,
+ 'steps': self._steps,
+ }
+ filename = os.path.join(dirname, '{}.yaml'.format(self._result_id))
+ with open(filename, 'w') as f:
+ yaml.safe_dump(o, stream=f)
diff --git a/obbenchlib/templates/benchmark.j2 b/obbenchlib/templates/benchmark.j2
new file mode 100644
index 0000000..1a723a1
--- /dev/null
+++ b/obbenchlib/templates/benchmark.j2
@@ -0,0 +1,38 @@
+<html>
+ <head>
+ <title>Obnam benchmark: {{ benchmark_name }}</title>
+ <link rel="stylesheet" href="obbench.css" type="text/css" />
+ </head>
+ <body>
+ <h1>Obnam benchmark: {{ benchmark_name }}</h1>
+
+ <p><a href="index.html">Front page</a></p>
+
+ {{ description|safe }}
+
+ <table>
+ <tr>
+ <th>date</th>
+ <th>commit</th>
+ {% for step_name in step_names %}
+ <th>{{ step_name }}</th>
+ {% endfor %}
+ <th>total</th>
+ </tr>
+ {% for row in table_rows %}
+ <tr>
+ <td>{{ row.commit_date }}</td>
+ <td>{{ '%.7s'|format(row.commit_id) }}</td>
+ {% for step in row.steps %}
+ <td>
+ <a href="{{ step.filename_txt }}">
+ {{ '%.1f'|format(step.duration) }}
+ </a>
+ </td>
+ {% endfor %}
+ <td>{{ '%.1f'|format(row.total) }}</td>
+ </tr>
+ {% endfor %}
+ </table>
+ </body>
+</html>
diff --git a/obbenchlib/templates/index.j2 b/obbenchlib/templates/index.j2
new file mode 100644
index 0000000..3212abb
--- /dev/null
+++ b/obbenchlib/templates/index.j2
@@ -0,0 +1,28 @@
+<html>
+ <head>
+ <title>Obnam benchmarks</title>
+ <link rel="stylesheet" href="obbench.css" type="text/css" />
+ </head>
+ <body>
+ <h1>Obnam benchmarks</h1>
+ {{ description|safe }}
+ <table>
+ <tr>
+ <th>date</th>
+ <th>commit</th>
+ {% for name in benchmark_names %}
+ <th>{{ name }}</th>
+ {% endfor %}
+ </tr>
+ {% for row in results_table %}
+ <tr>
+ <td>{{ row.commit_date }}</td>
+ <td>{{ '%.7s'|format(row.commit_id) }}</td>
+ {% for name in benchmark_names %}
+ <td><a href="{{ name }}.html">{{ '%.1f'|format(row[name]) }}</a></td>
+ {% endfor %}
+ </tr>
+ {% endfor %}
+ </table>
+ </body>
+</html>
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..a44b7dd
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+# Copyright (C) 2015 Lars Wirzenius <liw@liw.fi>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from distutils.core import setup, Extension
+import glob
+
+setup(name='obbench',
+ version='0.1',
+ description='Obnam benchmarking',
+ author='Lars Wirzenius',
+ author_email='liw@liw.fi',
+ scripts=['obbench'],
+ packages=['obbenchlib'],
+ package_data={
+ 'obbenchlib': ['obbench.css', 'templates/*'],
+ },
+ )
diff --git a/test-run b/test-run
deleted file mode 100755
index 3bad01d..0000000
--- a/test-run
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-
-set -eu
-
-rm -rf test.dir
-mkdir test.dir
-mkdir test.dir/html
-mkdir test.dir/reports
-
-./obbench test.yaml master~2 master~1 master
diff --git a/test.yaml b/test.yaml
deleted file mode 100644
index cbdfaea..0000000
--- a/test.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-git: /home/liw/obnam/obnam
-obnam_config:
- repository-format: green-albatross
-benchmarks:
- - name: empty_dir
- description: backup an empty directory
- steps:
- - live: exit 0
- obnam: backup
- reference: 11
- - obnam: restore
- reference: 12
- - name: many_files
- description: backup many small files
- steps:
- - live: genbackupdata . --create=10 --file-size=1
- obnam: backup
- reference: 31
- - obnam: restore
- reference: 32
- - obnam: backup
-reports_dir: /home/liw/obnam/benchmarks/test.dir/reports
-html_dir: /home/liw/obnam/benchmarks/test.dir/html
-publish_html: echo PUBLISHING HTML HERE
-state: /home/liw/obnam/benchmarks/test.dir/state.yaml
diff --git a/yarns/000.yarn b/yarns/000.yarn
new file mode 100644
index 0000000..fd1b150
--- /dev/null
+++ b/yarns/000.yarn
@@ -0,0 +1,6 @@
+---
+title: Obnam benchmarking tool
+author: Lars Wirzenius
+date: in-development version from git
+...
+
diff --git a/yarns/100-intro.yarn b/yarns/100-intro.yarn
new file mode 100644
index 0000000..b59b142
--- /dev/null
+++ b/yarns/100-intro.yarn
@@ -0,0 +1,26 @@
+# Introduction
+
+This is the manual for `obbench`, a tool for benchmarking the
+[Obnam][] backup program. The obbench program is at least the fifth
+generation of the tool: the author seems to have had great difficulty
+figuring out how benchmarking should be done.
+
+[Obnam]: http://obnam.org/
+
+The obbench approach is that its operation is defined in a benchmark
+specification file, rather than code. The code provides some primitive
+operations, which can be combined in various ways in the specification
+file. The goal is that new benchmarks can be specified easily, without
+changing obbench itself.
+
+Additionally, obbench measures various aspects of running Obnam and
+the resulting backup repository, and generates a static website to
+report the results.
+
+## Installation
+
+Obbench can be run from its git source tree (`./obbench`), without any
+build steps required. Additionally, the author will provide Debian
+packages on [code.liw.fi][].
+
+[code.liw.fi]: http://liw.fi/code/
diff --git a/yarns/200-config.yarn b/yarns/200-config.yarn
new file mode 100644
index 0000000..5202b2a
--- /dev/null
+++ b/yarns/200-config.yarn
@@ -0,0 +1,74 @@
+# Configuration
+
+Obbench configuration is split into two: a benchmark specification
+file in YAML, and a user- and site-specific configuration. The former
+specifies the benchmarks to run, and the latter where state and
+results should be put on the local machine.
+
+
+## Benchmark specification YAML file
+
+The benchmark specification file uses [YAML][] as the syntax. The top
+level object is an "associative array", corresponding to a Perl
+hash, a Python dict, or more generally a set of key/value pairs.
+
+[YAML]: https://en.wikipedia.org/wiki/YAML
+
+An example:
+
+ EXAMPLE benchmark specification file
+ git: /home/liw/obnam/obnam
+ obnam_config:
+ repository-format: green-albatross
+ benchmarks:
+ - name: many_files
+ description: backup many small files
+ steps:
+ - live: genbackupdata . --create=10 --file-size=1
+ obnam: backup
+ reference: 31
+ - obnam: restore
+ reference: 32
+ - obnam: backup
+
+The top level keys are:
+
+key |type |description
+--------------|-------------|------
+`git` |URL |location of Obnam git repository
+`obnam_config`|dict |Obnam configuration variables
+`benchmarks` |list of dicts|actual benchmark specifications
+
+There are no default values, so for example the git URL to the Obnam
+repository needs to be specified every time.
+
+The Obnam configuration variables are as specified by Obnam itself.
+Any configuration variables may be used. Note that obbench itself
+provides the following settings for Obnam: `quiet`, `repository`,
+`root`, and `log`. The benchmark specification should probably not
+override those.
+
+Each benchmark has the following key/value pairs.
+
+key |type |description
+-------------|-------------|-----------
+`name` |symbol |name of the benchmark
+`description`|text |description of the benchmark
+`steps` |list of dicts|list of steps in the benchmark
+
+The steps can use the following keys:
+
+key |type |description
+-------|----------|-----------
+`live` |shell |modify live data for step
+`obnam`|subcommand|Obnam subcommand to run
+
+Every step optionally modifies the live data to be backed up. This is
+done by running an arbitrary shell command. Additionally, an Obnam
+subcommand can be run on the live data. Currently, the supported
+subcommands are `backup` and `restore`. Arbitrary Obnam subcommands
+are not supported.
+
+## Local configuration
+
+FIXME. This needs to be implemented and documented.
diff --git a/yarns/300-running.yarn b/yarns/300-running.yarn
new file mode 100644
index 0000000..c196280
--- /dev/null
+++ b/yarns/300-running.yarn
@@ -0,0 +1,70 @@
+# Running obbench
+
+To run obbench we need to set up a benchmark specification. The
+examples below use the [yarn][] syntax, as they also form an automated
+test suite for obbench.
+
+[yarn]: http://liw.fi/cmdtest/
+
+ SCENARIO running obbench
+
+For this example, we use a very simple benchmark specification. Note
+that due to yarn syntax limitations, we use the JSON variant of YAML.
+
+ GIVEN a benchmark specification file simple.yaml containing
+ ... {
+ ... git: "/home/liw/code/obnam/obnam",
+ ... description: "Sample benchmark description.\n\n**There you go.**",
+ ... benchmarks: [
+ ... {
+ ... name: silly,
+ ... description: "It's a silly benchmark",
+ ... steps: [
+ ... {
+ ... live: "genbackupdata --create=1k .",
+ ... obnam: backup
+ ... },
+ ... {
+ ... obnam: restore,
+ ... }
+ ... ]
+ ... },
+ ... {
+ ... name: moresilly,
+ ... description: "It's **another** silly benchmark!",
+ ... steps: [
+ ... {
+ ... live: "genbackupdata --create=1k .",
+ ... obnam: backup
+ ... },
+ ... {
+ ... obnam: backup
+ ... },
+ ... ]
+ ... }
+ ... ]
+ ... }
+
+We also create a local configuration, also using YAML's JSON syntax:
+
+ GIVEN an obbench configuration file local.yaml containing
+ ... {
+ ... config: {
+ ... state: "statedir"
+ ... }
+ ... }
+
+We then run obbench for the first time, for several commits. We run it
+twice for the tip of master, so that we know obbench handles running
+it twice for the same commit (e.g., because of environmental changes).
+
+ WHEN I run obbench --config local.yaml simple.yaml
+ ... master~1 master master
+
+We now have some results.
+
+ THEN directory statedir exists
+ AND directory statedir/git/.git exists
+ AND files matching statedir/results/*.yaml exist
+ AND file statedir/html/index.html exists
+ AND file statedir/html/obbench.css exists
diff --git a/yarns/900-implements.yarn b/yarns/900-implements.yarn
new file mode 100644
index 0000000..a0004cf
--- /dev/null
+++ b/yarns/900-implements.yarn
@@ -0,0 +1,58 @@
+# Scenario step implementations
+
+This chapter contains implementations of the scenario steps, so that
+this manual may be used as an automated test suite for obbench. See
+[yarn][] documentation for understanding this.
+
+We use Python to implement the steps. This requires at least
+version 0.19 of yarn.
+
+## Create benchmark specification file
+
+ IMPLEMENTS GIVEN a benchmark specification file (\S+) containing (.*)
+ import os
+ filename = os.environ['MATCH_1']
+ config_text = os.environ['MATCH_2']
+ with open(filename, 'w') as f:
+ f.write(config_text)
+
+## Create a local configuration file
+
+ IMPLEMENTS GIVEN an obbench configuration file (\S+) containing (.*)
+ import os
+ filename = os.environ['MATCH_1']
+ config_text = os.environ['MATCH_2']
+ with open(filename, 'w') as f:
+ f.write(config_text)
+
+## Run obbench, with arguments
+
+ IMPLEMENTS WHEN I run obbench (.*)
+ import os
+ import cliapp
+ arg_string = os.environ['MATCH_1']
+ args = arg_string.split()
+ srcdir = os.environ['SRCDIR']
+ obbench = os.path.join(srcdir, 'obbench')
+ cliapp.runcmd([obbench] + args, stdout=None, stderr=None)
+
+### Check directory existence
+
+ IMPLEMENTS THEN directory (\S+) exists
+ import os
+ dirname = os.environ['MATCH_1']
+ assert os.path.isdir(dirname), "dir {} doesn't exist".format(dirname)
+
+### Check file existence
+
+ IMPLEMENTS THEN file (\S+) exists
+ import os
+ filename = os.environ['MATCH_1']
+ assert os.path.isfile(filename), "file {} doesn't exist".format(filename)
+
+### Check glob matching
+
+ IMPLEMENTS THEN files matching (\S+) exist
+ import glob, os
+ pattern = os.environ['MATCH_1']
+ assert glob.glob(pattern) != [], "glob {} doesn't match".format(pattern)
diff --git a/yarns/Makefile b/yarns/Makefile
new file mode 100644
index 0000000..4a8cfe1
--- /dev/null
+++ b/yarns/Makefile
@@ -0,0 +1,30 @@
+# Copyright 2013-2015 Lars Wirzenius
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# =*= License: GPL-3+ =*=
+
+
+yarns = $(shell ls *.yarn)
+css = ../obbenchlib/obbench.css
+
+all: yarns.pdf yarns.html
+
+yarns.pdf: $(yarns) Makefile
+ pandoc --chapters --toc -o yarns.pdf $(yarns)
+
+yarns.html: $(yarns) Makefile $(css)
+ pandoc -H $(css) --smart --toc --chapters --number-sections \
+ -V geometry:lettersize \
+ --standalone --self-contained -o yarns.html $(yarns)