author Lars Wirzenius <liw@liw.fi> 2021-01-16 13:03:56 +0000
committer Lars Wirzenius <liw@liw.fi> 2021-01-16 13:03:56 +0000
commit 276b958196df8486cd41542dd38076d2ece074ef (patch)
tree 9c386e13746bd066cd97454fbb18eed7d4345993 /share
parent dd347c2528982cbd083d8109dc4250f5722d40b8 (diff)
parent 27b6f70a2e7556fc3012b173602d258f33cf0a7e (diff)
download subplot-276b958196df8486cd41542dd38076d2ece074ef.tar.gz
Merge branch 'vfs' into 'main'
Resources - virtual filesystem

See merge request larswirzenius/subplot!122
Diffstat (limited to 'share')
-rw-r--r-- share/bash/template/assert.sh 19
-rw-r--r-- share/bash/template/cap.sh 17
-rw-r--r-- share/bash/template/ctx.sh 17
-rw-r--r-- share/bash/template/dict.sh 50
-rw-r--r-- share/bash/template/files.sh 22
-rw-r--r-- share/bash/template/template.sh.tera 163
-rw-r--r-- share/bash/template/template.yaml 8
-rw-r--r-- share/python/lib/daemon.md 81
-rw-r--r-- share/python/lib/daemon.py 189
-rw-r--r-- share/python/lib/daemon.yaml 27
-rw-r--r-- share/python/lib/files.md 82
-rw-r--r-- share/python/lib/files.py 158
-rw-r--r-- share/python/lib/files.yaml 62
-rw-r--r-- share/python/lib/runcmd.md 170
-rw-r--r-- share/python/lib/runcmd.py 252
-rw-r--r-- share/python/lib/runcmd.yaml 83
-rw-r--r-- share/python/template/asserts.py 23
-rw-r--r-- share/python/template/context.py 95
-rw-r--r-- share/python/template/context_tests.py 156
-rw-r--r-- share/python/template/encoding.py 12
-rw-r--r-- share/python/template/encoding_tests.py 19
-rw-r--r-- share/python/template/files.py 23
-rw-r--r-- share/python/template/main.py 97
-rw-r--r-- share/python/template/scenarios.py 97
-rw-r--r-- share/python/template/template.py.tera 78
-rw-r--r-- share/python/template/template.yaml 9
-rw-r--r-- share/rust/lib/datadir.yaml 13
-rw-r--r-- share/rust/lib/files.yaml 68
-rw-r--r-- share/rust/lib/runcmd.yaml 89
-rw-r--r-- share/rust/template/template.rs.tera 70
-rw-r--r-- share/rust/template/template.yaml 2
31 files changed, 2251 insertions, 0 deletions
diff --git a/share/bash/template/assert.sh b/share/bash/template/assert.sh
new file mode 100644
index 0000000..43bb11b
--- /dev/null
+++ b/share/bash/template/assert.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+# Check two values for equality and give error if they are not equal
+assert_eq() {
+ if ! diff -u <(echo "$1") <(echo "$2")
+ then
+ echo "expected values to be identical, but they're not"
+ exit 1
+ fi
+}
+
+# Check first value contains second value.
+assert_contains() {
+ if ! echo "$1" | grep -F "$2" > /dev/null
+ then
+ echo "expected first value to contain second value"
+ exit 1
+ fi
+}
diff --git a/share/bash/template/cap.sh b/share/bash/template/cap.sh
new file mode 100644
index 0000000..8ea35d8
--- /dev/null
+++ b/share/bash/template/cap.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# Store step captures for calling the corresponding functions.
+
+cap_new() {
+ dict_new _cap
+}
+
+cap_set()
+{
+ dict_set _cap "$1" "$2"
+}
+
+cap_get()
+{
+ dict_get _cap "$1"
+}
diff --git a/share/bash/template/ctx.sh b/share/bash/template/ctx.sh
new file mode 100644
index 0000000..c9401c6
--- /dev/null
+++ b/share/bash/template/ctx.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# A context abstraction using dictionaries.
+
+ctx_new() {
+ dict_new _ctx
+}
+
+ctx_set()
+{
+ dict_set _ctx "$1" "$2"
+}
+
+ctx_get()
+{
+ dict_get _ctx "$1"
+}
diff --git a/share/bash/template/dict.sh b/share/bash/template/dict.sh
new file mode 100644
index 0000000..aea5b96
--- /dev/null
+++ b/share/bash/template/dict.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Simple dictionary abstraction. All values are stored in files so
+# they can more easily be inspected.
+
+dict_new() {
+ local name="$1"
+ rm -rf "$name"
+ mkdir "$name"
+}
+
+dict_has() {
+ local name="$1"
+ local key="$2"
+ local f="$name/$key"
+ test -e "$f"
+}
+
+dict_get() {
+ local name="$1"
+ local key="$2"
+ local f="$name/$key"
+ cat "$f"
+}
+
+dict_get_default() {
+ local name="$1"
+ local key="$2"
+ local default="$3"
+ local f="$name/$key"
+ if [ -e "$f" ]
+ then
+ cat "$f"
+ else
+ echo "$default"
+ fi
+}
+
+dict_set() {
+ local name="$1"
+ local key="$2"
+ local value="$3"
+ local f="$name/$key"
+ echo "$value" > "$f"
+}
+
+dict_keys() {
+ local name="$1"
+ ls -1 "$name"
+}
diff --git a/share/bash/template/files.sh b/share/bash/template/files.sh
new file mode 100644
index 0000000..50c935d
--- /dev/null
+++ b/share/bash/template/files.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Store files embedded in the markdown input.
+
+files_new() {
+ dict_new _files
+}
+
+files_set() {
+ dict_set _files "$1" "$2"
+}
+
+files_get() {
+ dict_get _files "$1"
+}
+
+
+# Decode a base64 encoded string.
+
+decode_base64() {
+ echo "$1" | base64 -d
+}
diff --git a/share/bash/template/template.sh.tera b/share/bash/template/template.sh.tera
new file mode 100644
index 0000000..5e92371
--- /dev/null
+++ b/share/bash/template/template.sh.tera
@@ -0,0 +1,163 @@
+#!/usr/bin/env bash
+
+set -eu -o pipefail
+
+#############################################################################
+# Functions that implement steps.
+
+{% for func in functions %}
+#----------------------------------------------------------------------------
+# This code comes from: {{ func.source }}
+
+{{ func.code }}
+{% endfor %}
+
+
+#############################################################################
+# Scaffolding for generated test program.
+
+{% include "dict.sh" %}
+{% include "ctx.sh" %}
+{% include "cap.sh" %}
+{% include "files.sh" %}
+{% include "assert.sh" %}
+
+# Remember where we started from. The step functions may need to refer
+# to files there.
+srcdir="$(pwd)"
+echo "srcdir $srcdir"
+
+# Create a new temporary directory and chdir there. This allows step
+# functions to create new files in the current working directory
+# without having to be so careful.
+_datadir="$(mktemp -d)"
+echo "datadir $_datadir"
+cd "$_datadir"
+
+
+# Store test data files that were embedded in the source document.
+# Base64 encoding is used to allow arbitrary data.
+
+files_new
+{% for file in files %}
+# {{ file.filename }}
+filename="$(decode_base64 '{{ file.filename | base64 }}')"
+contents="$(decode_base64 '{{ file.contents | base64 }}')"
+files_set "$filename" "$contents"
+{% endfor %}
+
+
+#############################################################################
+# Code to implement the scenarios.
+
+{% for scenario in scenarios %}
+######################################
+# Scenario: {{ scenario.title }}
+scenario_{{ loop.index }}() {
+ local title scendir step name text ret cleanups steps
+ declare -a cleanups
+ declare -a steps
+
+ title="$(decode_base64 '{{ scenario.title | base64 }}')"
+ echo "scenario: $title"
+
+ scendir="$(mktemp -d -p "$_datadir")"
+ cd "$scendir"
+ export HOME="$scendir"
+ export TMPDIR="$scendir"
+
+ ctx_new
+ cleanups[0]=''
+ steps[0]=''
+ ret=0
+
+ {% for step in scenario.steps %}
+ if [ "$ret" = 0 ]
+ then
+ # Step: {{ step.text }}
+ step="{{ step.kind | lower }} $(decode_base64 '{{ step.text | base64 }}')"
+ echo " step: $step"
+
+ cap_new
+ {% for part in step.parts %}{% if part.CapturedText is defined -%}
+ name="$(decode_base64 '{{ part.CapturedText.name | base64 }}')"
+ text="$(decode_base64 '{{ part.CapturedText.text | base64 }}')"
+ cap_set "$name" "$text"
+ {% endif -%}
+ {% endfor -%}
+ if {{ step.function }}
+ then
+ cleanup='{{ step.cleanup }}'
+ if [ "$cleanup" != "" ]
+ then
+ {% raw %}
+ i=${#cleanups[@]}
+ cleanups[$i]="$cleanup"
+ steps[$i]="$step"
+ {% endraw %}
+ fi
+ else
+ ret=$?
+ fi
+ fi
+ {% endfor %}
+
+ {% raw %}
+ echo "${!cleanups[*]}" | tr ' ' '\n' | tac | while read i
+ do
+ step="${steps[$i]}"
+ func="${cleanups[$i]}"
+ echo " cleanup: $step"
+ $func
+ done
+ {% endraw %}
+
+ return $ret
+}
+{% endfor %}
+
+#############################################################################
+# Make the environment minimal.
+
+# Write to stdout the names of all environment variables, one per
+# line. Handle also cases where the value of an environment variable
+# contains newlines.
+envnames()
+{
+ env -0 | xargs -0 -n1 -i'{}' sh -c "printf '%s\n' '{}' | head -n1 | sed 's/=.*//'"
+}
+
+# Unset all environment variables. At the beginning of each scenario,
+# some additional ones will be set to the per-scenario directory.
+unset $(envnames)
+export PATH=/bin:/usr/bin
+export SHELL=/bin/sh
+
+
+#############################################################################
+# Run the scenarios.
+
+if [ "$#" = 0 ]
+then {% for scenario in scenarios %}
+ scenario_{{ loop.index }}{% endfor %}
+else
+
+
+ for pattern in "$@"
+ do
+ pattern="$(echo "$pattern" | tr A-Z a-z)"
+{% for scenario in scenarios %}
+ if echo "{{ scenario.title | lower }}" | grep -F -e "$pattern" > /dev/null
+ then
+ scenario_{{ loop.index }}
+ fi
+{% endfor %}
+ done
+fi
+
+
+#############################################################################
+# Clean up temporary directory and report success.
+
+rm -rf "$_datadir"
+echo "OK, all scenarios finished successfully"
diff --git a/share/bash/template/template.yaml b/share/bash/template/template.yaml
new file mode 100644
index 0000000..01269dd
--- /dev/null
+++ b/share/bash/template/template.yaml
@@ -0,0 +1,8 @@
+template: template.sh.tera
+run: bash
+helpers:
+ - assert.sh
+ - cap.sh
+ - ctx.sh
+ - dict.sh
+ - files.sh
diff --git a/share/python/lib/daemon.md b/share/python/lib/daemon.md
new file mode 100644
index 0000000..4d91bd1
--- /dev/null
+++ b/share/python/lib/daemon.md
@@ -0,0 +1,81 @@
+# Introduction
+
+The [Subplot][] library `daemon` for Python provides scenario steps
+and their implementations for running a background process and
+terminating it at the end of the scenario.
+
+[Subplot]: https://subplot.liw.fi/
+
+This document explains the acceptance criteria for the library and how
+they're verified. It uses the steps and functions from the
+`lib/daemon` library. The scenarios all have the same structure: start a
+background process, verify that it is running, and stop it at the end.
+
+# Daemon is started and terminated
+
+This scenario starts a background process, verifies it's started, and
+verifies it's terminated after the scenario ends.
+
+~~~scenario
+given there is no "/bin/sleep 12765" process
+when I start "/bin/sleep 12765" as a background process as sleepyhead
+then a process "/bin/sleep 12765" is running
+when I stop background process sleepyhead
+then there is no "/bin/sleep 12765" process
+~~~
+
+
+# Daemon takes a while to open its port
+
+[netcat]: https://en.wikipedia.org/wiki/Netcat
+
+This scenario verifies that the daemon library correctly waits for a
+background process that takes a while to start listening on its port. We
+do this by using [netcat][] to start a dummy daemon, after a short
+delay. The lib/daemon code will wait for netcat to open its port, by
+connecting to the port. It then closes the connection, which causes netcat
+to terminate.
+
+~~~scenario
+given a daemon helper shell script slow-start-daemon.sh
+given there is no "slow-start-daemon.sh" process
+when I try to start "./slow-start-daemon.sh" as slow-daemon, on port 8888
+when I stop background process slow-daemon
+then there is no "slow-start-daemon.sh" process
+~~~
+
+~~~{#slow-start-daemon.sh .file .sh .numberLines}
+#!/bin/bash
+
+set -euo pipefail
+
+sleep 2
+netcat -l 8888 > /dev/null
+echo OK
+~~~
+
+# Daemon never opens the intended port
+
+This scenario verifies that if the background process never starts
+listening on its port, the daemon library handles that correctly.
+
+~~~scenario
+given there is no "/bin/sleep 12765" process
+when I try to start "/bin/sleep 12765" as sleepyhead, on port 8888
+then starting daemon fails with "ConnectionRefusedError"
+then a process "/bin/sleep 12765" is running
+when I stop background process sleepyhead
+then there is no "/bin/sleep 12765" process
+~~~
+
+
+---
+title: Acceptance criteria for the lib/daemon Subplot library
+author: The Subplot project
+bindings:
+- daemon.yaml
+template: python
+functions:
+- daemon.py
+- runcmd.py
+...
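For orientation, here is a minimal hand-written sketch of the step-function calls the generated Python test program would make for the first scenario above; it assumes the `Context` class and the `runcmd_*` helpers from the Python template are already in scope, as they are in generated code, and is not part of the commit itself.

~~~python
# Hypothetical, hand-written equivalent of the generated code for the
# "Daemon is started and terminated" scenario. Assumes Context and the
# runcmd_* helpers from the Python template are in scope.
ctx = Context()
daemon_no_such_process(ctx, args="/bin/sleep 12765")
_daemon_start(ctx, path="/bin/sleep", args="12765", name="sleepyhead")
daemon_process_exists(ctx, args="/bin/sleep 12765")
daemon_stop(ctx, name="sleepyhead")
daemon_no_such_process(ctx, args="/bin/sleep 12765")
~~~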
diff --git a/share/python/lib/daemon.py b/share/python/lib/daemon.py
new file mode 100644
index 0000000..4ba1987
--- /dev/null
+++ b/share/python/lib/daemon.py
@@ -0,0 +1,189 @@
+import logging
+import os
+import signal
+import socket
+import subprocess
+import time
+
+
+# A helper function for testing lib/daemon itself.
+def _daemon_shell_script(ctx, filename=None):
+ get_file = globals()["get_file"]
+ data = get_file(filename)
+ with open(filename, "wb") as f:
+ f.write(data)
+ os.chmod(filename, 0o755)
+
+
+# Start a daemon that will open a port on localhost.
+def daemon_start_on_port(ctx, path=None, args=None, name=None, port=None):
+ _daemon_start(ctx, path=path, args=args, name=name)
+ daemon_wait_for_port("localhost", port)
+
+
+# Start a daemon after a little wait. This is used only for testing the
+# port-waiting code.
+def _daemon_start_soonish(ctx, path=None, args=None, name=None, port=None):
+ _daemon_start(ctx, path=os.path.abspath(path), args=args, name=name)
+ daemon = ctx.declare("_daemon")
+
+ # Store the PID of the process we just started so that _daemon_stop_soonish
+ # can kill it during the cleanup phase. This works around the Subplot
+ # Python template not giving the step captures to cleanup functions. Note
+ # that this code assumes at most one _soonish function is called.
+ daemon["_soonish"] = daemon[name]["pid"]
+
+ try:
+ daemon_wait_for_port("localhost", port)
+ except Exception as e:
+ daemon["_start_error"] = repr(e)
+
+ logging.info("pgrep: %r", _daemon_pgrep(path))
+
+
+def _daemon_stop_soonish(ctx):
+ ns = ctx.declare("_daemon")
+ pid = ns["_soonish"]
+ logging.debug(f"Stopping soonishly-started daemon, {pid}")
+ signo = signal.SIGKILL
+ try:
+ os.kill(pid, signo)
+ except ProcessLookupError:
+ logging.warning("Process did not actually exist (anymore?)")
+
+
+# Start a daemon, get its PID. Don't wait for a port or anything. This is
+# meant for background processes that don't have a port. Useful for testing the
+# lib/daemon library of Subplot, but not much else.
+def _daemon_start(ctx, path=None, args=None, name=None):
+ runcmd_run = globals()["runcmd_run"]
+ runcmd_exit_code_is = globals()["runcmd_exit_code_is"]
+ runcmd_get_exit_code = globals()["runcmd_get_exit_code"]
+ runcmd_get_stderr = globals()["runcmd_get_stderr"]
+ runcmd_prepend_to_path = globals()["runcmd_prepend_to_path"]
+
+ argv = [path] + args.split()
+
+ logging.debug(f"Starting daemon {name}")
+ logging.debug(f" ctx={ctx.as_dict()}")
+ logging.debug(f" name={name}")
+ logging.debug(f" path={path}")
+ logging.debug(f" args={args}")
+ logging.debug(f" argv={argv}")
+
+ ns = ctx.declare("_daemon")
+
+ this = ns[name] = {
+ "pid-file": f"{name}.pid",
+ "stderr": f"{name}.stderr",
+ "stdout": f"{name}.stdout",
+ }
+
+ # Debian installs `daemonize` to /usr/sbin, which isn't part of the minimal
+ # environment that Subplot sets up. So we add /usr/sbin to the PATH.
+ runcmd_prepend_to_path(ctx, "/usr/sbin")
+ runcmd_run(
+ ctx,
+ [
+ "daemonize",
+ "-c",
+ os.getcwd(),
+ "-p",
+ this["pid-file"],
+ "-e",
+ this["stderr"],
+ "-o",
+ this["stdout"],
+ ]
+ + argv,
+ )
+
+ # Check that daemonize has exited OK. If it hasn't, it didn't start the
+ # background process at all. If so, log the stderr in case there was
+ # something useful there for debugging.
+ exit = runcmd_get_exit_code(ctx)
+ if exit != 0:
+ stderr = runcmd_get_stderr(ctx)
+ logging.error(f"daemon {name} stderr: {stderr}")
+ runcmd_exit_code_is(ctx, 0)
+
+ # Get the pid of the background process, from the pid file created by
+ # daemonize. We don't need to wait for it, since we know daemonize already
+ # exited. If it isn't there now, it won't appear later.
+ if not os.path.exists(this["pid-file"]):
+ raise Exception("daemonize didn't create a PID file")
+
+ this["pid"] = _daemon_wait_for_pid(this["pid-file"], 10.0)
+
+ logging.debug(f"Started daemon {name}")
+ logging.debug(f" pid={this['pid']}")
+ logging.debug(f" ctx={ctx.as_dict()}")
+
+
+def _daemon_wait_for_pid(filename, timeout):
+ start = time.time()
+ while time.time() < start + timeout:
+ with open(filename) as f:
+ data = f.read().strip()
+ if data:
+ return int(data)
+ raise Exception("daemonize created a PID file without a PID")
+
+
+def daemon_wait_for_port(host, port, timeout=5.0):
+ addr = (host, port)
+ until = time.time() + timeout
+ while True:
+ try:
+ s = socket.create_connection(addr, timeout=timeout)
+ s.close()
+ return
+ except socket.timeout:
+ logging.error(f"daemon did not respond at port {port} within {timeout} seconds")
+ raise
+ except socket.error as e:
+ logging.info(f"could not connect to daemon at {port}: {e}")
+ pass
+ if time.time() >= until:
+ logging.error(f"could not connect to daemon at {port} within {timeout} seconds")
+ raise ConnectionRefusedError()
+ # Sleep a bit to avoid consuming too much CPU while busy-waiting.
+ time.sleep(0.1)
+
+
+# Stop a daemon.
+def daemon_stop(ctx, name=None):
+ logging.debug(f"Stopping daemon {name}")
+ ns = ctx.declare("_daemon")
+ logging.debug(f" ns={ns}")
+ pid = ns[name]["pid"]
+ signo = signal.SIGKILL
+
+ logging.debug(f"Terminating process {pid} with signal {signo}")
+ try:
+ os.kill(pid, signo)
+ except ProcessLookupError:
+ logging.warning("Process did not actually exist (anymore?)")
+
+
+def daemon_no_such_process(ctx, args=None):
+ assert not _daemon_pgrep(args)
+
+
+def daemon_process_exists(ctx, args=None):
+ assert _daemon_pgrep(args)
+
+
+def _daemon_pgrep(pattern):
+ logging.info(f"checking if process exists: pattern={pattern}")
+ exit = subprocess.call(["pgrep", "-laf", pattern])
+ logging.info(f"exit code: {exit}")
+ return exit == 0
+
+
+def daemon_start_fails_with(ctx, message=None):
+ daemon = ctx.declare("_daemon")
+ error = daemon["_start_error"]
+ logging.debug(f"daemon_start_fails_with: error={error!r}")
+ logging.debug(f"daemon_start_fails_with: message={message!r}")
+ assert message.lower() in error.lower()
diff --git a/share/python/lib/daemon.yaml b/share/python/lib/daemon.yaml
new file mode 100644
index 0000000..f72ba1a
--- /dev/null
+++ b/share/python/lib/daemon.yaml
@@ -0,0 +1,27 @@
+- given: there is no "{args:text}" process
+ function: daemon_no_such_process
+
+- given: a daemon helper shell script {filename}
+ function: _daemon_shell_script
+
+- when: I start "{path}{args:text}" as a background process as {name}, on port {port}
+ function: daemon_start_on_port
+
+- when: I try to start "{path}{args:text}" as {name}, on port {port}
+ function: _daemon_start_soonish
+ cleanup: _daemon_stop_soonish
+
+- when: I start "{path}{args:text}" as a background process as {name}
+ function: _daemon_start
+
+- when: I stop background process {name}
+ function: daemon_stop
+
+- then: a process "{args:text}" is running
+ function: daemon_process_exists
+
+- then: there is no "{args:text}" process
+ function: daemon_no_such_process
+
+- then: starting daemon fails with "{message:text}"
+ function: daemon_start_fails_with
diff --git a/share/python/lib/files.md b/share/python/lib/files.md
new file mode 100644
index 0000000..68ef1ac
--- /dev/null
+++ b/share/python/lib/files.md
@@ -0,0 +1,82 @@
+# Introduction
+
+The [Subplot][] library `files` provides scenario steps and their
+implementations for managing files on the file system during tests.
+The library consists of a bindings file `lib/files.yaml` and
+implementations in Python in `lib/files.py`.
+
+[Subplot]: https://subplot.liw.fi/
+
+This document explains the acceptance criteria for the library and how
+they're verified. It uses the steps and functions from the `files`
+library.
+
+# Create on-disk files from embedded files
+
+Subplot allows the source document to embed test files, and the
+`files` library provides steps to create real, on-disk files from
+the embedded files.
+
+~~~scenario
+given file hello.txt
+then file hello.txt exists
+and file hello.txt contains "hello, world"
+and file other.txt does not exist
+given file other.txt from hello.txt
+then file other.txt exists
+and files hello.txt and other.txt match
+and only files hello.txt, other.txt exist
+~~~
+
+~~~{#hello.txt .file .numberLines}
+hello, world
+~~~
+
+
+# File metadata
+
+These steps create files and manage their metadata.
+
+~~~scenario
+given file hello.txt
+when I remember metadata for file hello.txt
+then file hello.txt has same metadata as before
+
+when I write "yo" to file hello.txt
+then file hello.txt has different metadata from before
+~~~
+
+# File modification time
+
+These steps manipulate and test file modification times.
+
+~~~scenario
+given file foo.dat has modification time 1970-01-02 03:04:05
+then file foo.dat has a very old modification time
+
+when I touch file foo.dat
+then file foo.dat has a very recent modification time
+~~~
+
+
+# File contents
+
+These steps verify contents of files.
+
+~~~scenario
+given file hello.txt
+then file hello.txt contains "hello, world"
+and file hello.txt matches regex "hello, .*"
+and file hello.txt matches regex /hello, .*/
+~~~
+
+
+---
+title: Acceptance criteria for the files Subplot library
+author: The Subplot project
+template: python
+bindings:
+- files.yaml
+functions:
+- files.py
+...
diff --git a/share/python/lib/files.py b/share/python/lib/files.py
new file mode 100644
index 0000000..ec37b9d
--- /dev/null
+++ b/share/python/lib/files.py
@@ -0,0 +1,158 @@
+import logging
+import os
+import re
+import time
+
+
+def files_create_from_embedded(ctx, filename=None):
+ files_create_from_embedded_with_other_name(
+ ctx, filename_on_disk=filename, embedded_filename=filename
+ )
+
+
+def files_create_from_embedded_with_other_name(
+ ctx, filename_on_disk=None, embedded_filename=None
+):
+ get_file = globals()["get_file"]
+ with open(filename_on_disk, "wb") as f:
+ f.write(get_file(embedded_filename))
+
+
+def files_create_from_text(ctx, filename=None, text=None):
+ with open(filename, "w") as f:
+ f.write(text)
+
+
+def files_file_exists(ctx, filename=None):
+ assert_eq = globals()["assert_eq"]
+ assert_eq(os.path.exists(filename), True)
+
+
+def files_file_does_not_exist(ctx, filename=None):
+ assert_eq = globals()["assert_eq"]
+ assert_eq(os.path.exists(filename), False)
+
+
+def files_only_these_exist(ctx, filenames=None):
+ assert_eq = globals()["assert_eq"]
+ filenames = filenames.replace(",", "").split()
+ assert_eq(set(os.listdir(".")), set(filenames))
+
+
+def files_file_contains(ctx, filename=None, data=None):
+ assert_eq = globals()["assert_eq"]
+ with open(filename, "rb") as f:
+ actual = f.read()
+ actual = actual.decode("UTF-8")
+ assert_eq(data in actual, True)
+
+
+def files_file_matches_regex(ctx, filename=None, regex=None):
+ assert_eq = globals()["assert_eq"]
+ with open(filename) as f:
+ content = f.read()
+ m = re.search(regex, content)
+ if m is None:
+ logging.debug(f"files_file_matches_regex: no match")
+ logging.debug(f" filenamed: {filename}")
+ logging.debug(f" regex: {regex}")
+ logging.debug(f" content: {regex}")
+ logging.debug(f" match: {m}")
+ assert_eq(bool(m), True)
+
+
+def files_match(ctx, filename1=None, filename2=None):
+ assert_eq = globals()["assert_eq"]
+ with open(filename1, "rb") as f:
+ data1 = f.read()
+ with open(filename2, "rb") as f:
+ data2 = f.read()
+ assert_eq(data1, data2)
+
+
+def files_touch_with_timestamp(
+ ctx,
+ filename=None,
+ year=None,
+ month=None,
+ day=None,
+ hour=None,
+ minute=None,
+ second=None,
+):
+ t = (
+ int(year),
+ int(month),
+ int(day),
+ int(hour),
+ int(minute),
+ int(second),
+ -1,
+ -1,
+ -1,
+ )
+ ts = time.mktime(t)
+ _files_touch(filename, ts)
+
+
+def files_touch(ctx, filename=None):
+ _files_touch(filename, None)
+
+
+def _files_touch(filename, ts):
+ if not os.path.exists(filename):
+ open(filename, "w").close()
+ times = None
+ if ts is not None:
+ times = (ts, ts)
+ os.utime(filename, times=times)
+
+
+def files_mtime_is_recent(ctx, filename=None):
+ st = os.stat(filename)
+ age = abs(st.st_mtime - time.time())
+ assert age < 1.0
+
+
+def files_mtime_is_ancient(ctx, filename=None):
+ st = os.stat(filename)
+ age = abs(st.st_mtime - time.time())
+ year = 365 * 24 * 60 * 60
+ required = 39 * year
+ logging.debug(f"ancient? mtime={st.st_mtime} age={age} required={required}")
+ assert age > required
+
+
+def files_remember_metadata(ctx, filename=None):
+ meta = _files_remembered(ctx)
+ meta[filename] = _files_get_metadata(filename)
+ logging.debug("files_remember_metadata:")
+ logging.debug(f" meta: {meta}")
+ logging.debug(f" ctx: {ctx}")
+
+
+# Check that current metadata of a file is as stored in the context.
+def files_has_remembered_metadata(ctx, filename=None):
+ assert_eq = globals()["assert_eq"]
+ meta = _files_remembered(ctx)
+ logging.debug("files_has_remembered_metadata:")
+ logging.debug(f" meta: {meta}")
+ logging.debug(f" ctx: {ctx}")
+ assert_eq(meta[filename], _files_get_metadata(filename))
+
+
+def files_has_different_metadata(ctx, filename=None):
+ assert_ne = globals()["assert_ne"]
+ meta = _files_remembered(ctx)
+ assert_ne(meta[filename], _files_get_metadata(filename))
+
+
+def _files_remembered(ctx):
+ ns = ctx.declare("_files")
+ return ns.get("remembered-metadata", {})
+
+
+def _files_get_metadata(filename):
+ st = os.lstat(filename)
+ keys = ["st_dev", "st_gid", "st_ino", "st_mode", "st_mtime", "st_size", "st_uid"]
+ return {key: getattr(st, key) for key in keys}
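A standalone sketch of what `files_touch_with_timestamp` above does with the captured date and time fields; the filename and values are examples only.

~~~python
import os
import time

# How "given file foo.dat has modification time 1970-01-02 03:04:05" becomes an
# mtime: build a struct_time-style tuple (wildcards for weekday/yearday/DST),
# convert it to a timestamp, and apply it with os.utime().
t = (1970, 1, 2, 3, 4, 5, -1, -1, -1)
ts = time.mktime(t)
open("foo.dat", "w").close()
os.utime("foo.dat", times=(ts, ts))
print(os.stat("foo.dat").st_mtime)
~~~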
diff --git a/share/python/lib/files.yaml b/share/python/lib/files.yaml
new file mode 100644
index 0000000..be69920
--- /dev/null
+++ b/share/python/lib/files.yaml
@@ -0,0 +1,62 @@
+- given: file {filename}
+ function: files_create_from_embedded
+ types:
+ filename: file
+
+- given: file {filename_on_disk} from {embedded_filename}
+ function: files_create_from_embedded_with_other_name
+ types:
+ embedded_filename: file
+
+- given: file {filename} has modification time {year}-{month}-{day} {hour}:{minute}:{second}
+ function: files_touch_with_timestamp
+
+- when: I write "(?P<text>.*)" to file (?P<filename>\S+)
+ regex: true
+ function: files_create_from_text
+
+- when: I remember metadata for file {filename}
+ function: files_remember_metadata
+
+- when: I touch file {filename}
+ function: files_touch
+
+- then: file {filename} exists
+ function: files_file_exists
+
+- then: file {filename} does not exist
+ function: files_file_does_not_exist
+
+- then: only files (?P<filenames>.+) exist
+ function: files_only_these_exist
+ regex: true
+
+- then: file (?P<filename>\S+) contains "(?P<data>.*)"
+ regex: true
+ function: files_file_contains
+
+- then: file (?P<filename>\S+) matches regex /(?P<regex>.*)/
+ regex: true
+ function: files_file_matches_regex
+
+- then: file (?P<filename>\S+) matches regex "(?P<regex>.*)"
+ regex: true
+ function: files_file_matches_regex
+
+- then: files {filename1} and {filename2} match
+ function: files_match
+
+- then: file {filename} has same metadata as before
+ function: files_has_remembered_metadata
+
+- then: file {filename} has different metadata from before
+ function: files_has_different_metadata
+
+- then: file {filename} has changed from before
+ function: files_has_different_metadata
+
+- then: file {filename} has a very recent modification time
+ function: files_mtime_is_recent
+
+- then: file {filename} has a very old modification time
+ function: files_mtime_is_ancient
diff --git a/share/python/lib/runcmd.md b/share/python/lib/runcmd.md
new file mode 100644
index 0000000..a9d4ed4
--- /dev/null
+++ b/share/python/lib/runcmd.md
@@ -0,0 +1,170 @@
+# Introduction
+
+The [Subplot][] library `runcmd` for Python provides scenario steps
+and their implementations for running Unix commands and examining the
+results. The library consists of a bindings file `lib/runcmd.yaml` and
+implementations in Python in `lib/runcmd.py`. There is no Bash
+version.
+
+[Subplot]: https://subplot.liw.fi/
+
+This document explains the acceptance criteria for the library and how
+they're verified. It uses the steps and functions from the
+`lib/runcmd` library. The scenarios all have the same structure: run a
+command, then examine the exit code, standard output (stdout for
+short), or standard error output (stderr) of the command.
+
+The scenarios use the Unix commands `/bin/true` and `/bin/false` to
+generate exit codes, and `/bin/echo` to produce stdout. To generate
+stderr, they use the little helper script below.
+
+~~~{#err.sh .file .sh .numberLines}
+#!/bin/sh
+echo "$@" 1>&2
+~~~
+
+# Check exit code
+
+These scenarios verify the exit code. To make it easier to write
+scenarios in language that flows more naturally, there are a couple of
+variations.
+
+## Successful execution
+
+~~~scenario
+when I run /bin/true
+then exit code is 0
+and command is successful
+~~~
+
+## Failed execution
+
+~~~scenario
+when I try to run /bin/false
+then exit code is not 0
+and command fails
+~~~
+
+# Check output has what we want
+
+These scenarios verify that stdout or stderr do have something we want
+to have.
+
+## Check stdout is exactly as wanted
+
+Note that the string is surrounded by double quotes to make it clear
+to the reader what's inside. Also, C-style string escapes are
+understood.
+
+~~~scenario
+when I run /bin/echo hello, world
+then stdout is exactly "hello, world\n"
+~~~
+
+## Check stderr is exactly as wanted
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hello, world
+then stderr is exactly "hello, world\n"
+~~~
+
+## Check stdout using sub-string search
+
+Exact string comparisons are not always enough, so we can verify a
+sub-string is in output.
+
+~~~scenario
+when I run /bin/echo hello, world
+then stdout contains "world\n"
+and exit code is 0
+~~~
+
+## Check stderr using sub-string search
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hello, world
+then stderr contains "world\n"
+~~~
+
+## Check stdout using regular expressions
+
+Fixed strings are not always enough, so we can verify output matches a
+regular expression. Note that the regular expression is not delimited
+and does not have any C-style string escapes decoded.
+
+~~~scenario
+when I run /bin/echo hello, world
+then stdout matches regex world$
+~~~
+
+## Check stderr using regular expressions
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hello, world
+then stderr matches regex world$
+~~~
+
+# Check output doesn't have what we want to avoid
+
+These scenarios verify that the stdout or stderr do not
+have something we want to avoid.
+
+## Check stdout is not exactly something
+
+~~~scenario
+when I run /bin/echo hi
+then stdout isn't exactly "hello, world\n"
+~~~
+
+## Check stderr is not exactly something
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hi
+then stderr isn't exactly "hello, world\n"
+~~~
+
+## Check stdout doesn't contain sub-string
+
+~~~scenario
+when I run /bin/echo hi
+then stdout doesn't contain "world"
+~~~
+
+## Check stderr doesn't contain sub-string
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hi
+then stderr doesn't contain "world"
+~~~
+
+## Check stdout doesn't match regular expression
+
+~~~scenario
+when I run /bin/echo hi
+then stdout doesn't match regex world$
+
+~~~
+
+## Check stderr doesn't match regular expressions
+
+~~~scenario
+given helper script err.sh for runcmd
+when I run sh err.sh hi
+then stderr doesn't match regex world$
+~~~
+
+
+---
+title: Acceptance criteria for the lib/runcmd Subplot library
+author: The Subplot project
+template: python
+bindings:
+- runcmd.yaml
+functions:
+- runcmd.py
+...
diff --git a/share/python/lib/runcmd.py b/share/python/lib/runcmd.py
new file mode 100644
index 0000000..a2564c6
--- /dev/null
+++ b/share/python/lib/runcmd.py
@@ -0,0 +1,252 @@
+import logging
+import os
+import re
+import shlex
+import subprocess
+
+
+#
+# Helper functions.
+#
+
+# Get exit code or other stored data about the latest command run by
+# runcmd_run.
+
+
+def _runcmd_get(ctx, name):
+ ns = ctx.declare("_runcmd")
+ return ns[name]
+
+
+def runcmd_get_exit_code(ctx):
+ return _runcmd_get(ctx, "exit")
+
+
+def runcmd_get_stdout(ctx):
+ return _runcmd_get(ctx, "stdout")
+
+
+def runcmd_get_stdout_raw(ctx):
+ return _runcmd_get(ctx, "stdout.raw")
+
+
+def runcmd_get_stderr(ctx):
+ return _runcmd_get(ctx, "stderr")
+
+
+def runcmd_get_stderr_raw(ctx):
+ return _runcmd_get(ctx, "stderr.raw")
+
+
+def runcmd_get_argv(ctx):
+ return _runcmd_get(ctx, "argv")
+
+
+# Run a command, given an argv and other arguments for subprocess.Popen.
+#
+# This is meant to be a helper function, not bound directly to a step. The
+# stdout, stderr, and exit code are stored in the "_runcmd" namespace in the
+# ctx context.
+def runcmd_run(ctx, argv, **kwargs):
+ ns = ctx.declare("_runcmd")
+
+ # The Subplot Python template empties os.environ at startup, modulo a small
+ # number of variables with carefully chosen values. Here, we don't need to
+ # care about what those variables are, but we do need to not overwrite
+ # them, so we just add anything in the env keyword argument, if any, to
+ # os.environ.
+ env = dict(os.environ)
+ for key, arg in kwargs.pop("env", {}).items():
+ env[key] = arg
+
+ pp = ns.get("path-prefix")
+ if pp:
+ env["PATH"] = pp + ":" + env["PATH"]
+
+ logging.debug(f"runcmd_run")
+ logging.debug(f" argv: {argv}")
+ logging.debug(f" env: {env}")
+ p = subprocess.Popen(
+ argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env, **kwargs
+ )
+ stdout, stderr = p.communicate("")
+ ns["argv"] = argv
+ ns["stdout.raw"] = stdout
+ ns["stderr.raw"] = stderr
+ ns["stdout"] = stdout.decode("utf-8")
+ ns["stderr"] = stderr.decode("utf-8")
+ ns["exit"] = p.returncode
+ logging.debug(f" ctx: {ctx}")
+ logging.debug(f" ns: {ns}")
+
+
+# Step: prepend srcdir to PATH whenever runcmd runs a command.
+def runcmd_helper_srcdir_path(ctx):
+ srcdir = globals()["srcdir"]
+ runcmd_prepend_to_path(ctx, srcdir)
+
+
+# Step: This creates a helper script.
+def runcmd_helper_script(ctx, filename=None):
+ get_file = globals()["get_file"]
+ with open(filename, "wb") as f:
+ f.write(get_file(filename))
+
+
+#
+# Step functions for running commands.
+#
+
+
+def runcmd_prepend_to_path(ctx, dirname=None):
+ ns = ctx.declare("_runcmd")
+ pp = ns.get("path-prefix", "")
+ if pp:
+ pp = f"{pp}:{dirname}"
+ else:
+ pp = dirname
+ ns["path-prefix"] = pp
+
+
+def runcmd_step(ctx, argv0=None, args=None):
+ runcmd_try_to_run(ctx, argv0=argv0, args=args)
+ runcmd_exit_code_is_zero(ctx)
+
+
+def runcmd_try_to_run(ctx, argv0=None, args=None):
+ argv = [shlex.quote(argv0)] + shlex.split(args)
+ runcmd_run(ctx, argv)
+
+
+#
+# Step functions for examining exit codes.
+#
+
+
+def runcmd_exit_code_is_zero(ctx):
+ runcmd_exit_code_is(ctx, exit=0)
+
+
+def runcmd_exit_code_is(ctx, exit=None):
+ assert_eq = globals()["assert_eq"]
+ assert_eq(runcmd_get_exit_code(ctx), int(exit))
+
+
+def runcmd_exit_code_is_nonzero(ctx):
+ runcmd_exit_code_is_not(ctx, exit=0)
+
+
+def runcmd_exit_code_is_not(ctx, exit=None):
+ assert_ne = globals()["assert_ne"]
+ assert_ne(runcmd_get_exit_code(ctx), int(exit))
+
+
+#
+# Step functions and helpers for examining output in various ways.
+#
+
+
+def runcmd_stdout_is(ctx, text=None):
+ _runcmd_output_is(runcmd_get_stdout(ctx), text)
+
+
+def runcmd_stdout_isnt(ctx, text=None):
+ _runcmd_output_isnt(runcmd_get_stdout(ctx), text)
+
+
+def runcmd_stderr_is(ctx, text=None):
+ _runcmd_output_is(runcmd_get_stderr(ctx), text)
+
+
+def runcmd_stderr_isnt(ctx, text=None):
+ _runcmd_output_isnt(runcmd_get_stderr(ctx), text)
+
+
+def _runcmd_output_is(actual, wanted):
+ assert_eq = globals()["assert_eq"]
+ wanted = bytes(wanted, "utf8").decode("unicode_escape")
+ logging.debug("_runcmd_output_is:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" wanted: {wanted!r}")
+ assert_eq(actual, wanted)
+
+
+def _runcmd_output_isnt(actual, wanted):
+ assert_ne = globals()["assert_ne"]
+ wanted = bytes(wanted, "utf8").decode("unicode_escape")
+ logging.debug("_runcmd_output_isnt:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" wanted: {wanted!r}")
+ assert_ne(actual, wanted)
+
+
+def runcmd_stdout_contains(ctx, text=None):
+ _runcmd_output_contains(runcmd_get_stdout(ctx), text)
+
+
+def runcmd_stdout_doesnt_contain(ctx, text=None):
+ _runcmd_output_doesnt_contain(runcmd_get_stdout(ctx), text)
+
+
+def runcmd_stderr_contains(ctx, text=None):
+ _runcmd_output_contains(runcmd_get_stderr(ctx), text)
+
+
+def runcmd_stderr_doesnt_contain(ctx, text=None):
+ _runcmd_output_doesnt_contain(runcmd_get_stderr(ctx), text)
+
+
+def _runcmd_output_contains(actual, wanted):
+ assert_eq = globals()["assert_eq"]
+ wanted = bytes(wanted, "utf8").decode("unicode_escape")
+ logging.debug("_runcmd_output_contains:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" wanted: {wanted!r}")
+ assert_eq(wanted in actual, True)
+
+
+def _runcmd_output_doesnt_contain(actual, wanted):
+ assert_ne = globals()["assert_ne"]
+ wanted = bytes(wanted, "utf8").decode("unicode_escape")
+ logging.debug("_runcmd_output_doesnt_contain:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" wanted: {wanted!r}")
+ assert_ne(wanted in actual, True)
+
+
+def runcmd_stdout_matches_regex(ctx, regex=None):
+ _runcmd_output_matches_regex(runcmd_get_stdout(ctx), regex)
+
+
+def runcmd_stdout_doesnt_match_regex(ctx, regex=None):
+ _runcmd_output_doesnt_match_regex(runcmd_get_stdout(ctx), regex)
+
+
+def runcmd_stderr_matches_regex(ctx, regex=None):
+ _runcmd_output_matches_regex(runcmd_get_stderr(ctx), regex)
+
+
+def runcmd_stderr_doesnt_match_regex(ctx, regex=None):
+ _runcmd_output_doesnt_match_regex(runcmd_get_stderr(ctx), regex)
+
+
+def _runcmd_output_matches_regex(actual, regex):
+ assert_ne = globals()["assert_ne"]
+ r = re.compile(regex)
+ m = r.search(actual)
+ logging.debug("_runcmd_output_matches_regex:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" regex: {regex!r}")
+ logging.debug(f" match: {m}")
+ assert_ne(m, None)
+
+
+def _runcmd_output_doesnt_match_regex(actual, regex):
+ assert_eq = globals()["assert_eq"]
+ r = re.compile(regex)
+ m = r.search(actual)
+ logging.debug("_runcmd_output_doesnt_match_regex:")
+ logging.debug(f" actual: {actual!r}")
+ logging.debug(f" regex: {regex!r}")
+ logging.debug(f" match: {m}")
+ assert_eq(m, None)
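A standalone illustration of the escape decoding used by `_runcmd_output_is` and related helpers above: the step text carries a literal backslash-n, which is decoded into a real newline before comparison.

~~~python
# The step text "hello, world\n" arrives with a literal backslash and "n";
# unicode_escape decoding turns it into an actual newline.
captured = "hello, world\\n"
decoded = bytes(captured, "utf8").decode("unicode_escape")
assert decoded == "hello, world\n"
~~~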
diff --git a/share/python/lib/runcmd.yaml b/share/python/lib/runcmd.yaml
new file mode 100644
index 0000000..48dde90
--- /dev/null
+++ b/share/python/lib/runcmd.yaml
@@ -0,0 +1,83 @@
+# Steps to run commands.
+
+- given: helper script {filename} for runcmd
+ function: runcmd_helper_script
+
+- given: srcdir is in the PATH
+ function: runcmd_helper_srcdir_path
+
+- when: I run (?P<argv0>\S+)(?P<args>.*)
+ regex: true
+ function: runcmd_step
+
+- when: I try to run (?P<argv0>\S+)(?P<args>.*)
+ regex: true
+ function: runcmd_try_to_run
+
+# Steps to examine exit code of latest command.
+
+- then: exit code is {exit}
+ function: runcmd_exit_code_is
+
+- then: exit code is not {exit}
+ function: runcmd_exit_code_is_not
+
+- then: command is successful
+ function: runcmd_exit_code_is_zero
+
+- then: command fails
+ function: runcmd_exit_code_is_nonzero
+
+# Steps to examine stdout/stderr for exact content.
+
+- then: stdout is exactly "(?P<text>.*)"
+ regex: true
+ function: runcmd_stdout_is
+
+- then: "stdout isn't exactly \"(?P<text>.*)\""
+ regex: true
+ function: runcmd_stdout_isnt
+
+- then: stderr is exactly "(?P<text>.*)"
+ regex: true
+ function: runcmd_stderr_is
+
+- then: "stderr isn't exactly \"(?P<text>.*)\""
+ regex: true
+ function: runcmd_stderr_isnt
+
+# Steps to examine stdout/stderr for sub-strings.
+
+- then: stdout contains "(?P<text>.*)"
+ regex: true
+ function: runcmd_stdout_contains
+
+- then: "stdout doesn't contain \"(?P<text>.*)\""
+ regex: true
+ function: runcmd_stdout_doesnt_contain
+
+- then: stderr contains "(?P<text>.*)"
+ regex: true
+ function: runcmd_stderr_contains
+
+- then: "stderr doesn't contain \"(?P<text>.*)\""
+ regex: true
+ function: runcmd_stderr_doesnt_contain
+
+# Steps to match stdout/stderr against regular expressions.
+
+- then: stdout matches regex (?P<regex>.*)
+ regex: true
+ function: runcmd_stdout_matches_regex
+
+- then: stdout doesn't match regex (?P<regex>.*)
+ regex: true
+ function: runcmd_stdout_doesnt_match_regex
+
+- then: stderr matches regex (?P<regex>.*)
+ regex: true
+ function: runcmd_stderr_matches_regex
+
+- then: stderr doesn't match regex (?P<regex>.*)
+ regex: true
+ function: runcmd_stderr_doesnt_match_regex
diff --git a/share/python/template/asserts.py b/share/python/template/asserts.py
new file mode 100644
index 0000000..c898454
--- /dev/null
+++ b/share/python/template/asserts.py
@@ -0,0 +1,23 @@
+# Check two values for equality and give error if they are not equal
+def assert_eq(a, b):
+ assert a == b, "expected %r == %r" % (a, b)
+
+
+# Check two values for inequality and give error if they are equal
+def assert_ne(a, b):
+ assert a != b, "expected %r != %r" % (a, b)
+
+
+# Check that two dict values are equal.
+def assert_dict_eq(a, b):
+ assert isinstance(a, dict)
+ assert isinstance(b, dict)
+ for key in a:
+ assert key in b, f"expected {key} in both dicts"
+ av = a[key]
+ bv = b[key]
+ assert_eq(type(av), type(bv))
+ if isinstance(av, list):
+ assert_eq(list(sorted(av)), list(sorted(bv)))
+ else:
+ assert_eq(av, bv)
+ for key in b:
+ assert key in a, f"expected {key} in both dicts"
diff --git a/share/python/template/context.py b/share/python/template/context.py
new file mode 100644
index 0000000..d61316e
--- /dev/null
+++ b/share/python/template/context.py
@@ -0,0 +1,95 @@
+import logging
+import re
+
+
+# Store context between steps.
+class Context:
+ def __init__(self):
+ self._vars = {}
+ self._ns = {}
+
+ def as_dict(self):
+ return dict(self._vars)
+
+ def get(self, key, default=None):
+ return self._vars.get(key, default)
+
+ def __getitem__(self, key):
+ return self._vars[key]
+
+ def __setitem__(self, key, value):
+ logging.debug("Context: key {!r} set to {!r}".format(key, value))
+ self._vars[key] = value
+
+ def __contains__(self, key):
+ return key in self._vars
+
+ def __delitem__(self, key):
+ del self._vars[key]
+
+ def __repr__(self):
+ return repr({"vars": self._vars, "namespaces": self._ns})
+
+ def declare(self, name):
+ if name not in self._ns:
+ self._ns[name] = NameSpace(name)
+ logging.debug(f"Context: declared {name}")
+ return self._ns[name]
+
+ def remember_value(self, name, value):
+ ns = self.declare("_values")
+ if name in ns:
+ raise KeyError(name)
+ ns[name] = value
+
+ def recall_value(self, name):
+ ns = self.declare("_values")
+ if name not in ns:
+ raise KeyError(name)
+ return ns[name]
+
+ def expand_values(self, pattern):
+ parts = []
+ while pattern:
+ m = re.search(r"(?<!\$)\$\{(?P<name>\S*)\}", pattern)
+ if not m:
+ parts.append(pattern)
+ break
+ name = m.group("name")
+ if not name:
+ raise KeyError("empty name in expansion")
+ value = self.recall_value(name)
+ parts.append(pattern[: m.start()] + value)
+ pattern = pattern[m.end() :]
+ return "".join(parts)
+
+
+class NameSpace:
+ def __init__(self, name):
+ self.name = name
+ self._dict = {}
+
+ def as_dict(self):
+ return dict(self._dict)
+
+ def get(self, key, default=None):
+ if key not in self._dict:
+ if default is None:
+ return None
+ self._dict[key] = default
+ return self._dict[key]
+
+ def __setitem__(self, key, value):
+ self._dict[key] = value
+
+ def __getitem__(self, key):
+ return self._dict[key]
+
+ def __contains__(self, key):
+ return key in self._dict
+
+ def __delitem__(self, key):
+ del self._dict[key]
+
+ def __repr__(self):
+ return repr(self._dict)
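A compact usage sketch of `Context`, assuming the class above is importable as `context.py` (as it is in the generated program's scaffolding); the namespace and value names are illustrative only.

~~~python
from context import Context

ctx = Context()
ns = ctx.declare("_example")       # per-library namespace, created on first use
ns["counter"] = 1
ctx.remember_value("version", "1.0")
assert ctx.expand_values("${version}") == "1.0"
assert ctx.expand_values("$${version}") == "$${version}"   # $$ escapes expansion
~~~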
diff --git a/share/python/template/context_tests.py b/share/python/template/context_tests.py
new file mode 100644
index 0000000..c91350e
--- /dev/null
+++ b/share/python/template/context_tests.py
@@ -0,0 +1,156 @@
+import unittest
+
+from context import Context
+
+
+class ContextTests(unittest.TestCase):
+ def test_converts_to_empty_dict_initially(self):
+ ctx = Context()
+ self.assertEqual(ctx.as_dict(), {})
+
+ def test_set_item(self):
+ ctx = Context()
+ ctx["foo"] = "bar"
+ self.assertEqual(ctx["foo"], "bar")
+
+ def test_does_not_contain_item(self):
+ ctx = Context()
+ self.assertFalse("foo" in ctx)
+
+ def test_no_longer_contains_item(self):
+ ctx = Context()
+ ctx["foo"] = "bar"
+ del ctx["foo"]
+ self.assertFalse("foo" in ctx)
+
+ def test_contains_item(self):
+ ctx = Context()
+ ctx["foo"] = "bar"
+ self.assertTrue("foo" in ctx)
+
+ def test_get_returns_default_if_item_does_not_exist(self):
+ ctx = Context()
+ self.assertEqual(ctx.get("foo"), None)
+
+ def test_get_returns_specified_default_if_item_does_not_exist(self):
+ ctx = Context()
+ self.assertEqual(ctx.get("foo", "bar"), "bar")
+
+ def test_get_returns_value_if_item_exists(self):
+ ctx = Context()
+ ctx["foo"] = "bar"
+ self.assertEqual(ctx.get("foo", "yo"), "bar")
+
+ def test_reprs_itself_when_empty(self):
+ ctx = Context()
+ self.assertFalse("foo" in repr(ctx))
+
+ def test_reprs_itself_when_not_empty(self):
+ ctx = Context()
+ ctx["foo"] = "bar"
+ self.assertTrue("foo" in repr(ctx))
+ self.assertTrue("bar" in repr(ctx))
+
+
+class ContextMemoryTests(unittest.TestCase):
+ def test_recall_raises_exception_for_unremembered_value(self):
+ ctx = Context()
+ with self.assertRaises(KeyError):
+ ctx.recall_value("foo")
+
+ def test_recall_returns_remembered_value(self):
+ ctx = Context()
+ ctx.remember_value("foo", "bar")
+ self.assertEqual(ctx.recall_value("foo"), "bar")
+
+ def test_remember_raises_exception_for_previously_remembered(self):
+ ctx = Context()
+ ctx.remember_value("foo", "bar")
+ with self.assertRaises(KeyError):
+ ctx.remember_value("foo", "bar")
+
+ def test_expand_returns_pattern_without_values_as_is(self):
+ ctx = Context()
+ self.assertEqual(ctx.expand_values("foo"), "foo")
+
+ def test_expand_allows_double_dollar_escapes(self):
+ ctx = Context()
+ self.assertEqual(ctx.expand_values("$${foo}"), "$${foo}")
+
+ def test_expand_raises_exception_for_empty_name_expansion_as_is(self):
+ ctx = Context()
+ with self.assertRaises(KeyError):
+ ctx.expand_values("${}")
+
+ def test_expand_raises_error_for_unremembered_values(self):
+ ctx = Context()
+ with self.assertRaises(KeyError):
+ ctx.expand_values("${foo}")
+
+ def test_expands_remembered_values(self):
+ ctx = Context()
+ ctx.remember_value("foo", "bar")
+ self.assertEqual(ctx.expand_values("${foo}"), "bar")
+
+
+class ContextNamespaceTests(unittest.TestCase):
+ def test_explicit_namespaces_are_empty_dicts_initially(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ self.assertEqual(ns.as_dict(), {})
+
+ def test_declaring_explicit_namespaces_is_idempotent(self):
+ ctx = Context()
+ ns1 = ctx.declare("foo")
+ ns2 = ctx.declare("foo")
+ self.assertEqual(id(ns1), id(ns2))
+
+ def test_knows_their_name(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ self.assertEqual(ns.name, "foo")
+
+ def test_sets_key(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ ns["bar"] = "yo"
+ self.assertEqual(ns["bar"], "yo")
+
+ def test_gets(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ ns["bar"] = "yo"
+ self.assertEqual(ns.get("bar", "argh"), "yo")
+
+ def test_get_without_default_doesnt_set(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ ns.get("bar")
+ self.assertFalse("bar" in ns)
+
+ def test_gets_with_default_sets_as_well(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ self.assertEqual(ns.get("bar", "yo"), "yo")
+ self.assertEqual(ns["bar"], "yo")
+
+ def test_does_not_contain_key(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ self.assertFalse("bar" in ns)
+
+ def test_contains_key(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ ns["bar"] = "yo"
+ self.assertTrue("bar" in ns)
+
+ def test_deletes(self):
+ ctx = Context()
+ ns = ctx.declare("foo")
+ ns["bar"] = "yo"
+ del ns["bar"]
+ self.assertFalse("bar" in ns)
+
+
+unittest.main()
diff --git a/share/python/template/encoding.py b/share/python/template/encoding.py
new file mode 100644
index 0000000..1efb95e
--- /dev/null
+++ b/share/python/template/encoding.py
@@ -0,0 +1,12 @@
+# Decode a base64-encoded string. The result is a bytes or a unicode string.
+
+
+import base64
+
+
+def decode_bytes(s):
+ return base64.b64decode(s)
+
+
+def decode_str(s):
+ return base64.b64decode(s).decode()
diff --git a/share/python/template/encoding_tests.py b/share/python/template/encoding_tests.py
new file mode 100644
index 0000000..4167aa4
--- /dev/null
+++ b/share/python/template/encoding_tests.py
@@ -0,0 +1,19 @@
+import base64
+import unittest
+
+import encoding
+
+
+class EncodingTests(unittest.TestCase):
+ def test_str_roundtrip(self):
+ original = "foo\nbar\0"
+ encoded = base64.b64encode(original.encode())
+ self.assertEqual(encoding.decode_str(encoded), original)
+
+ def test_bytes_roundtrip(self):
+ original = b"foo\nbar\0"
+ encoded = base64.b64encode(original)
+ self.assertEqual(encoding.decode_bytes(encoded), original)
+
+
+unittest.main()
diff --git a/share/python/template/files.py b/share/python/template/files.py
new file mode 100644
index 0000000..6346172
--- /dev/null
+++ b/share/python/template/files.py
@@ -0,0 +1,23 @@
+# Retrieve an embedded test data file using filename.
+
+
+class Files:
+ def __init__(self):
+ self._files = {}
+
+ def set(self, filename, content):
+ self._files[filename] = content
+
+ def get(self, filename):
+ return self._files[filename]
+
+
+_files = Files()
+
+
+def store_file(filename, content):
+ _files.set(filename, content)
+
+
+def get_file(filename):
+ return _files.get(filename)
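A minimal usage sketch of the embedded-file store above: the generated program calls `store_file` for every embedded file, and step functions read the data back with `get_file`. The filename and contents here are illustrative only.

~~~python
# In the generated program these calls are emitted by the template.
store_file("hello.txt", b"hello, world\n")
assert get_file("hello.txt") == b"hello, world\n"
~~~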
diff --git a/share/python/template/main.py b/share/python/template/main.py
new file mode 100644
index 0000000..87e2782
--- /dev/null
+++ b/share/python/template/main.py
@@ -0,0 +1,97 @@
+import argparse
+import logging
+import os
+import random
+import shutil
+import tarfile
+import tempfile
+
+
+# Remember where we started from. The step functions may need to refer
+# to files there.
+srcdir = os.getcwd()
+print("srcdir", srcdir)
+
+# Create a new temporary directory and chdir there. This allows step
+# functions to create new files in the current working directory
+# without having to be so careful.
+_datadir = tempfile.mkdtemp()
+print("datadir", _datadir)
+os.chdir(_datadir)
+
+
+def parse_command_line():
+ p = argparse.ArgumentParser()
+ p.add_argument("--log")
+ p.add_argument("--env", action="append", default=[])
+ p.add_argument("--save-on-failure")
+ p.add_argument("patterns", nargs="*")
+ return p.parse_args()
+
+
+def setup_logging(args):
+ if args.log:
+ fmt = "%(asctime)s %(levelname)s %(message)s"
+ datefmt = "%Y-%m-%d %H:%M:%S"
+ formatter = logging.Formatter(fmt, datefmt)
+
+ filename = os.path.abspath(os.path.join(srcdir, args.log))
+ handler = logging.FileHandler(filename)
+ handler.setFormatter(formatter)
+ else:
+ handler = logging.NullHandler()
+
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+ logger.setLevel(logging.DEBUG)
+
+
+def save_directory(dirname, tarname):
+ print("tarname", tarname)
+ logging.info("Saving {} to {}".format(dirname, tarname))
+ tar = tarfile.open(tarname, "w")
+ tar.add(dirname, arcname="datadir")
+ tar.close()
+
+
+def main(scenarios):
+ args = parse_command_line()
+ setup_logging(args)
+ logging.info("Test program starts")
+
+ logging.info("patterns: {}".format(args.patterns))
+ if len(args.patterns) == 0:
+ logging.info("Executing all scenarios")
+ todo = list(scenarios)
+ random.shuffle(todo)
+ else:
+ logging.info("Executing requested scenarios only: {}".format(args.patterns))
+ patterns = [arg.lower() for arg in args.patterns]
+ todo = [
+ scen
+ for scen in scenarios
+ if any(pattern in scen.get_title().lower() for pattern in patterns)
+ ]
+
+ extra_env = {}
+ for env in args.env:
+ (name, value) = env.split("=", 1)
+ extra_env[name] = value
+ logging.debug(f"args.env: {args.env}")
+ logging.debug(f"env vars from command line; {extra_env}")
+
+ try:
+ for scen in todo:
+ scen.run(_datadir, extra_env)
+ except Exception as e:
+ logging.error(str(e), exc_info=True)
+ if args.save_on_failure:
+ print(args.save_on_failure)
+ filename = os.path.abspath(os.path.join(srcdir, args.save_on_failure))
+ print(filename)
+ save_directory(_datadir, filename)
+ raise
+
+ shutil.rmtree(_datadir)
+ print("OK, all scenarios finished successfully")
+ logging.info("OK, all scenarios finished successfully")
diff --git a/share/python/template/scenarios.py b/share/python/template/scenarios.py
new file mode 100644
index 0000000..e2703df
--- /dev/null
+++ b/share/python/template/scenarios.py
@@ -0,0 +1,97 @@
+import logging
+import os
+import tempfile
+
+
+#############################################################################
+# Code to implement the scenarios.
+
+
+class Step:
+ def __init__(self):
+ self._kind = None
+ self._text = None
+ self._args = {}
+ self._function = None
+ self._cleanup = None
+
+ def set_kind(self, kind):
+ self._kind = kind
+
+ def set_text(self, text):
+ self._text = text
+
+ def set_arg(self, name, value):
+ self._args[name] = value
+
+ def set_function(self, function):
+ self._function = function
+
+ def set_cleanup(self, cleanup):
+ self._cleanup = cleanup
+
+ def do(self, ctx):
+ print(" step: {} {}".format(self._kind, self._text))
+ logging.info(" step: {} {}".format(self._kind, self._text))
+ self._function(ctx, **self._args)
+
+ def cleanup(self, ctx):
+ if self._cleanup:
+ print(" cleanup: {} {}".format(self._kind, self._text))
+ logging.info(" cleanup: {} {}".format(self._kind, self._text))
+ self._cleanup(ctx)
+ else:
+ logging.info(" no cleanup defined: {} {}".format(self._kind, self._text))
+
+
+class Scenario:
+ def __init__(self, ctx):
+ self._title = None
+ self._steps = []
+ self._ctx = ctx
+
+ def get_title(self):
+ return self._title
+
+ def set_title(self, title):
+ self._title = title
+
+ def append_step(self, step):
+ self._steps.append(step)
+
+ def run(self, datadir, extra_env):
+ print("scenario: {}".format(self._title))
+ logging.info("Scenario: {}".format(self._title))
+ logging.info("extra environment variables: {}".format(extra_env))
+
+ scendir = tempfile.mkdtemp(dir=datadir)
+ os.chdir(scendir)
+ self._set_environment_variables_to(scendir, extra_env)
+
+ done = []
+ ctx = self._ctx
+ try:
+ for step in self._steps:
+ step.do(ctx)
+ done.append(step)
+ except Exception as e:
+ logging.error(str(e), exc_info=True)
+ for step in reversed(done):
+ step.cleanup(ctx)
+ raise
+ for step in reversed(done):
+ step.cleanup(ctx)
+
+ def _set_environment_variables_to(self, scendir, extra_env):
+ minimal = {
+ "PATH": "/bin:/usr/bin",
+ "SHELL": "/bin/sh",
+ "HOME": scendir,
+ "TMPDIR": scendir,
+ }
+
+ os.environ.clear()
+ os.environ.update(minimal)
+ os.environ.update(extra_env)
+ logging.debug(f"extra_env: {dict(extra_env)!r}")
+ logging.debug(f"os.environ: {dict(os.environ)!r}")
diff --git a/share/python/template/template.py.tera b/share/python/template/template.py.tera
new file mode 100644
index 0000000..aa97cf0
--- /dev/null
+++ b/share/python/template/template.py.tera
@@ -0,0 +1,78 @@
+#############################################################################
+# Functions that implement steps.
+
+{% for func in functions %}
+#----------------------------------------------------------------------------
+# This code comes from: {{ func.source }}
+
+{{ func.code }}
+{% endfor %}
+
+
+#############################################################################
+# Scaffolding for generated test program.
+
+{% include "context.py" %}
+{% include "encoding.py" %}
+{% include "files.py" %}
+{% include "asserts.py" %}
+{% include "scenarios.py" %}
+{% include "main.py" %}
+
+
+#############################################################################
+# Test data files that were embedded in the source document. Base64
+# encoding is used to allow arbitrary data.
+
+{% for file in files %}
+# {{ file.filename }}
+filename = decode_str('{{ file.filename | base64 }}')
+contents = decode_bytes('{{ file.contents | base64 }}')
+store_file(filename, contents)
+{% endfor %}
+
+
+
+#############################################################################
+# Classes for individual scenarios.
+
+{% for scenario in scenarios %}
+#----------------------------------------------------------------------------
+# Scenario: {{ scenario.title }}
+class Scenario_{{ loop.index }}():
+ def __init__(self):
+ ctx = Context()
+ self._scenario = Scenario(ctx)
+ self._scenario.set_title(decode_str('{{ scenario.title | base64 }}'))
+ {% for step in scenario.steps %}
+ # Step: {{ step.text }}
+ step = Step()
+ step.set_kind('{{ step.kind | lower }}')
+ step.set_text(decode_str('{{ step.text | base64 }}'))
+ step.set_function({{ step.function }})
+ if '{{ step.cleanup }}':
+ step.set_cleanup({{ step.cleanup }})
+ self._scenario.append_step(step)
+ {% for part in step.parts %}{% if part.CapturedText is defined -%}
+ name = decode_str('{{ part.CapturedText.name | base64 }}')
+ text = decode_str('{{ part.CapturedText.text | base64 }}')
+ step.set_arg(name, text)
+ {% endif -%}
+ {% endfor -%}
+ {% endfor %}
+
+ def get_title(self):
+ return self._scenario.get_title()
+
+ def run(self, datadir, extra_env):
+ self._scenario.run(datadir, extra_env)
+{% endfor %}
+
+_scenarios = { {% for scenario in scenarios %}
+ Scenario_{{ loop.index }}(),{% endfor %}
+}
+
+
+#############################################################################
+# Call main function and clean up.
+main(_scenarios)
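template.py.tera base64-encodes every scenario title, step text, filename, and file body so that arbitrary bytes survive being embedded in Python source, and it relies on decode_str, decode_bytes, and store_file from encoding.py and files.py, which it includes above. Roughly, those helpers behave like the sketch below; the exact definitions, and the name of any lookup function that steps use to read an embedded file back, are assumptions rather than copies of the included files.

import base64

# Sketch of the helpers assumed by the template; see encoding.py and
# files.py for the real definitions.

_files = {}  # in-memory registry of embedded data files


def decode_bytes(encoded):
    return base64.b64decode(encoded)


def decode_str(encoded):
    return base64.b64decode(encoded).decode("utf-8")


def store_file(filename, contents):
    # Embedded files are registered here; a step that needs one
    # presumably retrieves it by name and writes it into the
    # scenario's temporary directory.
    _files[filename] = contents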
diff --git a/share/python/template/template.yaml b/share/python/template/template.yaml
new file mode 100644
index 0000000..73f2510
--- /dev/null
+++ b/share/python/template/template.yaml
@@ -0,0 +1,9 @@
+template: template.py.tera
+helpers:
+ - context.py
+ - encoding.py
+ - files.py
+ - asserts.py
+ - scenarios.py
+ - main.py
+run: python3
diff --git a/share/rust/lib/datadir.yaml b/share/rust/lib/datadir.yaml
new file mode 100644
index 0000000..acd4ad4
--- /dev/null
+++ b/share/rust/lib/datadir.yaml
@@ -0,0 +1,13 @@
+# Bindings for the datadir steps
+# These steps are fairly simple, since Datadir is mostly a utility
+# context for use by other step libraries; however, some of its
+# capabilities are worth exporting as steps.
+
+- given: datadir has at least {bytes}B of space
+ function: subplotlib::steplibrary::datadir::datadir_has_enough_space
+ types:
+ bytes: uint
+- given: datadir has at least {megabytes}M of space
+ function: subplotlib::steplibrary::datadir::datadir_has_enough_space_megabytes
+ types:
+ megabytes: uint
diff --git a/share/rust/lib/files.yaml b/share/rust/lib/files.yaml
new file mode 100644
index 0000000..339e7cf
--- /dev/null
+++ b/share/rust/lib/files.yaml
@@ -0,0 +1,68 @@
+# Bindings for the files steps
+# These bind the files step library for subplotlib
+
+- given: file {embedded_file}
+ function: subplotlib::steplibrary::files::create_from_embedded
+ types:
+ embedded_file: file
+
+- given: file {filename_on_disk} from {embedded_file}
+ function: subplotlib::steplibrary::files::create_from_embedded_with_other_name
+ types:
+ embedded_file: file
+
+- given: file (?P<filename>\S+) has modification time (?P<mtime>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})
+ regex: true
+ function: subplotlib::steplibrary::files::touch_with_timestamp
+ types:
+ mtime: text
+
+- when: I write "(?P<text>.*)" to file (?P<filename>\S+)
+ regex: true
+ function: subplotlib::steplibrary::files::create_from_text
+
+- when: I remember metadata for file {filename}
+ function: subplotlib::steplibrary::files::remember_metadata
+
+- when: I touch file {filename}
+ function: subplotlib::steplibrary::files::touch
+
+- then: file {filename} exists
+ function: subplotlib::steplibrary::files::file_exists
+
+- then: file {filename} does not exist
+ function: subplotlib::steplibrary::files::file_does_not_exist
+
+- then: only files (?P<filenames>.+) exist
+ function: subplotlib::steplibrary::files::only_these_exist
+ regex: true
+
+- then: file (?P<filename>\S+) contains "(?P<data>.*)"
+ regex: true
+ function: subplotlib::steplibrary::files::file_contains
+
+- then: file (?P<filename>\S+) matches regex /(?P<regex>.*)/
+ regex: true
+ function: subplotlib::steplibrary::files::file_matches_regex
+
+- then: file (?P<filename>\S+) matches regex "(?P<regex>.*)"
+ regex: true
+ function: subplotlib::steplibrary::files::file_matches_regex
+
+- then: files {filename1} and {filename2} match
+ function: subplotlib::steplibrary::files::file_match
+
+- then: file {filename} has same metadata as before
+ function: subplotlib::steplibrary::files::has_remembered_metadata
+
+- then: file {filename} has different metadata from before
+ function: subplotlib::steplibrary::files::has_different_metadata
+
+- then: file {filename} has changed from before
+ function: subplotlib::steplibrary::files::has_different_metadata
+
+- then: file {filename} has a very recent modification time
+ function: subplotlib::steplibrary::files::mtime_is_recent
+
+- then: file {filename} has a very old modification time
+ function: subplotlib::steplibrary::files::mtime_is_ancient
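These bindings connect step text to implementation functions: plain {name} placeholders capture a single value (of the type given under types:, defaulting to a word), while regex: true bindings capture named groups, and the captured values reach the step implementation by name. The functions referenced here are Rust steps in subplotlib (template.rs.tera below shows how their builders receive the captures); the Python step libraries added earlier in this merge follow the same model, roughly like this hypothetical step function:

# Hypothetical Python step implementation for a binding such as
#   - then: file (?P<filename>\S+) contains "(?P<data>.*)"
# The scenario context comes first and each named capture arrives as a
# keyword argument; the actual subplotlib functions bound above are
# written in Rust, so this is only an illustration of the binding model.
def file_contains(ctx, filename=None, data=None):
    with open(filename, encoding="utf-8") as f:
        content = f.read()
    assert data in content, "expected {!r} in {}".format(data, filename)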
diff --git a/share/rust/lib/runcmd.yaml b/share/rust/lib/runcmd.yaml
new file mode 100644
index 0000000..0a635c9
--- /dev/null
+++ b/share/rust/lib/runcmd.yaml
@@ -0,0 +1,89 @@
+# Bindings for the runcmd step library
+
+- given: helper script {script} for runcmd
+ function: subplotlib::steplibrary::runcmd::helper_script
+ types:
+ script: file
+
+- given: srcdir is in the PATH
+ function: subplotlib::steplibrary::runcmd::helper_srcdir_path
+
+- when: I run (?P<argv0>\S+)(?P<args>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::run
+
+- when: I try to run (?P<argv0>\S+)(?P<args>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::try_to_run
+
+# Steps to examine exit code of latest command.
+
+- then: exit code is {exit}
+ function: subplotlib::steplibrary::runcmd::exit_code_is
+ types:
+ exit: int
+
+- then: exit code is not {exit}
+ function: subplotlib::steplibrary::runcmd::exit_code_is_not
+ types:
+ exit: int
+
+- then: command is successful
+ function: subplotlib::steplibrary::runcmd::exit_code_is_zero
+
+- then: command fails
+ function: subplotlib::steplibrary::runcmd::exit_code_is_nonzero
+
+# Steps to examine stdout/stderr for exact content.
+
+- then: stdout is exactly "(?P<text>.*)"
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_is
+
+- then: 'stdout isn''t exactly "(?P<text>.*)"'
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_isnt
+
+- then: stderr is exactly "(?P<text>.*)"
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_is
+
+- then: 'stderr isn''t exactly "(?P<text>.*)"'
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_isnt
+
+# Steps to examine stdout/stderr for sub-strings.
+
+- then: stdout contains "(?P<text>.*)"
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_contains
+
+- then: 'stdout doesn''t contain "(?P<text>.*)"'
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_doesnt_contain
+
+- then: stderr contains "(?P<text>.*)"
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_contains
+
+- then: 'stderr doesn''t contain "(?P<text>.*)"'
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_doesnt_contain
+
+# Steps to match stdout/stderr against regular expressions.
+
+- then: stdout matches regex (?P<regex>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_matches_regex
+
+- then: stdout doesn't match regex (?P<regex>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stdout_doesnt_match_regex
+
+- then: stderr matches regex (?P<regex>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_matches_regex
+
+- then: stderr doesn't match regex (?P<regex>.*)
+ regex: true
+ function: subplotlib::steplibrary::runcmd::stderr_doesnt_match_regex
diff --git a/share/rust/template/template.rs.tera b/share/rust/template/template.rs.tera
new file mode 100644
index 0000000..c972d37
--- /dev/null
+++ b/share/rust/template/template.rs.tera
@@ -0,0 +1,70 @@
+use subplotlib::prelude::*;
+
+{% for func in functions %}
+
+// --------------------------------
+// This came from {{ func.source }}
+
+{{ func.code }}
+
+{% endfor %}
+
+// --------------------------------
+
+lazy_static! {
+ static ref SUBPLOT_EMBEDDED_FILES: Vec<SubplotDataFile> = vec![
+{% for file in files %}
+ SubplotDataFile::new("{{ file.filename | base64 }}",
+ "{{ file.contents | base64 }}"),
+{% endfor %}
+ ];
+}
+
+{% for scenario in scenarios %}
+
+// ---------------------------------
+
+// {{ scenario.title | commentsafe }}
+#[test]
+fn {{ scenario.title | nameslug }}() {
+ let mut scenario = Scenario::new(&base64_decode("{{scenario.title | base64}}"));
+ {% for step in scenario.steps %}
+ let step = {{step.function}}::Builder::default()
+ {% for part in step.parts %}{% if part.CapturedText is defined -%}
+ {%- set name = part.CapturedText.name -%}
+ {%- set text = part.CapturedText.text -%}
+ {%- set type = step.types[name] | default(value='text') -%}
+ .{{name}}(
+ {% if type in ['number', 'int', 'uint'] %}{{text}}
+ {%- elif type in ['text', 'word']%}
+ // "{{text | commentsafe }}"
+ &base64_decode("{{text | base64}}"
+ )
+ {%- elif type in ['file'] %}
+ {
+ use std::path::PathBuf;
+ // {{ text | commentsafe }}
+ let target_name: PathBuf = base64_decode("{{ text | base64 }}").into();
+ SUBPLOT_EMBEDDED_FILES
+ .iter()
+ .find(|df| df.name() == target_name)
+ .expect("Unable to find file at runtime")
+ .clone()
+ }
+ {%- else %} /* WOAH unknown type {{step.types[name]}} */ {{text}}
+ {%- endif %}
+ )
+ {% endif -%}
+ {% endfor -%}
+ .build();
+ {%- if step.cleanup %}
+ let cleanup = {{step.cleanup}}::Builder::default().build();
+ scenario.add_step(step, Some(cleanup));
+ {%- else %}
+ scenario.add_step(step, None);
+ {%- endif %}
+ {% endfor %}
+
+ scenario.run().unwrap();
+}
+{% endfor %}
diff --git a/share/rust/template/template.yaml b/share/rust/template/template.yaml
new file mode 100644
index 0000000..110f5df
--- /dev/null
+++ b/share/rust/template/template.yaml
@@ -0,0 +1,2 @@
+template: template.rs.tera
+run: cargo test