 NEWS                          |  12
 artifact_store.py             |   4
 check                         | 135
 create-token                  |  47
 debian/changelog              |  10
 generate-rsa-key              |  34
 ick2/actions.py               | 318
 ick2/buildsapi.py             |   4
 ick2/buildsm.py               |  63
 ick2/notificationapi.py       |  21
 ick2/persistent.py            |   8
 ick2/persistent_tests.py      |   6
 ick2/trans.py                 |   5
 ick2/version.py               |   4
 notification_service.py       |   4
 pipelines/systrees.ick        |   7
 setup.py                      |   3
 start_ick                     |   4
 yarns/000.yarn                |  33
 yarns/100-projects.yarn       |   9
 yarns/150-pipelines.yarn      |   9
 yarns/200-version.yarn        |  12
 yarns/300-workers.yarn        |  33
 yarns/400-build.yarn          | 209
 yarns/500-build-fail.yarn     |  53
 yarns/600-unauthz.yarn        |  15
 yarns/700-artifact-store.yarn |  29
 yarns/900-implements.yarn     |  39
 yarns/900-local.yarn          | 191
 yarns/900-remote.yarn         |  23
 yarns/lib.py                  | 134
31 files changed, 672 insertions, 806 deletions
diff --git a/NEWS b/NEWS
index 170b67e..ed6c79b 100644
--- a/NEWS
+++ b/NEWS
@@ -1,7 +1,7 @@
NEWS for ick2, a CI server
=============================================================================
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -17,9 +17,17 @@ You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-Version 0.53.2+git, not yet released
+Version 0.55+git, not yet released
+----------------------------------
+
+
+Version 0.54, released 2019-07-26
------------------------------------
+* A ton of changes. This NEWS entry isn't complete, sorry. I'm making
+ a release before merging in a large change to how the controller
+ stores persistent data.
+
* The worker manager now has an action to mirror several git
repositories at once: `action: git_mirror`. See
[the specification][].
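
For orientation, the `git_mirror` action (exercised by `GitMirrorAction` further
down in this diff) expects a step with `where: host` and a `sources` parameter
whose entries carry `name` and `repo` fields. A rough Python sketch of those
shapes; the field names come from the code below, the repository values are
made up for illustration:

    # Illustration only: field names taken from GitMirrorAction, values invented.
    step = {"action": "git_mirror", "where": "host"}
    params = {
        "sources": [
            {"name": "ick2", "repo": "https://example.com/git/ick2.git"},
            {"name": "apifw", "repo": "https://example.com/git/apifw.git"},
        ],
    }
    # Each source is mirrored under <workspace>/.mirrors/<name>.
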
diff --git a/artifact_store.py b/artifact_store.py
index ea1db6a..8dea22d 100644
--- a/artifact_store.py
+++ b/artifact_store.py
@@ -1,5 +1,5 @@
#!/usr/bin/python3
-# Copyright (C) 2018 Lars Wirzenius
+# Copyright (C) 2018-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -120,4 +120,4 @@ app = main()
if __name__ == '__main__':
print('running in debug mode')
- app.run(host='127.0.0.1', port=12766)
+ app.run(host='127.0.0.1', port=5555)
diff --git a/check b/check
index 133b9c0..0828aef 100755
--- a/check
+++ b/check
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Copyright 2017-2018 Lars Wirzenius
+# Copyright 2017-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -17,99 +17,70 @@
set -eu
-title()
-{
- printf "\n"
- echo "$@"
- n=77
- for i in $(seq $n)
- do
- printf "%s" -
- done
- printf '\n'
+title() {
+ printf "\n"
+ echo "$@"
+ n=77
+ for i in $(seq $n); do
+ printf "%s" -
+ done
+ printf '\n'
}
-
-title Remote or local yarns?
-remote=no
-unit=yes
-yarns=yes
-if [ "$#" -gt 0 ]
-then
- case "$1" in
- https://*)
- remote=yes
- unit=no
- yarns=yes
- remote_url="$1"
- shift 1
- ;;
- yarns)
- remote=no
- unit=no
- yarns=yes
- shift 1
- ;;
- local)
- remote=no
- unit=yes
- yarns=no
- shift 1
- ;;
- *)
- echo "Don't understand args: $@" 1>&2
- exit 1
- ;;
- esac
+title Remote yarns?
+local=yes
+yarns=no
+if [ "$#" -gt 0 ]; then
+ case "$1" in
+ https://*)
+ local=no
+ yarns=yes
+ remote_url="$1"
+ shift 1
+ ;;
+ *)
+ echo "Don't understand args: $*" 1>&2
+ exit 1
+ ;;
+ esac
fi
+if [ "$local" = yes ]; then
+ title Unit tests
+ python3 -m CoverageTestRunner --ignore-missing-from=without-tests ick2
-if [ "$unit" = yes ]
-then
- title Unit tests
- python3 -m CoverageTestRunner --ignore-missing-from=without-tests ick2
-
- if [ -e .git ]
- then
- sources="$(git ls-files | grep -Fvxf copyright-exceptions)"
+ if [ -e .git ]; then
+ sources="$(git ls-files | grep -Fvxf copyright-exceptions)"
- title Copyright statements
- copyright-statement-lint $sources
+ # title Copyright statements
+ # copyright-statement-lint $sources
- title Copyright licences
- ./is-agpl3+ $sources
- fi
+ title Copyright licences
+ ./is-agpl3+ $sources
+ fi
- python_sources="ick_controller.py worker_manager ick2 icktool"
+ python_sources="ick_controller.py worker_manager ick2 icktool"
- title pycodestyle
- pycodestyle ick2 $python_sources
+ # title pycodestyle
+ # pycodestyle ick2 $python_sources
- if command -v pylint3 > /dev/null
- then
- title pylint3
- pylint3 --rcfile pylint.conf $python_sources
- fi
+ # if command -v pylint3 > /dev/null
+ # then
+ # title pylint3
+ # pylint3 --rcfile pylint.conf $python_sources
+ # fi
fi
-if [ "$yarns" = yes ]
-then
- title Yarns
- if [ "$remote" = no ]
- then
- impl=yarns/900-local.yarn
- args=""
- else
- impl=yarns/900-remote.yarn
- args="--env ICK_URL=$remote_url"
- fi
- yarn yarns/[^9]*.yarn yarns/900-implements.yarn "$impl" \
- --shell python2 \
- --shell-arg '' \
- --shell-library yarns/lib.py \
- --cd-datadir \
- $args \
- "$@"
+if [ "$yarns" = yes ]; then
+ title Yarns
+ yarn yarns/*.yarn \
+ --shell python2 \
+ --shell-arg '' \
+ --shell-library yarns/lib.py \
+ --cd-datadir \
+ --env "CONTROLLER=$remote_url" \
+ --env "SECRETS=$HOME/.config/qvarn/createtoken.conf" \
+ "$@"
fi
title OK
diff --git a/create-token b/create-token
deleted file mode 100755
index 55a7f7e..0000000
--- a/create-token
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/python3
-# Copyright (C) 2017-2018 Lars Wirzenius
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-import sys
-import time
-
-import Crypto.PublicKey.RSA
-
-import apifw
-
-
-# FIXME: These should agree with how ick controller is configured.
-# See the Ansible playbook.
-iss = 'localhost'
-
-
-key_text = sys.stdin.read()
-key = Crypto.PublicKey.RSA.importKey(key_text)
-
-scopes = ' '.join(sys.argv[1].split())
-aud = sys.argv[2]
-
-now = time.time()
-claims = {
- 'iss': iss,
- 'sub': 'subject-uuid',
- 'aud': aud,
- 'exp': now + 86400, # FIXME: This is silly long
- 'scope': scopes,
-}
-
-token = apifw.create_token(claims, key)
-sys.stdout.write(token.decode('ascii'))
diff --git a/debian/changelog b/debian/changelog
index 5e1e63c..a80d091 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,14 @@
-ick2 (0.53.2+git-1) UNRELEASED; urgency=medium
+ick2 (0.55+git-1) UNRELEASED; urgency=medium
* New upstream version.
- -- Lars Wirzenius <liw@liw.fi> Wed, 18 Jul 2018 20:03:56 +0300
+ -- Lars Wirzenius <liw@liw.fi> Fri, 26 Jul 2019 09:11:54 +0300
+
+ick2 (0.54-1) stretch; urgency=medium
+
+ * New upstream version.
+
+ -- Lars Wirzenius <liw@liw.fi> Fri, 26 Jul 2019 09:11:53 +0300
ick2 (0.53.2-1) stretch; urgency=medium
diff --git a/generate-rsa-key b/generate-rsa-key
deleted file mode 100755
index e44a796..0000000
--- a/generate-rsa-key
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/python3
-# Copyright (C) 2017 Lars Wirzenius
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-import sys
-
-import Crypto.PublicKey.RSA
-
-
-RSA_KEY_BITS = 4096 # A nice, currently safe length
-
-key = Crypto.PublicKey.RSA.generate(RSA_KEY_BITS)
-
-filename = sys.argv[1]
-
-def write(filename, byts):
- with open(filename, 'w') as f:
- f.write(byts.decode('ascii'))
-
-write(filename, key.exportKey('PEM'))
-write(filename + '.pub', key.exportKey('OpenSSH'))
diff --git a/ick2/actions.py b/ick2/actions.py
index c901d6b..2881524 100644
--- a/ick2/actions.py
+++ b/ick2/actions.py
@@ -33,9 +33,9 @@ class UnknownStepError(Exception):
class ActionFactory:
_classes = {
- 'host': ick2.HostEnvironment,
- 'chroot': ick2.ChrootEnvironment,
- 'container': ick2.ContainerEnvironment,
+ "host": ick2.HostEnvironment,
+ "chroot": ick2.ChrootEnvironment,
+ "container": ick2.ContainerEnvironment,
}
def __init__(self, build_id, systree, workspace_area, reporter):
@@ -75,7 +75,7 @@ class ActionFactory:
return area
def create_environment(self, spec, project_name):
- env = spec.get('where', 'host')
+ env = spec.get("where", "host")
assert env in self.get_allowed_environments()
env_class = self._classes[env]
area = self.get_workspace_area()
@@ -95,46 +95,45 @@ class ActionFactory:
def _create_action_object(self, env, spec):
rules = [
- ('shell', ShellAction),
- ('python', PythonAction),
- ('debootstrap', DebootstrapAction),
+ ("shell", ShellAction),
+ ("python", PythonAction),
+ ("debootstrap", DebootstrapAction),
]
for key, klass in rules:
if key in spec:
return klass(env)
- if 'archive' in spec:
+ if "archive" in spec:
rules2 = {
- 'workspace': ArchiveWorkspaceAction,
- 'systree': ArchiveSystreeAction,
+ "workspace": ArchiveWorkspaceAction,
+ "systree": ArchiveSystreeAction,
}
- kind = spec['archive']
+ kind = spec["archive"]
klass = rules2.get(kind)
if klass:
return klass(env)
- if 'action' in spec:
+ if "action" in spec:
rules2 = {
- 'populate_systree': PopulateSystreeAction,
- 'populate_workspace': PopulateWorkspaceAction,
- 'create_workspace': CreateWorkspaceAction,
- 'git': GitAction,
- 'git_mirror': GitMirrorAction,
- 'rsync': RsyncAction,
- 'dput': DputAction,
- 'notify': NotifyAction,
+ "populate_systree": PopulateSystreeAction,
+ "populate_workspace": PopulateWorkspaceAction,
+ "create_workspace": CreateWorkspaceAction,
+ "git": GitAction,
+ "git_mirror": GitMirrorAction,
+ "rsync": RsyncAction,
+ "dput": DputAction,
+ "notify": NotifyAction,
}
- kind = spec['action']
+ kind = spec["action"]
klass = rules2.get(kind)
if klass:
return klass(env)
- raise UnknownStepError('Unknown action %r' % spec)
+ raise UnknownStepError("Unknown action %r" % spec)
class Action: # pragma: no cover
-
def __init__(self, env):
self._env = env
self._cc = None
@@ -181,20 +180,20 @@ class Action: # pragma: no cover
def encode64(self, params):
assert isinstance(params, dict)
as_text = json.dumps(params)
- as_bytes = as_text.encode('UTF-8')
+ as_bytes = as_text.encode("UTF-8")
as_base64 = base64.b64encode(as_bytes)
- return as_base64.decode('UTF-8')
+ return as_base64.decode("UTF-8")
def decode64(self, encoded):
- as_base64 = encoded.encode('UTF-8')
+ as_base64 = encoded.encode("UTF-8")
as_bytes = base64.b64decode(as_base64)
- as_text = as_bytes.decode('UTF-8')
+ as_text = as_bytes.decode("UTF-8")
return json.loads(as_text)
def get_authz_headers(self):
token = self.get_token()
return {
- 'Authorization': 'Bearer {}'.format(token),
+ "Authorization": "Bearer {}".format(token),
}
def execute(self, params, step):
@@ -202,79 +201,75 @@ class Action: # pragma: no cover
class ShellAction(Action):
-
def encode_parameters(self, params): # pragma: no cover
encoded = self.encode64(params)
return 'params() { echo -n "%s" | base64 -d; }\n' % encoded
def execute(self, params, step):
prefix = self.encode_parameters(params)
- snippet = step['shell']
- argv = ['bash', '-exuc', prefix + snippet]
+ snippet = step["shell"]
+ argv = ["bash", "-exuc", prefix + snippet]
exit_code = self._env.runcmd(argv)
- self._env.report(exit_code, 'action finished\n')
+ self._env.report(exit_code, "action finished\n")
return exit_code
class PythonAction(Action):
-
def encode_parameters(self, params): # pragma: no cover
encoded = self.encode64(params)
prefix = (
- 'import base64, json, subprocess\n'
- 'params = json.loads(base64.b64decode(\n'
+ "import base64, json, subprocess\n"
+ "params = json.loads(base64.b64decode(\n"
' "{}").decode("utf8"))\n'
- 'def RUN(*args, **kwargs):\n'
+ "def RUN(*args, **kwargs):\n"
' print("Executing:", args, kwargs)\n'
' if "check" not in kwargs:\n'
' kwargs["check"] = True\n'
- ' return subprocess.run(args, **kwargs)\n'
- 'def OUT(*args, **kwargs):\n'
- ' x = RUN(*args, stdout=subprocess.PIPE, **kwargs)\n'
+ " return subprocess.run(args, **kwargs)\n"
+ "def OUT(*args, **kwargs):\n"
+ " x = RUN(*args, stdout=subprocess.PIPE, **kwargs)\n"
' return x.stdout.decode("UTF-8")\n'
- 'def ERR(*args, **kwargs):\n'
- ' x = RUN(*args, stderr=subprocess.PIPE, check=False, **kwargs)\n'
+ "def ERR(*args, **kwargs):\n"
+ " x = RUN(*args, stderr=subprocess.PIPE, check=False, **kwargs)\n"
' return x.stderr.decode("UTF-8")\n'
- 'def OUTERR(*args, **kwargs):\n'
- ' x = RUN(*args, stdout=subprocess.PIPE, \n'
- ' sterr=subproces.STDOUT, check=False, **kwargs)\n'
+ "def OUTERR(*args, **kwargs):\n"
+ " x = RUN(*args, stdout=subprocess.PIPE, \n"
+ " sterr=subproces.STDOUT, check=False, **kwargs)\n"
' return s.stdout.decode("UTF-8"), x.stderr.decode("UTF-8")\n'
-
).format(encoded)
return prefix
def execute(self, params, step):
prefix = self.encode_parameters(params)
- snippet = step['python']
- argv = ['python3', '-c', prefix + '\n' + snippet]
+ snippet = step["python"]
+ argv = ["python3", "-c", prefix + "\n" + snippet]
exit_code = self._env.runcmd(argv)
- self._env.report(exit_code, 'action finished\n')
+ self._env.report(exit_code, "action finished\n")
return exit_code
class DebootstrapAction(Action):
- default_mirror = 'http://deb.debian.org/debian'
+ default_mirror = "http://deb.debian.org/debian"
def encode_parameters(self, params): # pragma: no cover
pass
def execute(self, params, step):
- suite = step.get('debootstrap')
- if suite is None or suite == 'auto':
- suite = params['debian_codename']
- mirror = step.get('mirror', self.default_mirror)
+ suite = step.get("debootstrap")
+ if suite is None or suite == "auto":
+ suite = params["debian_codename"]
+ mirror = step.get("mirror", self.default_mirror)
env = self.get_env()
workspace = env.get_workspace_directory()
- argv = ['sudo', 'debootstrap', suite, '.', mirror]
+ argv = ["sudo", "debootstrap", suite, ".", mirror]
exit_code = self._env.host_runcmd(argv, cwd=workspace)
- self._env.report(exit_code, 'action finished\n')
+ self._env.report(exit_code, "action finished\n")
return exit_code
class CreateWorkspaceAction(Action):
-
def encode_parameters(self, params): # pragma: no cover
pass
@@ -282,12 +277,11 @@ class CreateWorkspaceAction(Action):
env = self.get_env()
dirname = env.get_workspace_directory()
make_directory_empty(env, dirname)
- self._env.report(0, 'Created or emptied workspace %s\n' % dirname)
+ self._env.report(0, "Created or emptied workspace %s\n" % dirname)
return 0
class ArchiveBaseAction(Action): # pragma: no cover
-
def get_dirname(self, env):
raise NotImplementedError()
@@ -298,42 +292,42 @@ class ArchiveBaseAction(Action): # pragma: no cover
env = self.get_env()
dirname = self.get_dirname(env)
- name_from = step.get('name_from', 'artifact_name')
+ name_from = step.get("name_from", "artifact_name")
blob_name = params.get(name_from)
if not blob_name:
- env.report(1, 'No artifact_name parameter\n')
+ env.report(1, "No artifact_name parameter\n")
return 1
- env.report(None, 'Creating new artifact named {}\n'.format(blob_name))
- env.report(None, 'Artifact will be created from {}\n'.format(dirname))
+ env.report(None, "Creating new artifact named {}\n".format(blob_name))
+ env.report(None, "Artifact will be created from {}\n".format(dirname))
- globs = step.get('globs')
+ globs = step.get("globs")
if globs is None:
- names = ['.']
+ names = ["."]
else:
names = self.match_globs(dirname, globs)
url = self.get_blob_upload_url(blob_name)
headers = self.get_authz_headers()
- self._env.report(None, 'Creating tarball\n')
+ self._env.report(None, "Creating tarball\n")
fd, tarball = tempfile.mkstemp()
os.close(fd)
- tar = ['sudo', 'tar', '-zvcf', tarball, '-C', dirname] + names
+ tar = ["sudo", "tar", "-zvcf", tarball, "-C", dirname] + names
exit_code = self._env.host_runcmd(tar)
if exit_code != 0:
- self._env.report(exit_code, 'Tarball generation failed\n')
+ self._env.report(exit_code, "Tarball generation failed\n")
os.remove(tarball)
return exit_code
- self._env.report(None, 'Tarball generation finished OK\n')
-
- self._env.report(None, 'Uploading tarball to artifact store\n')
- curl = ['curl', '-sk', '-T', tarball] + [
- '-H{}:{}'.format(name, value)
- for name, value in headers.items()
- ] + [url]
+ self._env.report(None, "Tarball generation finished OK\n")
+
+ self._env.report(None, "Uploading tarball to artifact store\n")
+ curl = (
+ ["curl", "-sk", "-T", tarball]
+ + ["-H{}:{}".format(name, value) for name, value in headers.items()]
+ + [url]
+ )
exit_code = self._env.host_runcmd(curl)
- self._env.report(
- exit_code, 'curl upload finished (exit code %s)\n' % exit_code)
+ self._env.report(exit_code, "curl upload finished (exit code %s)\n" % exit_code)
os.remove(tarball)
return exit_code
@@ -341,20 +335,18 @@ class ArchiveBaseAction(Action): # pragma: no cover
def match_globs(self, workspace, globs):
names = []
for pat in globs:
- abspat = os.path.join(workspace, './' + pat)
+ abspat = os.path.join(workspace, "./" + pat)
for name in glob.glob(abspat):
names.append(os.path.normpath(name))
return names
class ArchiveSystreeAction(ArchiveBaseAction): # pragma: no cover
-
def get_dirname(self, env):
return env.get_systree_directory()
class ArchiveWorkspaceAction(ArchiveBaseAction): # pragma: no cover
-
def get_dirname(self, env):
return env.get_workspace_directory()
@@ -371,24 +363,26 @@ class PopulateActionBase(Action): # pragma: no cover
env = self.get_env()
name = step.get(self.step_field)
- if not name or name == 'auto':
- name_name = step.get('name_from', self.param_name)
+ if not name or name == "auto":
+ name_name = step.get("name_from", self.param_name)
name = params.get(name_name)
if not name:
- msg = '{} in action is {}, but no {} parameter\n'.format(
- self.step_field, name, name_name)
+ msg = "{} in action is {}, but no {} parameter\n".format(
+ self.step_field, name, name_name
+ )
env.report(1, msg)
return 1
- env.report(None, 'Using {} for artifact name\n'.format(name))
+ env.report(None, "Using {} for artifact name\n".format(name))
dirname = self.get_unpack_directory(env)
make_directory_empty(env, dirname)
exit_code = self.download_and_unpack_artifact(name, dirname)
new_code = self.mangle_exit_code(exit_code)
env.report(
- new_code, '{} finished (exit_code {} -> {})\n'.format(
- str(self), exit_code, new_code))
+ new_code,
+ "{} finished (exit_code {} -> {})\n".format(str(self), exit_code, new_code),
+ )
return new_code
def get_unpack_directory(self, env):
@@ -400,12 +394,13 @@ class PopulateActionBase(Action): # pragma: no cover
def download_and_unpack_artifact(self, name, dirname):
url = self.get_blob_upload_url(name)
headers = self.get_authz_headers()
- curl = ['curl', '-sk'] + [
- '-H{}:{}'.format(name, value)
- for name, value in headers.items()
- ] + [url]
+ curl = (
+ ["curl", "-sk"]
+ + ["-H{}:{}".format(name, value) for name, value in headers.items()]
+ + [url]
+ )
- untar = ['sudo', 'tar', '-zxf', '-', '-C', dirname]
+ untar = ["sudo", "tar", "-zxf", "-", "-C", dirname]
env = self.get_env()
return env.host_runcmd(curl, untar)
@@ -413,11 +408,11 @@ class PopulateActionBase(Action): # pragma: no cover
class PopulateSystreeAction(PopulateActionBase): # pragma: no cover
- step_field = 'systree_name'
- param_name = 'systree_name'
+ step_field = "systree_name"
+ param_name = "systree_name"
def __str__(self):
- return 'populate-systree'
+ return "populate-systree"
def get_unpack_directory(self, env):
return env.get_systree_directory()
@@ -428,11 +423,11 @@ class PopulateSystreeAction(PopulateActionBase): # pragma: no cover
class PopulateWorkspaceAction(PopulateActionBase): # pragma: no cover
- step_field = 'workspace_name'
- param_name = 'workspace_name'
+ step_field = "workspace_name"
+ param_name = "workspace_name"
def __str__(self):
- return 'populate-workspace'
+ return "populate-workspace"
def get_unpack_directory(self, env):
return env.get_workspace_directory()
@@ -443,7 +438,6 @@ class PopulateWorkspaceAction(PopulateActionBase): # pragma: no cover
class GitAction(Action): # pragma: no cover
-
def encode_parameters(self, params):
pass
@@ -451,44 +445,43 @@ class GitAction(Action): # pragma: no cover
env = self.get_env()
workspace = env.get_workspace_directory()
- git_dir = params.get('git_dir')
+ git_dir = params.get("git_dir")
if git_dir is None:
- env.report(1, 'git_dir not provided\n')
- if git_dir.startswith('/') or '..' in git_dir:
- env.report(1, 'git_dir not acceptable\n')
+ env.report(1, "git_dir not provided\n")
+ if git_dir.startswith("/") or ".." in git_dir:
+ env.report(1, "git_dir not acceptable\n")
- git_url = params.get('git_url')
+ git_url = params.get("git_url")
if git_url is None:
- env.report(1, 'git_url not provided\n')
+ env.report(1, "git_url not provided\n")
pathname = os.path.join(workspace, git_dir)
if os.path.exists(pathname):
- argv = ['git', 'remote', '-v', 'update', '--prune']
+ argv = ["git", "remote", "-v", "update", "--prune"]
cwd = pathname
else:
- argv = ['git', 'clone', '-v', git_url, git_dir]
+ argv = ["git", "clone", "-v", git_url, git_dir]
cwd = workspace
exit_code = env.host_runcmd(argv, cwd=cwd)
- env.report(exit_code, 'git finished (exit code %d)\n' % exit_code)
+ env.report(exit_code, "git finished (exit code %d)\n" % exit_code)
return exit_code
class GitMirrorAction(Action): # pragma: no cover
-
def encode_parameters(self, params):
pass
def execute(self, params, step):
env = self.get_env()
workspace = env.get_workspace_directory()
- mirrors = os.path.join(workspace, '.mirrors')
+ mirrors = os.path.join(workspace, ".mirrors")
- if step.get('where') != 'host':
+ if step.get("where") != "host":
env.report(1, '"where" must be "host"\n')
return 1
- sources = params.get('sources')
+ sources = params.get("sources")
if sources is None:
env.report(1, '"sources" parameter not provided\n')
return 1
@@ -496,17 +489,15 @@ class GitMirrorAction(Action): # pragma: no cover
try:
exit_code = self.git_mirror(env, sources, mirrors)
except Exception as e:
- env.report(1, 'Caught exception: {}\n'.format(e))
+ env.report(1, "Caught exception: {}\n".format(e))
return 1
- env.report(
- exit_code,
- 'git mirror action finished (exit code %d)\n' % exit_code)
+ env.report(exit_code, "git mirror action finished (exit code %d)\n" % exit_code)
return exit_code
def git_mirror(self, env, sources, mirrors):
if not os.path.exists(mirrors):
- env.report(None, 'mkdir {}\n'.format(mirrors))
+ env.report(None, "mkdir {}\n".format(mirrors))
os.mkdir(mirrors)
checked = self.check_sources(sources)
@@ -519,8 +510,8 @@ class GitMirrorAction(Action): # pragma: no cover
def check_sources(self, sources):
checked = []
for source in sources:
- name = source.get('name')
- repo = source.get('repo')
+ name = source.get("name")
+ repo = source.get("repo")
if name is None:
raise Exception('source lacks "name" field: {}'.format(source))
if repo is None:
@@ -529,27 +520,26 @@ class GitMirrorAction(Action): # pragma: no cover
return checked
def mirror(self, env, mirrors, name, url):
- env.report(None, 'git_mirror: mirrors: {}\n'.format(mirrors))
- env.report(None, 'git_mirror: name: {}\n'.format(name))
- env.report(None, 'git_mirror: url: {}\n'.format(url))
+ env.report(None, "git_mirror: mirrors: {}\n".format(mirrors))
+ env.report(None, "git_mirror: name: {}\n".format(name))
+ env.report(None, "git_mirror: url: {}\n".format(url))
dirname = os.path.join(mirrors, name)
- env.report(None, 'git_mirror: dirname: {}\n'.format(dirname))
+ env.report(None, "git_mirror: dirname: {}\n".format(dirname))
if os.path.exists(dirname):
- argv = ['git', 'remote', 'update', '--prune']
+ argv = ["git", "remote", "update", "--prune"]
cwd = dirname
else:
- argv = ['git', 'clone', '--mirror', url, name]
+ argv = ["git", "clone", "--mirror", url, name]
cwd = mirrors
os.mkdir(dirname)
- env.report(None, 'Running: {} in {}\n'.format(argv, cwd))
+ env.report(None, "Running: {} in {}\n".format(argv, cwd))
return env.host_runcmd(argv, cwd=cwd)
class RsyncAction(Action): # pragma: no cover
-
def encode_parameters(self, params):
pass
@@ -557,40 +547,41 @@ class RsyncAction(Action): # pragma: no cover
env = self.get_env()
workspace = env.get_workspace_directory()
- rsync_src = params.get('rsync_src')
+ rsync_src = params.get("rsync_src")
if rsync_src is None:
- env.report(1, 'rsync_src not provided\n')
+ env.report(1, "rsync_src not provided\n")
if not self._is_relative(rsync_src):
- env.report(1, 'rsync_src not acceptable\n')
+ env.report(1, "rsync_src not acceptable\n")
- rsync_target = params.get('rsync_target')
+ rsync_target = params.get("rsync_target")
if rsync_target is None:
- env.report(1, 'git_url not provided\n')
+ env.report(1, "git_url not provided\n")
if not self._remote(rsync_target):
- env.report(1, 'rsync_target not acceptable\n')
+ env.report(1, "rsync_target not acceptable\n")
argv = [
- 'rsync', '-av', '--delete-after',
- './{}/.'.format(rsync_src),
- '{}/.'.format(rsync_target),
+ "rsync",
+ "-av",
+ "--delete-after",
+ "./{}/.".format(rsync_src),
+ "{}/.".format(rsync_target),
]
exit_code = env.host_runcmd(argv, cwd=workspace)
- env.report(exit_code, 'rsync finished (exit code %d)\n' % exit_code)
+ env.report(exit_code, "rsync finished (exit code %d)\n" % exit_code)
return exit_code
def _is_relative(self, src):
- if src.startswith('/'):
+ if src.startswith("/"):
return False
- if '../' in src:
+ if "../" in src:
return False
return True
def _remote(self, target):
- return ':' in target
+ return ":" in target
class DputAction(Action): # pragma: no cover
-
def encode_parameters(self, params):
pass
@@ -600,12 +591,12 @@ class DputAction(Action): # pragma: no cover
apt_server = self._cc.get_apt_server()
config = self.get_dput_config(apt_server)
- logging.debug('dput config:\n%s', config)
+ logging.debug("dput config:\n%s", config)
filename = self.create_dput_config_file(config)
- argv = ['sh', '-c', 'dput -c {} ick *.changes'.format(filename)]
+ argv = ["sh", "-c", "dput -c {} ick *.changes".format(filename)]
exit_code = env.host_runcmd(argv, cwd=workspace)
- env.report(exit_code, 'dput finished (exit code %d)\n' % exit_code)
+ env.report(exit_code, "dput finished (exit code %d)\n" % exit_code)
os.remove(config)
return exit_code
@@ -617,7 +608,7 @@ class DputAction(Action): # pragma: no cover
return filename
def get_dput_config(self, apt_server):
- template = '''\
+ template = """\
[ick]
login = incoming
fqdn = {apt_server}
@@ -626,12 +617,11 @@ incoming = /srv/apt/incoming
allow_unsigned_uploads = 1
check_version = 0
run_dinstall = 0
-'''
+"""
return template.format(apt_server=apt_server)
class NotifyAction(Action): # pragma: no cover
-
def encode_parameters(self, params):
pass
@@ -641,45 +631,43 @@ class NotifyAction(Action): # pragma: no cover
assert cc is not None
build_id = self.get_build_id()
- env.report(None, 'Notifying about build ending\n')
+ env.report(None, "Notifying about build ending\n")
- build_path = '/builds/{}'.format(build_id)
+ build_path = "/builds/{}".format(build_id)
build = cc.show(build_path)
- params = build.get('parameters', {})
- if 'notify' not in params:
- env.report(
- 0,
- 'NOT notifying about build ending: no "notify" parameter.\n')
+ params = build.get("parameters", {})
+ if "notify" not in params:
+ env.report(0, 'NOT notifying about build ending: no "notify" parameter.\n')
return
- recipients = params['notify']
+ recipients = params["notify"]
log = cc.get_log(build_id)
- log = log.decode('utf-8')
+ log = log.decode("utf-8")
+ log = "\n".join(log.splitlines()[-1000:])
notify = {
- 'recipients': recipients,
- 'build': self.mangle_build(build),
- 'log': log,
+ "recipients": recipients,
+ "build": self.mangle_build(build),
+ "log": log,
}
cc.notify(notify)
- env.report(0, 'Notified about build {} ending\n'.format(build_id))
+ env.report(0, "Notified about build {} ending\n".format(build_id))
def mangle_build(self, build):
b = copy.deepcopy(build)
- exit_code = build.get('exit_code')
+ exit_code = build.get("exit_code")
if exit_code is None:
- b['status'] = 'BUILDING'
+ b["status"] = "BUILDING"
elif exit_code == 0:
- b['status'] = 'SUCCESS'
+ b["status"] = "SUCCESS"
else:
- b['status'] = 'FAILED'
+ b["status"] = "FAILED"
return b
def make_directory_empty(env, dirname):
- return env.runcmd(
- ['sudo', 'find', dirname, '-mindepth', '1', '-delete'])
+ return env.runcmd(["sudo", "find", dirname, "-mindepth", "1", "-delete"])
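
The quoting changes above leave the step-dispatch logic of
`ActionFactory._create_action_object` intact. Roughly, a step specification maps
to an action as in this standalone sketch, where the returned strings stand in
for ick2's real Action subclasses:

    # Simplified, self-contained sketch of the dispatch rules shown above;
    # the strings stand in for ShellAction, PythonAction, and friends.
    def pick_action(spec):
        for key in ("shell", "python", "debootstrap"):
            if key in spec:
                return key  # ShellAction, PythonAction, DebootstrapAction
        if "archive" in spec:
            return "archive:" + spec["archive"]  # workspace or systree
        if "action" in spec:
            return "action:" + spec["action"]  # git, git_mirror, rsync, ...
        raise ValueError("Unknown action %r" % spec)

    assert pick_action({"shell": "make check"}) == "shell"
    assert pick_action({"action": "git_mirror", "where": "host"}) == "action:git_mirror"
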
diff --git a/ick2/buildsapi.py b/ick2/buildsapi.py
index 8862c63..2efc3d1 100644
--- a/ick2/buildsapi.py
+++ b/ick2/buildsapi.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Lars Wirzenius
+# Copyright (C) 2017-2019 Lars Wirzenius
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
@@ -60,7 +60,7 @@ class BuildsAPI(ick2.ResourceApiBase): # pragma: no cover
raise ick2.MethodNotAllowed('Updating builds directly is not allowed')
def list(self, **kwargs):
- result = super().list()
+ result = super().list(**kwargs)
items = result[self._type_name]
items.sort(key=lambda x: x.get('build_number'))
result[self._type_name] = items
diff --git a/ick2/buildsm.py b/ick2/buildsm.py
index 9177e09..8d71fde 100644
--- a/ick2/buildsm.py
+++ b/ick2/buildsm.py
@@ -17,15 +17,14 @@
import ick2
-BUILD_TRIGGERED = 'triggered'
-BUILD_BUILDING = 'building'
-BUILD_NOTIFYING = 'notifying'
-BUILD_DONE = 'done'
-BUILD_FAILED = 'failed'
+BUILD_TRIGGERED = "triggered"
+BUILD_BUILDING = "building"
+BUILD_NOTIFYING = "notifying"
+BUILD_DONE = "done"
+BUILD_FAILED = "failed"
class StateMachine:
-
def __init__(self, get_state, set_state):
self.transitions = {}
self.get_state = get_state
@@ -48,7 +47,6 @@ class StateMachine:
class BuildStateMachine:
-
def __init__(self, build):
self.build = build
self.sm = self.init_sm()
@@ -80,10 +78,10 @@ class BuildStateMachine:
return sm
def get_state(self):
- return self.build.resource['status']
+ return self.build.resource["status"]
def set_state(self, state):
- self.build.resource['status'] = state
+ self.build.resource["status"] = state
def handle_event(self, event):
old_state = self.get_state()
@@ -101,7 +99,7 @@ class BuildStateMachine:
graph = self.build.get_graph()
action_ids = graph.find_actions(ick2.ACTION_READY)
if not action_ids: # pragma: no cover
- self.build.resource['exit_code'] = 0
+ self.build.resource["exit_code"] = 0
return BUILD_DONE, None
action_id = action_ids[0]
@@ -113,7 +111,7 @@ class BuildStateMachine:
graph = self.build.get_graph()
action_ids = graph.find_actions(ick2.ACTION_READY)
if not action_ids: # pragma: no cover
- self.build.resource['exit_code'] = 0
+ self.build.resource["exit_code"] = 0
return BUILD_DONE, None
action_id = action_ids[0]
@@ -122,34 +120,32 @@ class BuildStateMachine:
return BUILD_NOTIFYING, (action_id, action)
def mark_action_done(self, event):
- self.build.resource['exit_code'] = event.exit_code
+ self.build.resource["exit_code"] = event.exit_code
graph = self.build.get_graph()
graph.set_action_status(event.action_id, ick2.ACTION_DONE)
graph.unblock()
if graph.has_more_to_do():
return BUILD_BUILDING, None
-
- self.add_notification_action()
- return BUILD_NOTIFYING, None
+ return BUILD_DONE, None
def add_notification_action(self):
action = {
- 'action': 'notify',
+ "action": "notify",
}
graph = self.build.get_graph()
graph.append_action(action, ick2.ACTION_READY, depends=[])
def mark_notification_done(self, event):
if event.exit_code not in (0, None): # pragma: no cover
- self.build.resource['exit_code'] = event.exit_code
+ self.build.resource["exit_code"] = event.exit_code
graph = self.build.get_graph()
graph.set_action_status(event.action_id, ick2.ACTION_DONE)
graph.unblock()
if graph.has_more_to_do(): # pragma: no cover
return BUILD_NOTIFYING, None
- if self.build.resource.get('exit_code') in (0, None):
- self.build.resource['exit_code'] = 0
+ if self.build.resource.get("exit_code") in (0, None):
+ self.build.resource["exit_code"] = 0
return BUILD_DONE, None
return BUILD_FAILED, None
@@ -157,9 +153,9 @@ class BuildStateMachine:
def mark_build_failed(self, event):
graph = self.build.get_graph()
graph.set_action_status(event.action_id, ick2.BUILD_FAILED)
- self.build.resource['exit_code'] = event.exit_code
+ self.build.resource["exit_code"] = event.exit_code
self.add_notification_action()
- return BUILD_NOTIFYING, None
+ return BUILD_FAILED, None
# Thing should be something we can create a BuildEvent from.
@@ -171,8 +167,8 @@ def create_build_event(thing):
return NeedWorkEvent()
if isinstance(thing, dict):
- exit_code = thing.get('exit_code')
- action_id = thing.get('action_id')
+ exit_code = thing.get("exit_code")
+ action_id = thing.get("action_id")
if exit_code is None:
return PartialActionOutputEvent()
if exit_code == 0:
@@ -181,31 +177,26 @@ def create_build_event(thing):
class BuildEvent: # pragma: no cover
-
- event_type = 'BuildEvent'
+ event_type = "BuildEvent"
def __str__(self):
return self.event_type
class BuildStartsEvent(BuildEvent):
-
- event_type = 'build-starts'
+ event_type = "build-starts"
class NeedWorkEvent(BuildEvent):
-
- event_type = 'need-work'
+ event_type = "need-work"
class PartialActionOutputEvent(BuildEvent):
-
- event_type = 'partial-output'
+ event_type = "partial-output"
class ActionFinishedEvent(BuildEvent):
-
- event_type = 'action-finished'
+ event_type = "action-finished"
def __init__(self, action_id):
self.action_id = action_id
@@ -213,8 +204,7 @@ class ActionFinishedEvent(BuildEvent):
class ActionFailedEvent(BuildEvent):
-
- event_type = 'action-failed'
+ event_type = "action-failed"
def __init__(self, action_id, exit_code):
self.action_id = action_id
@@ -222,6 +212,5 @@ class ActionFailedEvent(BuildEvent):
class UnexpectedEvent(Exception): # pragma: no cover
-
def __init__(self, event, state):
- super().__init__('Did not expect %s in %s' % (event, state))
+ super().__init__("Did not expect %s in %s" % (event, state))
diff --git a/ick2/notificationapi.py b/ick2/notificationapi.py
index ab261bd..877b929 100644
--- a/ick2/notificationapi.py
+++ b/ick2/notificationapi.py
@@ -17,29 +17,32 @@
import ick2
-class NotificationAPI:
+N = 1000  # Max number of lines to include in log
+
+class NotificationAPI:
def __init__(self, config):
self._config = config
def find_missing_route(self, missing_path):
return [
{
- 'method': 'POST',
- 'path': '/notify',
- 'callback': self.notify,
+ "method": "POST",
+ "path": "/notify",
+ "callback": self.notify,
},
]
def notify(self, content_type, body, **kwargs):
- ick2.log.log('info', msg_text='Notification requested', kwargs=kwargs)
+ ick2.log.log("info", msg_text="Notification requested", kwargs=kwargs)
- recipients = body.get('recipients', [])
- build = body.get('build', {})
- log = body.get('log', '')
+ recipients = body.get("recipients", [])
+ build = body.get("build", {})
+ log = body.get("log", "")
+ log = "".join(f"{line}\n" for line in log.splitlines()[-N:])
sendmail = ick2.Sendmail()
sendmail.set_config(self._config)
sendmail.send(recipients, build, log)
- return ick2.OK('')
+ return ick2.OK("")
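
The new `N` constant caps how much of the build log goes into a notification. A
minimal illustration of the truncation expression used above, with a smaller `N`
so the example stays short:

    # Keep only the last N lines of the log, as in NotificationAPI.notify.
    N = 3  # the real code uses 1000
    log = "one\ntwo\nthree\nfour\nfive\n"
    log = "".join(f"{line}\n" for line in log.splitlines()[-N:])
    assert log == "three\nfour\nfive\n"
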
diff --git a/ick2/persistent.py b/ick2/persistent.py
index c5e2840..1d79e3d 100644
--- a/ick2/persistent.py
+++ b/ick2/persistent.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Lars Wirzenius
+# Copyright (C) 2018-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -27,9 +27,6 @@ import ick2
class PersistentStateInterface: # pragma: no cover
- def get_resource_kinds(self):
- raise NotImplementedError()
-
def get_resource_ids(self, kind):
raise NotImplementedError()
@@ -79,9 +76,6 @@ class FilePersistentState(PersistentStateInterface):
dirname = self._dirname(kind)
return os.path.join(dirname, self._safe(rid))
- def get_resource_kinds(self):
- return self._unsafe_list(os.listdir(self._dir))
-
def has_resource(self, kind, rid):
filename = self._filename(kind, rid)
return os.path.exists(filename)
diff --git a/ick2/persistent_tests.py b/ick2/persistent_tests.py
index 8acb141..de279a1 100644
--- a/ick2/persistent_tests.py
+++ b/ick2/persistent_tests.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Lars Wirzenius
+# Copyright (C) 2018-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -35,9 +35,6 @@ class FilePersistentStateTests(unittest.TestCase):
def test_returns_dirname(self):
self.assertEqual(self.state.get_directory(), self.tempdir)
- def test_has_no_resource_kinds_initially(self):
- self.assertEqual(self.state.get_resource_kinds(), [])
-
def test_has_no_resources_initially(self):
self.assertEqual(self.state.get_resource_ids('silly'), [])
@@ -50,7 +47,6 @@ class FilePersistentStateTests(unittest.TestCase):
r = ick2.resource_from_dict(as_dict)
self.state.write_resource('silly', '#1', r)
self.assertTrue(self.state.has_resource('silly', '#1'))
- self.assertEqual(self.state.get_resource_kinds(), ['silly'])
self.assertEqual(self.state.get_resource_ids('silly'), ['#1'])
r2 = self.state.get_resource('silly', '#1')
diff --git a/ick2/trans.py b/ick2/trans.py
index f1c8e5e..c5dc22f 100644
--- a/ick2/trans.py
+++ b/ick2/trans.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2018 Lars Wirzenius
+# Copyright (C) 2018-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -59,9 +59,6 @@ class TransactionalState:
raise ick2.NotFound(kind=kind, rid=rid)
return TransactionalResource(self.state, kind, rid)
- def get_resource_kinds(self):
- return self.state.get_resource_kinds()
-
def get_resource_ids(self, kind):
return self.state.get_resource_ids(kind)
diff --git a/ick2/version.py b/ick2/version.py
index 4c6e147..27b4c9b 100644
--- a/ick2/version.py
+++ b/ick2/version.py
@@ -1,2 +1,2 @@
-__version__ = "0.53.2+git"
-__version_info__ = (0, 53, 2, '+git')
+__version__ = "0.55+git"
+__version_info__ = (0, 55, '+git')
diff --git a/notification_service.py b/notification_service.py
index bd6bad1..f8e5c55 100644
--- a/notification_service.py
+++ b/notification_service.py
@@ -1,5 +1,5 @@
#!/usr/bin/python3
-# Copyright (C) 2018 Lars Wirzenius
+# Copyright (C) 2018-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -123,4 +123,4 @@ app = main()
if __name__ == '__main__':
print('running in debug mode')
- app.run(host='127.0.0.1', port=12767)
+ app.run(host='127.0.0.1', port=6666)
diff --git a/pipelines/systrees.ick b/pipelines/systrees.ick
index b6da798..bd52b1c 100644
--- a/pipelines/systrees.ick
+++ b/pipelines/systrees.ick
@@ -18,6 +18,13 @@ pipelines:
- packages
- artifact_name
actions:
+ - shell: |
+ lsb_release -a
+ dpkg -l debootstrap
+ sudo chown root:root .
+ ls -la
+ where: host
+
- debootstrap: auto
mirror: http://deb.debian.org/debian
where: host
diff --git a/setup.py b/setup.py
index 39a7977..fc4d70b 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,5 @@
#!/usr/bin/python3
-# Copyright (C) 2017-2018 Lars Wirzenius <liw@liw.fi>
+# Copyright (C) 2017-2019 Lars Wirzenius <liw@liw.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -53,7 +53,6 @@ setup(
],
packages=['ick2'],
scripts=[
- 'create-token',
'start_ick',
'start_artifact_store',
'start_notification_service',
diff --git a/start_ick b/start_ick
index 8c9d50f..9939e01 100755
--- a/start_ick
+++ b/start_ick
@@ -1,5 +1,5 @@
#!/bin/sh
-# Copyright (C) 2017-2018 Lars Wirzenius
+# Copyright (C) 2017-2019 Lars Wirzenius
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -26,7 +26,7 @@ else
fi
gunicorn3 \
- --bind 127.0.0.1:12765 \
+ --bind 127.0.0.1:3333 \
--log-file "$GUNICORN_LOG" \
--log-level debug \
ick_controller:app
diff --git a/yarns/000.yarn b/yarns/000.yarn
index d5db1a1..12bdea2 100644
--- a/yarns/000.yarn
+++ b/yarns/000.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017 Lars Wirzenius
+Copyright 2017,2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -34,30 +34,25 @@ system. Written for execution by [yarn][].
## Running this test suite
-This test suite tests an Ick2 controller, and can either run in a
-local or remote mode. In local mode, each test scenario starts and
-stops a local instance, and runs tests against that. In remote mode,
-an existing, running controller instance is assumed, and tests are run
-against that.
+This test suite tests a deployed Ick2 controller and other components of Ick,
+but not the workers. The deployed Ick2 must not have any workers, and
+must be "empty", meaning, no project, pipelines, etc, must be defined.
+The test suit deletes everything.
-The `./check` script runs the tests. By default it runs in local mode.
-Local mode can be specified explicitly with the `local` parameter:
+The `./check` script runs the tests. With no arguments it runs only the
+local tests, which are mainly unit tests and code health checks.
EXAMPLE running the test suite in local mode
./check
- ./check local
- ./check local -v --tempdir tmp --snapshot
`./check` can be given extra arguments, which it will pass on to
-`yarn`.
-
-To run the tests in remote mode, give the controller URL:
+`yarn`. To test a remote Ick instance, which must not have workers, give
+the controller URL as the first argument:
EXAMPLE running the test suite against a remote instance
- ./check https://ick-controller --env ICK_PRIVATE_KEY=~/tmp/ick.key
- ./check https://ick-controller --env ICK_PRIVATE_KEY=~/tmp/ick.key \
- -v --tempdir tmp --snapshot
+ ./check https://ick-controller
+ ./check https://ick-controller -v --tempdir tmp --snapshot
-The URL **must** be an `https` URL. Additionally, the environment
-variable `ICK_PRIVATE_KEY` must be given a path to the *private* key
-for signing tokens, so that a new token can be generated.
+The URL **must** be an `https` URL. `qvisqvetool` must be configured
+to support the given Ick instance, so that test clients for API use can
+be managed by yarn automatically.
diff --git a/yarns/100-projects.yarn b/yarns/100-projects.yarn
index b75dea4..4c5291f 100644
--- a/yarns/100-projects.yarn
+++ b/yarns/100-projects.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -46,18 +46,13 @@ First we test the controller API for managing projects, without
building them. We start by starting an instance of the controller.
SCENARIO managing projects
- GIVEN an RSA key pair for token signing
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
... uapi_pipelines_post
... uapi_projects_get
... uapi_projects_post
... uapi_projects_id_get
... uapi_projects_id_put
... uapi_projects_id_delete
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
AND a running ick controller
WHEN user makes request GET /projects
diff --git a/yarns/150-pipelines.yarn b/yarns/150-pipelines.yarn
index d828935..6a303eb 100644
--- a/yarns/150-pipelines.yarn
+++ b/yarns/150-pipelines.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -58,17 +58,12 @@ First we test the controller API for managing pipelines, without
running them. We start by starting an instance of the controller.
SCENARIO managing pipelines
- GIVEN an RSA key pair for token signing
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
... uapi_pipelines_get
... uapi_pipelines_post
... uapi_pipelines_id_get
... uapi_pipelines_id_put
... uapi_pipelines_id_delete
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
AND a running ick controller
WHEN user makes request GET /pipelines
diff --git a/yarns/200-version.yarn b/yarns/200-version.yarn
index 710a57a..7fceef2 100644
--- a/yarns/200-version.yarn
+++ b/yarns/200-version.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -22,21 +22,13 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
The Ick controller reports its version upon request.
SCENARIO checking controller version
- GIVEN an RSA key pair for token signing
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
... uapi_version_get
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
AND a running ick controller
WHEN user makes request GET /version
THEN result has status code 200
AND version in body matches version from setup.py
- AND artifact store URL is https://blobs.example.com
- AND authentication URL is https://auth.example.com
- AND notify URL is https://notify.example.com
FINALLY stop ick controller
diff --git a/yarns/300-workers.yarn b/yarns/300-workers.yarn
index 6399b20..cea6c81 100644
--- a/yarns/300-workers.yarn
+++ b/yarns/300-workers.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -52,18 +52,13 @@ Note that this only tests managing information about workers via the
controller API. It doesn't actually talk to the worker itself.
SCENARIO managing workers
- GIVEN an RSA key pair for token signing
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
... uapi_workers_get
... uapi_workers_id_get
... uapi_workers_id_put
... uapi_workers_id_delete
AND an access token for obelix with scopes
... uapi_workers_post
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
AND a running ick controller
WHEN user makes request GET /workers
@@ -72,7 +67,6 @@ controller API. It doesn't actually talk to the worker itself.
WHEN obelix makes request POST /workers with a valid token and body
... {
- ... "worker": "obelix",
... "protocol": "ssh",
... "address": "obelix.ick.example",
... "user": "ick",
@@ -81,9 +75,10 @@ controller API. It doesn't actually talk to the worker itself.
... }
... }
THEN result has status code 201
+ AND worker id is OBELIX
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "ssh",
... "address": "obelix.ick.example",
... "user": "ick",
@@ -99,7 +94,7 @@ controller API. It doesn't actually talk to the worker itself.
... {
... "workers": [
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "ssh",
... "address": "obelix.ick.example",
... "user": "ick",
@@ -112,11 +107,11 @@ controller API. It doesn't actually talk to the worker itself.
WHEN user stops ick controller
GIVEN a running ick controller
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "ssh",
... "address": "obelix.ick.example",
... "user": "ick",
@@ -125,10 +120,10 @@ controller API. It doesn't actually talk to the worker itself.
... }
... }
- WHEN user makes request PUT /workers/obelix with a valid token
+ WHEN user makes request PUT /workers/${OBELIX} with a valid token
... and body
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "local",
... "keywords": {
... "debian_codename": "unstable"
@@ -137,7 +132,7 @@ controller API. It doesn't actually talk to the worker itself.
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "local",
... "keywords": {
... "debian_codename": "unstable"
@@ -145,20 +140,20 @@ controller API. It doesn't actually talk to the worker itself.
... }
AND controller state directory contains worker obelix
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "protocol": "local",
... "keywords": {
... "debian_codename": "unstable"
... }
... }
- WHEN user makes request DELETE /workers/obelix
+ WHEN user makes request DELETE /workers/${OBELIX}
THEN result has status code 200
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 404
FINALLY stop ick controller
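
The scenario now lets the controller assign the worker id on registration
(captured by the yarns as `${OBELIX}`) instead of the client sending
`"worker": "obelix"`. A rough sketch of the same registration outside yarn,
using Python's `requests`; the controller URL and token are placeholders and
the request body is abridged:

    # Hypothetical client-side registration mirroring the scenario above.
    # CONTROLLER and TOKEN are placeholders, not real configuration.
    import requests

    CONTROLLER = "https://ick-controller.example.com"
    TOKEN = "..."  # bearer token with the uapi_workers_post scope

    r = requests.post(
        CONTROLLER + "/workers",
        headers={"Authorization": "Bearer {}".format(TOKEN)},
        json={
            "protocol": "ssh",
            "address": "obelix.ick.example",
            "user": "ick",
        },
    )
    assert r.status_code == 201
    worker_id = r.json()["worker"]  # controller-assigned id, i.e. ${OBELIX}
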
diff --git a/yarns/400-build.yarn b/yarns/400-build.yarn
index 5172ba0..13eefce 100644
--- a/yarns/400-build.yarn
+++ b/yarns/400-build.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -25,20 +25,23 @@ This scenario tests the controller API to simulate a build.
Set up the controller.
- GIVEN an RSA key pair for token signing
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
... uapi_pipelines_post
+ ... uapi_pipelines_get
+ ... uapi_pipelines_id_delete
... uapi_projects_post
+ ... uapi_projects_get
+ ... uapi_projects_id_delete
... uapi_projects_id_status_put
... uapi_projects_id_status_get
... uapi_projects_id_builds_get
+ ... uapi_workers_id_delete
... uapi_workers_id_get
... uapi_builds_get
+ ... uapi_builds_id_delete
... uapi_builds_id_get
+ ... uapi_logs_get
+ ... uapi_logs_id_delete
... uapi_logs_id_get
AND a running ick controller
@@ -105,6 +108,7 @@ Register a worker.
... {
... }
THEN result has status code 201
+ AND worker id is OBELIX
Trigger build of project that doesn't exist.
@@ -135,7 +139,7 @@ the worker to construct a new workspace for the build.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -154,7 +158,7 @@ the worker to construct a new workspace for the build.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -168,16 +172,16 @@ the worker to construct a new workspace for the build.
User can now see pipeline is running and which worker is building it.
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "doing": {
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -199,7 +203,7 @@ User can now see pipeline is running and which worker is building it.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "graph": {
... "1": {
@@ -238,7 +242,7 @@ Worker reports workspace creation is done. Note the zero exit code.
... {
... "build_id": "rome/1",
... "action_id": "1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "",
@@ -256,7 +260,7 @@ Worker requests more work, and gets the first actual build step.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -275,7 +279,7 @@ hasn't finished yet.
... {
... "build_id": "rome/1",
... "action_id": "2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": null,
... "stdout": "hey ho",
@@ -294,7 +298,7 @@ didn't finish.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -319,7 +323,7 @@ Report the step is done, and successfully.
... {
... "build_id": "rome/1",
... "action_id": "2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": ", hey ho\n",
@@ -344,7 +348,7 @@ The build status now shows the next step as the active one.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -382,7 +386,7 @@ Now there's another step to do.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -396,15 +400,15 @@ Now there's another step to do.
User sees changed status.
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "doing": {
... "build_id": "rome/1",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -424,7 +428,7 @@ Report it done.
... {
... "build_id": "rome/1",
... "action_id": "3",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "to the gold mine we go!\n",
@@ -441,7 +445,7 @@ Worker now gets told to notify about the build.
... {
... "build_id": "rome/1",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -459,7 +463,7 @@ Report it's done.
... {
... "build_id": "rome/1",
... "action_id": "4",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "",
@@ -486,7 +490,7 @@ current action.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -526,7 +530,7 @@ current action.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -575,7 +579,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/2",
... "build_number": 2,
... "log": "/logs/rome/2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -596,7 +600,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -630,7 +634,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/2",
... "build_number": 2,
... "log": "/logs/rome/2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -662,7 +666,7 @@ Start build again. This should become build number 2.
... {
... "build_id": "rome/2",
... "action_id": "1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "",
@@ -678,7 +682,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/2",
... "build_number": 2,
... "log": "/logs/rome/2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -694,7 +698,7 @@ Start build again. This should become build number 2.
... {
... "build_id": "rome/2",
... "action_id": "2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "hey ho",
@@ -710,7 +714,7 @@ Start build again. This should become build number 2.
... {
... "build_id": "rome/2",
... "action_id": "3",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "hey ho",
@@ -725,7 +729,7 @@ Start build again. This should become build number 2.
... {
... "build_id": "rome/2",
... "build_number": 2,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -741,7 +745,7 @@ Start build again. This should become build number 2.
... {
... "build_id": "rome/2",
... "action_id": "4",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "",
@@ -763,7 +767,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -797,7 +801,7 @@ Start build again. This should become build number 2.
... "build_id": "rome/2",
... "build_number": 2,
... "log": "/logs/rome/2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {
... "foo": "bar"
@@ -830,6 +834,29 @@ Start build again. This should become build number 2.
... ]
... }
+
+ WHEN user makes request DELETE /projects/bad_rome
+ AND user makes request DELETE /projects/rome
+ AND user makes request DELETE /projects/constantinople
+ AND user makes request DELETE /pipelines/construct
+ AND user makes request DELETE /workers/${OBELIX}
+ AND user makes request DELETE /builds/rome/1
+ AND user makes request DELETE /builds/rome/2
+ AND user makes request DELETE /logs/rome/1
+ AND user makes request DELETE /logs/rome/2
+
+ WHEN user makes request GET /projects
+ THEN body matches {"projects":[]}
+
+ WHEN user makes request GET /pipelines
+ THEN body matches {"pipelines":[]}
+
+ WHEN user makes request GET /builds
+ THEN body matches {"builds":[]}
+
+ WHEN user makes request GET /logs
+ THEN body matches {"log":[]}
+
FINALLY stop ick controller
@@ -841,20 +868,23 @@ This scenario tests the controller API to simulate a build.
Set up the controller.
- GIVEN an RSA key pair for token signing
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
+ ... uapi_pipelines_get
... uapi_pipelines_post
+ ... uapi_pipelines_id_delete
+ ... uapi_projects_get
... uapi_projects_post
+ ... uapi_projects_id_delete
... uapi_projects_id_status_put
... uapi_projects_id_status_get
... uapi_projects_id_builds_get
... uapi_workers_id_get
+ ... uapi_workers_id_delete
... uapi_builds_get
+ ... uapi_builds_id_delete
... uapi_builds_id_get
+ ... uapi_logs_get
+ ... uapi_logs_id_delete
... uapi_logs_id_get
AND a running ick controller
@@ -891,6 +921,7 @@ Register a worker.
... {
... }
THEN result has status code 201
+ AND worker id is OBELIX
Build the first project.
@@ -909,7 +940,7 @@ Build the first project.
... "build_id": "first/1",
... "action_id": "1",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -930,7 +961,7 @@ Build the first project.
... "build_id": "first/1",
... "action_id": "2",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -950,7 +981,7 @@ Build the first project.
... "build_id": "first/1",
... "action_id": "3",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -981,7 +1012,7 @@ Build second project.
... {
... "build_id": "second/1",
... "action_id": "1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1001,7 +1032,7 @@ Build second project.
... {
... "build_id": "second/1",
... "action_id": "2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1021,7 +1052,7 @@ Build second project.
... "build_id": "second/1",
... "action_id": "3",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1038,8 +1069,29 @@ Build second project.
Finish up.
- FINALLY stop ick controller
+ WHEN user makes request DELETE /projects/first
+ AND user makes request DELETE /projects/second
+ AND user makes request DELETE /pipelines/do_something
+ AND user makes request DELETE /workers/${OBELIX}
+ AND user makes request DELETE /builds/first/1
+ AND user makes request DELETE /builds/second/1
+ AND user makes request DELETE /logs/first/1
+ AND user makes request DELETE /logs/second/1
+
+ WHEN user makes request GET /projects
+ THEN body matches {"projects":[]}
+
+ WHEN user makes request GET /pipelines
+ THEN body matches {"pipelines":[]}
+
+ WHEN user makes request GET /builds
+ THEN body matches {"builds":[]}
+
+ WHEN user makes request GET /logs
+ THEN body matches {"log":[]}
+
+ FINALLY stop ick controller
# Build two projects concurrently
@@ -1049,20 +1101,23 @@ This scenario tests the controller API to simulate a build.
Set up the controller.
- GIVEN an RSA key pair for token signing
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
+ ... uapi_pipelines_get
... uapi_pipelines_post
+ ... uapi_pipelines_id_delete
+ ... uapi_projects_get
... uapi_projects_post
+ ... uapi_projects_id_delete
... uapi_projects_id_status_put
... uapi_projects_id_status_get
... uapi_projects_id_builds_get
... uapi_workers_id_get
+ ... uapi_workers_id_delete
... uapi_builds_get
+ ... uapi_builds_id_delete
... uapi_builds_id_get
+ ... uapi_logs_get
+ ... uapi_logs_id_delete
... uapi_logs_id_get
AND a running ick controller
@@ -1101,6 +1156,7 @@ Register a couple of workers.
... {
... }
THEN result has status code 201
+ AND worker id is ASTERIX
GIVEN an access token for obelix with scopes
... uapi_workers_post
@@ -1110,6 +1166,7 @@ Register a couple of workers.
... {
... }
THEN result has status code 201
+ AND worker id is OBELIX
Trigger both projects.
@@ -1147,7 +1204,7 @@ Trigger both projects.
... "build_id": "first/1",
... "action_id": "1",
... "build_number": 1,
- ... "worker": "asterix",
+ ... "worker": "${ASTERIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -1175,7 +1232,7 @@ Trigger both projects.
... "build_id": "second/1",
... "action_id": "1",
... "build_number": 1,
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1195,7 +1252,7 @@ Trigger both projects.
... {
... "build_id": "first/1",
... "action_id": "2",
- ... "worker": "asterix",
+ ... "worker": "${ASTERIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -1214,7 +1271,7 @@ Trigger both projects.
... {
... "build_id": "first/1",
... "action_id": "3",
- ... "worker": "asterix",
+ ... "worker": "${ASTERIX}",
... "project": "first",
... "exit_code": 0,
... "stdout": "",
@@ -1230,7 +1287,7 @@ Trigger both projects.
... {
... "build_id": "second/1",
... "action_id": "2",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1249,7 +1306,7 @@ Trigger both projects.
... {
... "build_id": "second/1",
... "action_id": "3",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "second",
... "exit_code": 0,
... "stdout": "",
@@ -1264,6 +1321,28 @@ Trigger both projects.
WHEN user requests list of builds
THEN the list of builds is ["first/1", "second/1"]
-Finish up.
+Finish up. Delete the resources we created.
+
+ WHEN user makes request DELETE /projects/first
+ AND user makes request DELETE /projects/second
+ AND user makes request DELETE /pipelines/do_something
+ AND user makes request DELETE /workers/${ASTERIX}
+ AND user makes request DELETE /workers/${OBELIX}
+ AND user makes request DELETE /builds/first/1
+ AND user makes request DELETE /builds/second/1
+ AND user makes request DELETE /logs/first/1
+ AND user makes request DELETE /logs/second/1
+
+ WHEN user makes request GET /projects
+ THEN body matches {"projects":[]}
+
+ WHEN user makes request GET /pipelines
+ THEN body matches {"pipelines":[]}
+
+ WHEN user makes request GET /builds
+ THEN body matches {"builds":[]}
+
+ WHEN user makes request GET /logs
+ THEN body matches {"log":[]}
FINALLY stop ick controller
diff --git a/yarns/500-build-fail.yarn b/yarns/500-build-fail.yarn
index 3373c2f..6ca06c6 100644
--- a/yarns/500-build-fail.yarn
+++ b/yarns/500-build-fail.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -26,20 +26,24 @@ build step fails.
Set up the controller.
- GIVEN an RSA key pair for token signing
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
+ ... uapi_pipelines_get
+ ... uapi_pipelines_id_delete
... uapi_pipelines_post
+ ... uapi_projects_get
+ ... uapi_projects_id_delete
... uapi_projects_post
... uapi_projects_id_status_put
... uapi_projects_id_status_get
... uapi_projects_id_builds_get
+ ... uapi_workers_get
+ ... uapi_workers_id_delete
... uapi_workers_id_get
... uapi_builds_get
+ ... uapi_builds_id_delete
... uapi_builds_id_get
+ ... uapi_logs_get
+ ... uapi_logs_id_delete
... uapi_logs_id_get
AND a running ick controller
@@ -71,6 +75,7 @@ Register a worker.
... {
... }
THEN result has status code 201
+ AND worker id is OBELIX
Trigger build.
@@ -86,7 +91,7 @@ Worker wants work and gets the first step to run.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {},
... "action_id": "1",
@@ -103,7 +108,7 @@ failure.
... {
... "build_id": "rome/1",
... "action_id": "1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 1,
... "stdout": "",
@@ -121,7 +126,7 @@ Worker is next told to notify end of build.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {},
... "action_id": "4",
@@ -134,7 +139,7 @@ Worker is next told to notify end of build.
... {
... "build_id": "rome/1",
... "action_id": "4",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "exit_code": 0,
... "stdout": "",
@@ -151,11 +156,11 @@ The build has ended, and there's no more work to do.
User sees changed status.
- WHEN user makes request GET /workers/obelix
+ WHEN user makes request GET /workers/${OBELIX}
THEN result has status code 200
AND body matches
... {
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "doing": {}
... }
@@ -170,7 +175,7 @@ There's a build with a log.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {},
... "graph": {
@@ -211,7 +216,7 @@ There's a build with a log.
... "build_id": "rome/1",
... "build_number": 1,
... "log": "/logs/rome/1",
- ... "worker": "obelix",
+ ... "worker": "${OBELIX}",
... "project": "rome",
... "parameters": {},
... "graph": {
@@ -248,4 +253,22 @@ There's a build with a log.
AND result has header Content-Type: text/plain
AND body text contains "eek!"
+ WHEN user makes request DELETE /projects/rome
+ AND user makes request DELETE /pipelines/construct
+ AND user makes request DELETE /workers/${OBELIX}
+ AND user makes request DELETE /builds/rome/1
+ AND user makes request DELETE /logs/rome/1
+
+ WHEN user makes request GET /projects
+ THEN body matches {"projects":[]}
+
+ WHEN user makes request GET /pipelines
+ THEN body matches {"pipelines":[]}
+
+ WHEN user makes request GET /builds
+ THEN body matches {"builds":[]}
+
+ WHEN user makes request GET /logs
+ THEN body matches {"log":[]}
+
FINALLY stop ick controller
diff --git a/yarns/600-unauthz.yarn b/yarns/600-unauthz.yarn
index 1c928ac..ab33404 100644
--- a/yarns/600-unauthz.yarn
+++ b/yarns/600-unauthz.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -26,12 +26,9 @@ returned.
Set up the controller.
- GIVEN an RSA key pair for token signing
- AND controller config uses statedir at the state directory
- AND controller config uses https://blobs.example.com as artifact store
- AND controller config uses https://auth.example.com as authentication
- AND controller config uses https://notify.example.com as notify
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
+ ... uapi_projects_get
+ ... uapi_projects_id_delete
... uapi_projects_post
... uapi_projects_id_status_put
... uapi_projects_id_status_get
@@ -88,4 +85,8 @@ Set up the controller.
WHEN outsider makes request POST /work with an invalid token and body {}
THEN result has status code 401
+ WHEN user makes request DELETE /projects/rome
+ WHEN user makes request GET /projects
+ THEN body matches {"projects":[]}
+
FINALLY stop ick controller
diff --git a/yarns/700-artifact-store.yarn b/yarns/700-artifact-store.yarn
index 2dcea2e..cadc83c 100644
--- a/yarns/700-artifact-store.yarn
+++ b/yarns/700-artifact-store.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -27,18 +27,23 @@ simple, in fact, it will certainly change in the future.
Set up the artifact store.
- GIVEN an RSA key pair for token signing
- AND artifact store config uses blobs at the blob directory
- AND an access token for user with scopes
+ GIVEN an access token for user with scopes
+ ... uapi_blobs_id_delete
... uapi_blobs_id_put
... uapi_blobs_id_get
- AND a running artifact store
+ AND a running ick controller
+
+<!--
+
+FIXME: This is disabled until the artifact store supports deletion.
Try to get a non-existent blob. It should result in an error.
WHEN user retrieves /blobs/cake from artifact store
THEN result has status code 404
+-->
+
Create and store a blob, retrieve it, and verify we get it back intact.
WHEN user creates a blob named cake with random data
@@ -49,4 +54,16 @@ Create and store a blob, retrieve it and verify we get it back intack.
THEN result has status code 200
AND body is the same as the blob cake
- FINALLY stop artifact store
+<!--
+
+FIXME: This is disabled until the artifact store supports deletion.
+
+Delete the cake.
+
+ WHEN user deletes /blobs/cake from artifact store
+ WHEN user retrieves /blobs/cake from artifact store
+ THEN result has status code 404
+
+-->
+
+ FINALLY stop ick controller
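
For reference, the blob round-trip this scenario exercises is just an authenticated PUT followed by a GET against the artifact store URL that the controller reports from its `/version` endpoint. A minimal sketch using the `requests` library; the controller URL, token value, and exact success codes are assumptions here, not part of the yarns (a real run gets its token with client credentials, as in `get_api_token` below):

    import requests

    CONTROLLER = 'https://controller.example.com'  # placeholder, like $CONTROLLER in the yarns
    TOKEN = 'example-token'                        # placeholder; real runs fetch one from the token endpoint

    # Ask the controller where the artifact store lives.
    asurl = requests.get(CONTROLLER + '/version').json()['artifact_store']

    headers = {'Authorization': 'Bearer {}'.format(TOKEN)}
    data = b'some random data'

    # Store a blob, then read it back and check it came back unchanged.
    r = requests.put(asurl + '/blobs/cake', headers=headers, data=data)
    assert r.ok  # assumed 200 or 201 on success

    r = requests.get(asurl + '/blobs/cake', headers=headers)
    assert r.status_code == 200
    assert r.content == data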
diff --git a/yarns/900-implements.yarn b/yarns/900-implements.yarn
index 490f46e..92acaa4 100644
--- a/yarns/900-implements.yarn
+++ b/yarns/900-implements.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017-2018 Lars Wirzenius
+Copyright 2017-2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -32,6 +32,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
IMPLEMENTS WHEN (\S+) makes request GET (\S+)
user = get_next_match()
path = get_next_match()
+ path = expand_vars(path, V)
token = get_token(user)
url = V['url']
http(V, get, url + path, token=token)
@@ -40,8 +41,19 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
user = get_next_match()
path = get_next_match()
token = get_token(user)
- url = V['bsurl']
- http(V, get_blob, url + path, token=token)
+ url = V['url']
+ version = get_version(url)
+ asurl = version['artifact_store']
+ http(V, get_blob, asurl + path, token=token)
+
+ IMPLEMENTS WHEN (\S+) deletes (\S+) from artifact store
+ user = get_next_match()
+ path = get_next_match()
+ token = get_token(user)
+ url = V['url']
+ version = get_version(url)
+ asurl = version['artifact_store']
+ http(V, delete, asurl + path, token=token)
IMPLEMENTS WHEN (\S+) makes request GET (\S+) with an invalid token
user = get_next_match()
@@ -54,6 +66,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
user = get_next_match()
path = get_next_match()
body = get_next_match()
+ body = expand_vars(body, V)
+ V['xxxPOSTbodyvalid'] = body
token = get_token(user)
url = V['url']
http(V, post, url + path, body=body, token=token)
@@ -62,6 +76,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
user = get_next_match()
path = get_next_match()
body = get_next_match()
+ body = expand_vars(body, V)
+ V['xxxPOSTbody'] = body
token = get_token(user)
url = V['url']
http(V, post, url + path, body=body, token='invalid')
@@ -69,7 +85,10 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
IMPLEMENTS WHEN (\S+) makes request PUT (\S+) with a valid token and body (.+)
user = get_next_match()
path = get_next_match()
+ path = expand_vars(path, V)
body = get_next_match()
+ body = expand_vars(body, V)
+ V['xxxPUTbody'] = body
token = get_token(user)
url = V['url']
http(V, put, url + path, body=body, token=token)
@@ -80,12 +99,15 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
path = get_next_match()
body = cat(filename)
token = get_token(user)
- url = V['bsurl']
- http(V, put_blob, url + path, body=body, token=token)
+ url = V['url']
+ version = get_version(url)
+ asurl = version['artifact_store']
+ http(V, put_blob, asurl + path, body=body, token=token)
IMPLEMENTS WHEN (\S+) makes request PUT (\S+) with an invalid token
user = get_next_match()
path = get_next_match()
+ path = expand_vars(path, V)
body = '{}'
token = get_token(user)
url = V['url']
@@ -94,18 +116,25 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
IMPLEMENTS WHEN (\S+) makes request DELETE (\S+)
user = get_next_match()
path = get_next_match()
+ path = expand_vars(path, V)
token = get_token(user)
url = V['url']
http(V, delete, url + path, token=token)
## HTTP response inspection
+ IMPLEMENTS THEN worker id is (\S+)
+ varname = get_next_match()
+ body = json.loads(V['body'])
+ V[varname] = body['worker']
+
IMPLEMENTS THEN result has status code (\d+)
expected = int(get_next_match())
assertEqual(expected, V['status_code'])
IMPLEMENTS THEN body matches (.+)
expected_text = get_next_match()
+ expected_text = expand_vars(expected_text, V)
expected = json.loads(expected_text)
actual = json.loads(V['body'])
print 'expected'
diff --git a/yarns/900-local.yarn b/yarns/900-local.yarn
deleted file mode 100644
index 5fa06f3..0000000
--- a/yarns/900-local.yarn
+++ /dev/null
@@ -1,191 +0,0 @@
-<!--
-
-Copyright 2017-2018 Lars Wirzenius
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see <http://www.gnu.org/licenses/>.
-
--->
-
-# Scenario step implementations for locally managed ick
-
-## Authentication setup
-
- IMPLEMENTS GIVEN an RSA key pair for token signing
- argv = [
- os.path.join(srcdir, 'generate-rsa-key'),
- 'token.key',
- ]
- cliapp.runcmd(argv, stdout=None, stderr=None)
-
- IMPLEMENTS GIVEN an access token for (\S+) with scopes (.+)
- user = get_next_match()
- scopes = get_next_match()
- key = open('token.key').read()
- argv = [
- os.path.join(srcdir, 'create-token'),
- scopes,
- user,
- ]
- token = cliapp.runcmd(argv, feed_stdin=key)
- store_token(user, token)
- V['issuer'] = 'localhost'
- V['audience'] = user
-
-## Controller configuration
-
- IMPLEMENTS GIVEN controller config uses (\S+) at the state directory
- V['statedir'] = get_next_match()
-
- IMPLEMENTS GIVEN controller config uses (\S+) as artifact store
- V['artifact_store'] = get_next_match()
-
- IMPLEMENTS GIVEN controller config uses (\S+) as authentication
- V['auth_url'] = get_next_match()
-
- IMPLEMENTS GIVEN controller config uses (\S+) as notify
- V['notify_url'] = get_next_match()
- assert V['notify_url'] is not None
-
-## Start and stop the controller
-
- IMPLEMENTS GIVEN a running ick controller
- import os, time, cliapp, yaml
- V['controller.log'] = 'ick_controller.log'
- V['gunicorn3.log'] = 'gunicorn3.log'
- V['port'] = random_free_port()
- V['url'] = 'http://127.0.0.1:{}'.format(V['port'])
- assert V['auth_url'] is not None
- assert V['notify_url'] is not None
- config = {
- 'token-issuer': V['issuer'],
- 'token-audience': V['audience'],
- 'token-public-key': cat('token.key.pub'),
- 'log': [
- {
- 'filename': V['controller.log'],
- },
- ],
- 'statedir': V['statedir'],
- 'apt-server': 'localhost',
- 'artifact-store': V['artifact_store'],
- 'auth-url': V['auth_url'],
- 'notify-url': V['notify_url'],
- }
- assert config['notify-url'] is not None
- env = dict(os.environ)
- env['ICK_CONTROLLER_CONFIG'] = 'ick_controller.yaml'
- yaml.safe_dump(config, open('ick_controller.yaml', 'w'))
- argv = [
- 'gunicorn3',
- '--daemon',
- '--bind', '127.0.0.1:{}'.format(V['port']),
- '--log-file', V['gunicorn3.log'],
- '--log-level', 'debug',
- '-p', 'pid',
- 'ick_controller:app',
- ]
- cliapp.runcmd(argv, env=env)
- V['pid'] = int(cat('pid'))
- wait_for_port(V['port'])
-
- IMPLEMENTS WHEN user stops ick controller
- import os, signal
- os.kill(int(V['pid']), signal.SIGTERM)
-
- IMPLEMENTS FINALLY stop ick controller
- import os, signal
- os.kill(V['pid'], signal.SIGTERM)
-
-## Controller state inspection
-
- IMPLEMENTS THEN controller state directory contains project (\S+)
- name = get_next_match()
- basename = encode_basename(name)
- filename = os.path.join(V['statedir'], 'projects', basename)
- print 'name', name
- print 'basename', basename
- print 'filename', filename
- assertTrue(os.path.exists(filename))
-
- IMPLEMENTS THEN controller state directory contains worker (\S+)
- name = get_next_match()
- basename = encode_basename(name)
- filename = os.path.join(V['statedir'], 'workers', basename)
- print 'filename', filename
- assertTrue(os.path.exists(filename))
-
-## Check version result
-
- IMPLEMENTS THEN artifact store URL is (\S+)
- expected = get_next_match()
- body = V['body']
- obj = json.loads(body)
- actual = obj['artifact_store']
- assertEqual(actual, expected)
-
- IMPLEMENTS THEN authentication URL is (\S+)
- expected = get_next_match()
- body = V['body']
- obj = json.loads(body)
- actual = obj['auth_url']
- assertEqual(actual, expected)
-
- IMPLEMENTS THEN notify URL is (\S+)
- expected = get_next_match()
- body = V['body']
- obj = json.loads(body)
- actual = obj['notify_url']
- assertEqual(actual, expected)
-
-## Start and stop artifact store
-
- IMPLEMENTS GIVEN artifact store config uses (\S+) at the blob directory
- V['blobdir'] = get_next_match()
-
- IMPLEMENTS GIVEN a running artifact store
- import os, time, cliapp, yaml
- V['artifact_store.log'] = 'artifact_store.log'
- V['gunicorn3_as.log'] = 'gunicorn3_as.log'
- V['bsport'] = random_free_port()
- V['bsurl'] = 'http://127.0.0.1:{}'.format(V['bsport'])
- config = {
- 'token-issuer': V['issuer'],
- 'token-audience': V['audience'],
- 'token-public-key': cat('token.key.pub'),
- 'log': [
- {
- 'filename': V['artifact_store.log'],
- },
- ],
- 'blobdir': V['blobdir'],
- }
- env = dict(os.environ)
- env['ARTIFACT_STORE_CONFIG'] = 'artifact_store.yaml'
- yaml.safe_dump(config, open('artifact_store.yaml', 'w'))
- argv = [
- 'gunicorn3',
- '--daemon',
- '--bind', '127.0.0.1:{}'.format(V['bsport']),
- '--log-file', V['gunicorn3_as.log'],
- '--log-level', 'debug',
- '-p', 'bspid',
- 'artifact_store:app',
- ]
- cliapp.runcmd(argv, env=env)
- V['bspid'] = int(cat('bspid'))
- wait_for_port(V['bsport'])
-
- IMPLEMENTS FINALLY stop artifact store
- import os, signal
- os.kill(V['bspid'], signal.SIGTERM)
diff --git a/yarns/900-remote.yarn b/yarns/900-remote.yarn
index 6cc7f88..5e84c13 100644
--- a/yarns/900-remote.yarn
+++ b/yarns/900-remote.yarn
@@ -1,6 +1,6 @@
<!--
-Copyright 2017 Lars Wirzenius
+Copyright 2017,2019 Lars Wirzenius
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
@@ -21,22 +21,12 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
## Authentication setup
- IMPLEMENTS GIVEN an RSA key pair for token signing
- V['private_key_file'] = os.environ['ICK_PRIVATE_KEY']
- assertTrue(os.path.exists(V['private_key_file']))
-
IMPLEMENTS GIVEN an access token for (\S+) with scopes (.+)
user = get_next_match()
- scopes = get_next_match()
- key = open(V['private_key_file']).read()
- argv = [
- os.path.join(srcdir, 'create-token'),
- scopes,
- ]
- token = cliapp.runcmd(argv, feed_stdin=key)
+ scopes = get_next_match().split()
+ create_api_client(user, scopes)
+ token = get_api_token(user, scopes)
store_token(user, token)
- V['issuer'] = 'localhost'
- V['audience'] = 'localhost'
## Controller configuration
@@ -46,13 +36,14 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
## Start and stop the controller
IMPLEMENTS GIVEN a running ick controller
- V['url'] = os.environ['ICK_URL']
+ V['url'] = os.environ['CONTROLLER']
IMPLEMENTS WHEN user stops ick controller
pass
IMPLEMENTS FINALLY stop ick controller
- pass
+ for client_id in get_client_ids():
+ delete_api_client(client_id)
## Controller state inspection
diff --git a/yarns/lib.py b/yarns/lib.py
index 290512c..6d8f2cf 100644
--- a/yarns/lib.py
+++ b/yarns/lib.py
@@ -19,13 +19,17 @@ import errno
import json
import os
import random
+import re
+import signal
import socket
import sys
import time
import urllib
+import uuid
import cliapp
import requests
+import yaml
from yarnutils import *
@@ -35,37 +39,80 @@ datadir = os.environ['DATADIR']
V = Variables(datadir)
-def random_free_port():
- MAX = 1000
- for i in range(MAX):
- port = random.randint(1025, 2**15-1)
- s = socket.socket()
- try:
- s.bind(('0.0.0.0', port))
- except OSError as e:
- if e.errno == errno.EADDRINUSE:
- continue
- print('cannot find a random free port')
- raise
- s.close()
- break
- print('picked port', port)
- return port
-
-
-def wait_for_port(port):
- MAX = 5
- t = time.time()
- while time.time() < t + MAX:
- try:
- s = socket.socket()
- s.connect(('127.0.0.1', port))
- except socket.error:
- time.sleep(0.1)
- except OSError as e:
- raise
- else:
- return
+def remember_client_id(alias, client_id, client_secret):
+ clients = V['clients']
+ if clients is None:
+ clients = {}
+ clients[alias] = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ }
+ V['clients'] = clients
+
+
+def get_client_id(alias):
+ clients = V['clients'] or {}
+ return clients[alias]['client_id']
+
+
+def get_client_ids():
+ clients = V['clients'] or {}
+ return [x['client_id'] for x in clients.values()]
+
+
+def get_client_secret(alias):
+ clients = V['clients'] or {}
+ return clients[alias]['client_secret']
+
+
+def create_api_client(alias, scopes):
+ client_id = str(uuid.uuid4())
+ client_secret = str(uuid.uuid4())
+ print('invented client id', client_id)
+ api = os.environ['CONTROLLER']
+ print('controller URL', api)
+ secrets = os.environ['SECRETS']
+ print('secrets', secrets)
+ base_argv = ['qvisqvetool', '--secrets', secrets, '-a', api]
+ print('base_argv', base_argv)
+ cliapp.runcmd(base_argv + ['create', 'client', client_id, client_secret])
+ cliapp.runcmd(base_argv + ['allow-scope', 'client', client_id] + scopes)
+ remember_client_id(alias, client_id, client_secret)
+
+
+def delete_api_client(client_id):
+ api = os.environ['CONTROLLER']
+ secrets = os.environ['SECRETS']
+ base_argv = ['qvisqvetool', '--secrets', secrets, '-a', api]
+ cliapp.runcmd(base_argv + ['delete', 'client', client_id])
+
+
+def get_api_token(alias, scopes):
+ print('getting token for', alias)
+
+ client_id = get_client_id(alias)
+ client_secret = get_client_secret(alias)
+ api = os.environ['CONTROLLER']
+
+ auth = (client_id, client_secret)
+ data = {
+ 'grant_type': 'client_credentials',
+ 'scope': ' '.join(scopes),
+ }
+
+ url = '{}/token'.format(api)
+
+ print('url', url)
+ print('auth', auth)
+ print('data', data)
+ r = requests.post(url, auth=auth, data=data)
+ if not r.ok:
+ sys.exit('Error getting token: %s %s' % (r.status_code, r.text))
+
+ token = r.json()['access_token']
+ print('token', token)
+ return token
+
def unescape(s):
t = ''
@@ -102,6 +149,12 @@ def get_token(user):
def http(V, func, url, **kwargs):
+ V['request'] = {
+ 'func': repr(func),
+ 'url': url,
+ 'kwargs': kwargs,
+ }
+ print('http', func, url, kwargs)
status, content_type, headers, body = func(url, **kwargs)
V['status_code'] = status
V['content_type'] = content_type
@@ -117,6 +170,11 @@ def get(url, token):
return r.status_code, r.headers['Content-Type'], dict(r.headers), r.text
+def get_version(url):
+ status, ctype, headers, text = get(url + '/version', 'no token')
+ assert ctype == 'application/json'
+ return json.loads(text)
+
def get_blob(url, token):
headers = {
'Authorization': 'Bearer {}'.format(token),
@@ -225,5 +283,15 @@ def list_diff(a, b):
return None
-def encode_basename(basename):
- return urllib.quote(basename, safe='')
+def expand_vars(text, variables):
+ result = ''
+ while text:
+ m = re.search(r'\${(?P<name>[^}]+)}', text)
+ if not m:
+ result += text
+ break
+ name = m.group('name')
+ print('expanding ', name)
+ result += text[:m.start()] + variables[name]
+ text = text[m.end():]
+ return result
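
The `expand_vars` helper above is what makes the `${OBELIX}` and `${ASTERIX}` placeholders in the scenarios work: the new `worker id is NAME` step stores the registered worker's name in `V`, and request paths, bodies, and expected bodies are passed through the expansion before use. A standalone sketch of the same substitution, with a hypothetical worker id:

    import re

    def expand_vars(text, variables):
        # Replace each ${NAME} with variables['NAME'], scanning left to right.
        result = ''
        while text:
            m = re.search(r'\${(?P<name>[^}]+)}', text)
            if not m:
                result += text
                break
            result += text[:m.start()] + variables[m.group('name')]
            text = text[m.end():]
        return result

    V = {'OBELIX': 'worker-7f3a'}  # hypothetical id captured from the worker registration response
    print(expand_vars('/workers/${OBELIX}', V))  # -> /workers/worker-7f3a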