author      Lars Wirzenius <liw@liw.fi>    2020-05-10 10:25:57 +0300
committer   Lars Wirzenius <liw@liw.fi>    2020-05-10 11:13:07 +0300
commit      90d3d8748a5c4c8d52e1ccc7da5110c01e834b98 (patch)
tree        61d4c36eae1f9d4ae1c9b44aacc3005962c4c7c0
parent      fd2cc1d6b35fba4ba99d4221068c48be74413d34 (diff)
download    ick-contractor-90d3d8748a5c4c8d52e1ccc7da5110c01e834b98.tar.gz
refactor: use argparse instead of cliapp
cliapp is an old, moribund Python project of mine, and argparse is the preferred way to parse the command line now.
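
The heart of the new command line handling is argparse subparsers with
set_defaults(func=...) for dispatch. A minimal sketch of that pattern,
condensed and simplified from the new main() in the diff below (option
defaults and most subcommands omitted):

    import argparse

    def cmd_status(args):
        # each handler receives the parsed namespace and reads its own options
        print('manager at {}:{}'.format(args.manager_address, args.manager_port))

    def main():
        p = argparse.ArgumentParser()
        p.add_argument('-v', '--verbose', action='store_true')

        sub = p.add_subparsers()
        status = sub.add_parser('status')
        status.add_argument('-m', '--manager-address')
        status.add_argument('-p', '--manager-port', default=22)
        # attach the handler so main() can dispatch without knowing the subcommand
        status.set_defaults(func=cmd_status)

        args = p.parse_args()
        args.func(args)

    if __name__ == '__main__':
        main()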
-rwxr-xr-x  contractor  404
-rw-r--r--  funcs.py     18
2 files changed, 227 insertions(+), 195 deletions(-)
diff --git a/contractor b/contractor
index 0fdff50..7929e7c 100755
--- a/contractor
+++ b/contractor
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import argparse
import json
import logging
import os
@@ -9,7 +10,6 @@ import time
import subprocess
from subprocess import PIPE, STDOUT
-import cliapp
import yaml
@@ -33,190 +33,6 @@ WORKER_UID = 1000
WORKER_GID = 1000
-class ContractorApplication(cliapp.Application):
-
- def add_settings(self):
- self.settings.string(
- ['manager-address', 'm'],
- 'address of the manager VM',
- metavar='ADDR')
-
- self.settings.integer(
- ['manager-port', 'p'],
- 'SSH port of the manager VM',
- default=22,
- metavar='ADDR')
-
- self.settings.string(
- ['manager-user'],
- 'user of the manager in the manager VM',
- default='manager',
- metavar='USERNAME')
-
- self.settings.boolean(
- ['verbose', 'v'],
- 'be verbose',
- default=False)
-
- def cmd_dump(self, args):
- bs = self.load_build_spec(args[0])
- self.output.write('{}\n'.format(json.dumps(bs.as_dict(), indent=4)))
-
- def cmd_build(self, args):
- self.verbose('building according to {}'.format(args[0]))
- bs = self.load_build_spec(args[0])
- dest, port = self.manager_destination()
- manager = RemoteServer(dest, port, verbose=self.verbose)
- self.verbose('manager is at {} (port {})'.format(dest, port))
-
- with Timer(self.verbose, 'complete-run'):
- with Timer(self.verbose, 'upload-worker-image'):
- self.upload_worker_image(bs.worker_image(), dest, port)
-
- # Do the minimum needed to start worker VM. The VM takes a
- # while to boot and we can do other things while that
- # happens.
- with Timer(self.verbose, 'start-worker'):
- execs = [
- DestroyWorkerVM(),
- UndefineWorkerVM(),
- CopyWorkerImage(),
- StartGuestNetworking(),
- CreateWorkerVM(),
- TryUnmountWS(),
- MountWS(),
- ChownWS(),
- ]
- self.exec_quietly(manager, *execs)
-
- with Timer(self.verbose, 'upload-saved-workspace'):
- ws = bs.workspace()
- if os.path.exists(ws):
- self.sync_to_workspace(ws, dest, port, '.')
-
- with Timer(self.verbose, 'upload-source'):
- self.exec_quietly(
- manager, Mkdir(
- '/mnt/src', owner=WORKER_UID, group=WORKER_GID))
- src = bs.source()
- self.sync_to_workspace(src, dest, port, 'src')
-
- with Timer(self.verbose, 'wait-for-worker-to-be-available'):
- execs = [
- UnmountWS(),
- WorkerIP(),
- AttachWS(),
- ]
- er = self.exec_quietly(manager, *execs)
- worker_ip = er.stdout.decode('UTF8').strip()
-
- with Timer(self.verbose, 'prepare-workspace-worker'):
- worker_dest = 'worker@{}'.format(worker_ip)
- self.verbose(
- 'worker is at {} (via manager)'.format(worker_dest))
- worker = OnWorker(dest, port, worker_dest, verbose=self.verbose)
- self.exec_quietly(
- worker, Mkdir('/workspace'), MountWSonWorker())
-
- with Timer(self.verbose, 'prepare-worker-with-ansible'):
- ansible = bs.ansible()
- if ansible:
- self.exec_verbosely(manager, Ansible(ansible, worker_ip))
-
- with Timer(self.verbose, 'build'):
- execs = [
- Chdir('/workspace/src'),
- Build(bs.build()),
- ]
- build_failed = worker.verbosely(*execs).failed()
-
- with Timer(self.verbose, 'shutdown-worker'):
- execs = [
- ShutdownWorkerVM(),
- MountWS(),
- ]
- self.exec_quietly(manager, *execs)
-
- with Timer(self.verbose, 'save-workspace'):
- if ws:
- self.verbose('saving workspace to {}'.format(ws))
- self.sync_from_workspace(dest, port, ws)
-
- if build_failed:
- self.error('build FAILED')
- sys.exit(1)
-
- self.verbose('build finished OK')
-
- def load_build_spec(self, filename):
- with open(filename) as f:
- return BuildSpec(f.read())
-
- def upload_worker_image(self, filename, dest, port):
- self.verbose(
- 'uploading to manager local worker image {}'.format(filename))
- target = '{}:{}'.format(dest, WORKER_IMG)
- if rsync(filename, target, port).failed():
- self.error('could not upload image to worker')
- sys.exit(1)
-
- def sync_to_workspace(self, frm, dest, port, subdir):
- destdir = '/mnt/{}'.format(subdir)
- self.verbose('syncing local {} to manager {}'.format(frm, destdir))
- er = rsync(
- '{}/.'.format(frm),
- '{}:{}/.'.format(dest, destdir),
- port)
- if er.failed():
- self.error('Failed to rsync saved workspace to worker')
- sys.exit(1)
-
- def sync_from_workspace(self, dest, port, ws):
- self.verbose('syncing manager /mnt to local {}'.format(ws))
- if not os.path.exists(ws):
- os.makedirs(ws)
- er = rsync('{}:/mnt/.'.format(dest), '{}/.'.format(ws), port)
- if er.failed():
- self.error('Failed to rsync workspace from worker')
- sys.exit(1)
-
- def exec_sequence(self, how, *execs):
- er = how(*execs)
- if er.failed():
- self.error('Failed to do that, giving up')
- sys.exit(1)
- return er
-
- def exec_quietly(self, manager, *execs):
- return self.exec_sequence(manager.quietly, *execs)
-
- def exec_verbosely(self, manager, *execs):
- return self.exec_sequence(manager.verbosely, *execs)
-
- def cmd_manager_status(self, args):
- dest, port = self.manager_destination()
- manager = RemoteServer(dest, port)
- if manager.quietly(TrueCmd()).failed():
- self.error('Manager VM is NOT available')
- sys.exit(1)
- self.verbose('Manager VM is available')
-
- def manager_destination(self):
- user = self.settings['manager-user']
- addr = self.settings['manager-address']
- port = self.settings['manager-port']
- return '{}@{}'.format(user, addr), port
-
- def error(self, msg):
- sys.stderr.write('ERROR: {}\n'.format(msg))
- logging.error('ERROR: {}'.format(msg))
-
- def verbose(self, msg):
- logging.info(msg)
- if self.settings['verbose']:
- print(msg)
-
-
class ExecResult:
def __init__(self, stdout, stderr, exit_code):
@@ -632,4 +448,220 @@ class Timer:
return False
-ContractorApplication().run()
+def load_build_spec(filename):
+ with open(filename) as f:
+ return BuildSpec(f.read())
+
+
+def error(msg):
+ sys.stderr.write('ERROR: {}\n'.format(msg))
+ logging.error('ERROR: {}'.format(msg))
+
+
+def verbose(args, msg):
+ logging.info(msg)
+ if args.verbose:
+ print(msg)
+
+def manager_destination(args):
+ user = args.manager_user
+ addr = args.manager_address
+ port = args.manager_port
+ return '{}@{}'.format(user, addr), port
+
+
+def upload_worker_image(vrb, filename, dest, port):
+ vrb('uploading to manager local worker image {}'.format(filename))
+ target = '{}:{}'.format(dest, WORKER_IMG)
+ if rsync(filename, target, port).failed():
+ error('could not upload image to worker')
+ sys.exit(1)
+
+
+def sync_to_workspace(vrb, frm, dest, port, subdir):
+ destdir = '/mnt/{}'.format(subdir)
+ vrb('syncing local {} to manager {}'.format(frm, destdir))
+ er = rsync('{}/.'.format(frm), '{}:{}/.'.format(dest, destdir), port)
+ if er.failed():
+ error('Failed to rsync saved workspace to worker')
+ sys.exit(1)
+
+
+def sync_from_workspace(vrb, dest, port, ws):
+ vrb('syncing manager /mnt to local {}'.format(ws))
+ if not os.path.exists(ws):
+ os.makedirs(ws)
+ er = rsync('{}:/mnt/.'.format(dest), '{}/.'.format(ws), port)
+ if er.failed():
+ error('Failed to rsync workspace from worker')
+ sys.exit(1)
+
+
+def exec_sequence(how, *execs):
+ er = how(*execs)
+ if er.failed():
+ error('Failed to do that, giving up')
+ sys.exit(1)
+ return er
+
+
+def exec_quietly(manager, *execs):
+ return exec_sequence(manager.quietly, *execs)
+
+
+def exec_verbosely(manager, *execs):
+ return exec_sequence(manager.verbosely, *execs)
+
+
+def cmd_dump(args):
+ bs = load_build_spec(args.spec)
+ sys.stdout.write('{}\n'.format(json.dumps(bs.as_dict(), indent=4)))
+
+
+def cmd_status(args):
+ dest, port = manager_destination(args)
+ verbose(args, 'manager VM is {}:{}'.format(dest, port))
+ manager = RemoteServer(dest, port)
+ if manager.quietly(TrueCmd()).failed():
+ error('Manager VM is NOT available')
+ sys.exit(1)
+ verbose(args, 'manager VM is available')
+
+
+def cmd_build(args):
+ vrb = lambda msg: verbose(args, msg)
+ vrb('building according to {}'.format(args.spec))
+ bs = load_build_spec(args.spec)
+ dest, port = manager_destination(args)
+ vrb('manager is at {} (port {})'.format(dest, port))
+
+ manager = RemoteServer(dest, port, verbose=vrb)
+
+ with Timer(vrb, 'complete-run'):
+ with Timer(vrb, 'upload-worker-image'):
+ upload_worker_image(vrb, bs.worker_image(), dest, port)
+
+ # Do the minimum needed to start worker VM. The VM takes a
+ # while to boot and we can do other things while that
+ # happens.
+ with Timer(vrb, 'start-worker'):
+ execs = [
+ DestroyWorkerVM(),
+ UndefineWorkerVM(),
+ CopyWorkerImage(),
+ StartGuestNetworking(),
+ CreateWorkerVM(),
+ TryUnmountWS(),
+ MountWS(),
+ ChownWS(),
+ ]
+ exec_quietly(manager, *execs)
+
+ with Timer(vrb, 'upload-saved-workspace'):
+ ws = bs.workspace()
+ if os.path.exists(ws):
+ sync_to_workspace(vrb, ws, dest, port, '.')
+
+ with Timer(vrb, 'upload-source'):
+ exec_quietly(manager, Mkdir('/mnt/src', owner=WORKER_UID, group=WORKER_GID))
+ src = bs.source()
+ sync_to_workspace(vrb, src, dest, port, 'src')
+
+ with Timer(vrb, 'wait-for-worker-to-be-available'):
+ execs = [
+ UnmountWS(),
+ WorkerIP(),
+ AttachWS(),
+ ]
+ er = exec_quietly(manager, *execs)
+ worker_ip = er.stdout.decode('UTF8').strip()
+
+ with Timer(vrb, 'prepare-workspace-worker'):
+ worker_dest = 'worker@{}'.format(worker_ip)
+ vrb('worker is at {} (via manager)'.format(worker_dest))
+ worker = OnWorker(dest, port, worker_dest, verbose=vrb)
+ exec_quietly(worker, Mkdir('/workspace'), MountWSonWorker())
+
+ with Timer(vrb, 'prepare-worker-with-ansible'):
+ ansible = bs.ansible()
+ if ansible:
+ exec_verbosely(manager, Ansible(ansible, worker_ip))
+
+ with Timer(vrb, 'build'):
+ execs = [
+ Chdir('/workspace/src'),
+ Build(bs.build()),
+ ]
+ build_failed = worker.verbosely(*execs).failed()
+
+ with Timer(vrb, 'shutdown-worker'):
+ execs = [
+ ShutdownWorkerVM(),
+ MountWS(),
+ ]
+ exec_quietly(manager, *execs)
+
+ with Timer(vrb, 'save-workspace'):
+ if ws:
+ vrb('saving workspace to {}'.format(ws))
+ sync_from_workspace(vrb, dest, port, ws)
+
+ if build_failed:
+ error('build FAILED')
+ sys.exit(1)
+
+ vrb('build finished OK')
+
+
+def setup_logging(args):
+ if args.log:
+ fmt = '%(asctime)s %(levelname)s %(message)s'
+ datefmt = '%Y-%m-%d %H:%M:%S'
+ formatter = logging.Formatter(fmt, datefmt)
+
+ handler = logging.FileHandler(args.log)
+ handler.setFormatter(formatter)
+ else:
+ handler = logging.NullHandler()
+
+ logger = logging.getLogger()
+ logger.addHandler(handler)
+ logger.setLevel(logging.INFO)
+
+
+def main():
+ p = argparse.ArgumentParser()
+ p.add_argument('-v', '--verbose', action='store_true')
+ p.add_argument('--log', help='log to a file')
+
+ sub = p.add_subparsers()
+
+ manager_defaults = {
+ 'manager_address': None,
+ 'manager_port': 22,
+ 'manager_user': 'manager',
+ }
+
+ dump = sub.add_parser('dump', help='dump parsed build spec as JSON')
+ dump.add_argument('spec')
+ dump.set_defaults(func=cmd_dump)
+
+ status = sub.add_parser('status', help='check status of manager VM')
+ status.add_argument('-m', '--manager-address', help='address of manager VM')
+ status.add_argument('-p', '--manager-port', help='SSH port of manager VM')
+    status.add_argument('-u', '--manager-user', help='user on manager VM')
+ status.set_defaults(func=cmd_status, **manager_defaults)
+
+ build = sub.add_parser('build', help='build according to spec')
+ build.add_argument('spec')
+ build.add_argument('-m', '--manager-address', help='address of manager VM')
+ build.add_argument('-p', '--manager-port', help='SSH port of manager VM')
+ build.set_defaults(func=cmd_build, **manager_defaults)
+
+ args = p.parse_args()
+ setup_logging(args)
+ args.func(args)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/funcs.py b/funcs.py
index 0d157a8..2172cc2 100644
--- a/funcs.py
+++ b/funcs.py
@@ -25,13 +25,13 @@ def _run(ctx, argv):
# the address of the manager VM from CONTRACTOR_ADDRESS in the
# environment.
def _contractor():
- addr = os.environ['CONTRACTOR_ADDRESS']
- return [
- os.path.join(srcdir, 'contractor'),
- '--no-default-config',
- '--log', os.path.join(srcdir, 'contractor.log'),
- '--manager-address', addr,
- ]
+ return [os.path.join(srcdir, 'contractor')]
+
+
+# Return manager address.
+def _manager_address():
+ return os.environ['CONTRACTOR_ADDRESS']
+
#############################################################################
@@ -45,7 +45,7 @@ def nop(ctx, **kwargs):
# Check that we can access the contractor VM.
# FIXME: this hardcodes some things.
def contractor_is_working(ctx):
- argv = _contractor() + ['manager-status']
+ argv = _contractor() + ['status', '-m', _manager_address()]
_run(ctx, argv)
assert_eq(ctx['exit'], 0)
@@ -80,7 +80,7 @@ def run_contractor_dump(ctx, filename=None):
# Run the contractor to do a build.
def run_contractor_build(ctx, filename=None):
- argv = _contractor() + ['build', filename]
+ argv = _contractor() + ['build', filename, '-m', _manager_address()]
_run(ctx, argv)