| |
@@ -9,53 +9,50 @@
|
| |
import signal
|
| |
import multiprocessing
|
| |
import socket
|
| |
+ from contextlib import contextmanager
|
| |
+ import fcntl
|
| |
import subprocess
|
| |
import sys
|
| |
import tempfile
|
| |
import time
|
| |
- import distutils.util
|
| |
-
|
| |
- IDENTITY = """
|
| |
- -----BEGIN RSA PRIVATE KEY-----
|
| |
- MIIEpQIBAAKCAQEA1DrTSXQRF8isQQfPfK3U+eFC4zBrjur+Iy15kbHUYUeSHf5S
|
| |
- jXPYbHYqD1lHj4GJajC9okle9rykKFYZMmJKXLI6987wZ8vfucXo9/kwS6BDAJto
|
| |
- ZpZSj5sWCQ1PI0Ce8CbkazlTp5NIkjRfhXGP8mkNKMEhdNjaYceO49ilnNCIxhpb
|
| |
- eH5dH5hybmQQNmnzf+CGCCLBFmc4g3sFbWhI1ldyJzES5ZX3ahjJZYRUfnndoUM/
|
| |
- TzdkHGqZhL1EeFAsv5iV65HuYbchch4vBAn8jDMmHh8G1ixUCL3uAlosfarZLLyo
|
| |
- 3HrZ8U/llq7rXa93PXHyI/3NL/2YP3OMxE8baQIDAQABAoIBAQCxuOUwkKqzsQ9W
|
| |
- kdTWArfj3RhnKigYEX9qM+2m7TT9lbKtvUiiPc2R3k4QdmIvsXlCXLigyzJkCsqp
|
| |
- IJiPEbJV98bbuAan1Rlv92TFK36fBgC15G5D4kQXD/ce828/BSFT2C3WALamEPdn
|
| |
- v8Xx+Ixjokcrxrdeoy4VTcjB0q21J4C2wKP1wEPeMJnuTcySiWQBdAECCbeZ4Vsj
|
| |
- cmRdcvL6z8fedRPtDW7oec+IPkYoyXPktVt8WsQPYkwEVN4hZVBneJPCcuhikYkp
|
| |
- T3WGmPV0MxhUvCZ6hSG8D2mscZXRq3itXVlKJsUWfIHaAIgGomWrPuqC23rOYCdT
|
| |
- 5oSZmTvFAoGBAPs1FbbxDDd1fx1hisfXHFasV/sycT6ggP/eUXpBYCqVdxPQvqcA
|
| |
- ktplm5j04dnaQJdHZ8TPlwtL+xlWhmhFhlCFPtVpU1HzIBkp6DkSmmu0gvA/i07Z
|
| |
- pzo5Z+HRZFzruTQx6NjDtvWwiXVLwmZn2oiLeM9xSqPu55OpITifEWNjAoGBANhH
|
| |
- XwV6IvnbUWojs7uiSGsXuJOdB1YCJ+UF6xu8CqdbimaVakemVO02+cgbE6jzpUpo
|
| |
- krbDKOle4fIbUYHPeyB0NMidpDxTAPCGmiJz7BCS1fCxkzRgC+TICjmk5zpaD2md
|
| |
- HCrtzIeHNVpTE26BAjOIbo4QqOHBXk/WPen1iC3DAoGBALsD3DSj46puCMJA2ebI
|
| |
- 2EoWaDGUbgZny2GxiwrvHL7XIx1XbHg7zxhUSLBorrNW7nsxJ6m3ugUo/bjxV4LN
|
| |
- L59Gc27ByMvbqmvRbRcAKIJCkrB1Pirnkr2f+xx8nLEotGqNNYIawlzKnqr6SbGf
|
| |
- Y2wAGWKmPyEoPLMLWLYkhfdtAoGANsFa/Tf+wuMTqZuAVXCwhOxsfnKy+MNy9jiZ
|
| |
- XVwuFlDGqVIKpjkmJyhT9KVmRM/qePwgqMSgBvVOnszrxcGRmpXRBzlh6yPYiQyK
|
| |
- 2U4f5dJG97j9W7U1TaaXcCCfqdZDMKnmB7hMn8NLbqK5uLBQrltMIgt1tjIOfofv
|
| |
- BNx0raECgYEApAvjwDJ75otKz/mvL3rUf/SNpieODBOLHFQqJmF+4hrSOniHC5jf
|
| |
- f5GS5IuYtBQ1gudBYlSs9fX6T39d2avPsZjfvvSbULXi3OlzWD8sbTtvQPuCaZGI
|
| |
- Df9PUWMYZ3HRwwdsYovSOkT53fG6guy+vElUEDkrpZYczROZ6GUcx70=
|
| |
- -----END RSA PRIVATE KEY-----
|
| |
- """
|
| |
-
|
| |
-
|
| |
- AUTH_KEY = ("AAAAB3NzaC1yc2EAAAADAQABAAABAQDUOtNJdBEXyKxBB898rdT54ULjMGuO6v4jLX"
|
| |
- "mRsdRhR5Id/lKNc9hsdioPWUePgYlqML2iSV72vKQoVhkyYkpcsjr3zvBny9+5xej3"
|
| |
- "+TBLoEMAm2hmllKPmxYJDU8jQJ7wJuRrOVOnk0iSNF+FcY/yaQ0owSF02Nphx47j2K"
|
| |
- "Wc0IjGGlt4fl0fmHJuZBA2afN/4IYIIsEWZziDewVtaEjWV3InMRLllfdqGMllhFR+"
|
| |
- "ed2hQz9PN2QcapmEvUR4UCy/mJXrke5htyFyHi8ECfyMMyYeHwbWLFQIve4CWix9qt"
|
| |
- "ksvKjcetnxT+WWrutdr3c9cfIj/c0v/Zg/c4zETxtp")
|
| |
-
|
| |
+ from copy import deepcopy
|
| |
+
|
| |
+
|
| |
# PEM-format RSA private key written into each VM's temp dir for ssh access.
# PEM requires the BEGIN header, each base64 body line, and the END footer on
# separate lines.  The previous parenthesized-string concatenation produced one
# long line with no newlines at all, which ssh rejects as an invalid key file.
SSH_PRIV_KEY = "\n".join((
    "-----BEGIN RSA PRIVATE KEY-----",
    "MIIEpQIBAAKCAQEA1DrTSXQRF8isQQfPfK3U+eFC4zBrjur+Iy15kbHUYUeSHf5S",
    "jXPYbHYqD1lHj4GJajC9okle9rykKFYZMmJKXLI6987wZ8vfucXo9/kwS6BDAJto",
    "ZpZSj5sWCQ1PI0Ce8CbkazlTp5NIkjRfhXGP8mkNKMEhdNjaYceO49ilnNCIxhpb",
    "eH5dH5hybmQQNmnzf+CGCCLBFmc4g3sFbWhI1ldyJzES5ZX3ahjJZYRUfnndoUM/",
    "TzdkHGqZhL1EeFAsv5iV65HuYbchch4vBAn8jDMmHh8G1ixUCL3uAlosfarZLLyo",
    "3HrZ8U/llq7rXa93PXHyI/3NL/2YP3OMxE8baQIDAQABAoIBAQCxuOUwkKqzsQ9W",
    "kdTWArfj3RhnKigYEX9qM+2m7TT9lbKtvUiiPc2R3k4QdmIvsXlCXLigyzJkCsqp",
    "IJiPEbJV98bbuAan1Rlv92TFK36fBgC15G5D4kQXD/ce828/BSFT2C3WALamEPdn",
    "v8Xx+Ixjokcrxrdeoy4VTcjB0q21J4C2wKP1wEPeMJnuTcySiWQBdAECCbeZ4Vsj",
    "cmRdcvL6z8fedRPtDW7oec+IPkYoyXPktVt8WsQPYkwEVN4hZVBneJPCcuhikYkp",
    "T3WGmPV0MxhUvCZ6hSG8D2mscZXRq3itXVlKJsUWfIHaAIgGomWrPuqC23rOYCdT",
    "5oSZmTvFAoGBAPs1FbbxDDd1fx1hisfXHFasV/sycT6ggP/eUXpBYCqVdxPQvqcA",
    "ktplm5j04dnaQJdHZ8TPlwtL+xlWhmhFhlCFPtVpU1HzIBkp6DkSmmu0gvA/i07Z",
    "pzo5Z+HRZFzruTQx6NjDtvWwiXVLwmZn2oiLeM9xSqPu55OpITifEWNjAoGBANhH",
    "XwV6IvnbUWojs7uiSGsXuJOdB1YCJ+UF6xu8CqdbimaVakemVO02+cgbE6jzpUpo",
    "krbDKOle4fIbUYHPeyB0NMidpDxTAPCGmiJz7BCS1fCxkzRgC+TICjmk5zpaD2md",
    "HCrtzIeHNVpTE26BAjOIbo4QqOHBXk/WPen1iC3DAoGBALsD3DSj46puCMJA2ebI",
    "2EoWaDGUbgZny2GxiwrvHL7XIx1XbHg7zxhUSLBorrNW7nsxJ6m3ugUo/bjxV4LN",
    "L59Gc27ByMvbqmvRbRcAKIJCkrB1Pirnkr2f+xx8nLEotGqNNYIawlzKnqr6SbGf",
    "Y2wAGWKmPyEoPLMLWLYkhfdtAoGANsFa/Tf+wuMTqZuAVXCwhOxsfnKy+MNy9jiZ",
    "XVwuFlDGqVIKpjkmJyhT9KVmRM/qePwgqMSgBvVOnszrxcGRmpXRBzlh6yPYiQyK",
    "2U4f5dJG97j9W7U1TaaXcCCfqdZDMKnmB7hMn8NLbqK5uLBQrltMIgt1tjIOfofv",
    "BNx0raECgYEApAvjwDJ75otKz/mvL3rUf/SNpieODBOLHFQqJmF+4hrSOniHC5jf",
    "f5GS5IuYtBQ1gudBYlSs9fX6T39d2avPsZjfvvSbULXi3OlzWD8sbTtvQPuCaZGI",
    "Df9PUWMYZ3HRwwdsYovSOkT53fG6guy+vElUEDkrpZYczROZ6GUcx70=",
    "-----END RSA PRIVATE KEY-----",
)) + "\n"  # PEM files conventionally end with a newline
|
| |
# Single-line base64 RSA public key, the format cloud-init/authorized_keys
# expects; passed as the third .format() argument to USER_DATA below.
# Presumably the public half of SSH_PRIV_KEY -- TODO: confirm.
SSH_PUB_KEY = ("AAAAB3NzaC1yc2EAAAADAQABAAABAQDUOtNJdBEXyKxBB898rdT54ULjMGuO6v4jLX"
               "mRsdRhR5Id/lKNc9hsdioPWUePgYlqML2iSV72vKQoVhkyYkpcsjr3zvBny9+5xej3"
               "+TBLoEMAm2hmllKPmxYJDU8jQJ7wJuRrOVOnk0iSNF+FcY/yaQ0owSF02Nphx47j2K"
               "Wc0IjGGlt4fl0fmHJuZBA2afN/4IYIIsEWZziDewVtaEjWV3InMRLllfdqGMllhFR+"
               "ed2hQz9PN2QcapmEvUR4UCy/mJXrke5htyFyHi8ECfyMMyYeHwbWLFQIve4CWix9qt"
               "ksvKjcetnxT+WWrutdr3c9cfIj/c0v/Zg/c4zETxtp")

# Default VM login credentials; interpolated into USER_DATA and used by
# InvCache.make_hostvars() for the Ansible connection variables.
DEF_USER = "root"
DEF_PASSWD = "foobar"
|
| |
-
|
| |
USER_DATA = """#cloud-config
|
| |
users:
|
| |
- default
|
| |
@@ -67,232 +64,700 @@
|
| |
list: |
|
| |
{0}:{1}
|
| |
expire: False
|
| |
- """.format(DEF_USER, DEF_PASSWD, AUTH_KEY)
|
| |
-
|
| |
-
|
| |
- def main(argv):
|
| |
- parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")
|
| |
- parser.add_argument("--list", action="store_true", help="Verbose output")
|
| |
- parser.add_argument('--host', help="Get host variables")
|
| |
- parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))
|
| |
- opts = parser.parse_args()
|
| |
-
|
| |
- try:
|
| |
- if opts.host:
|
| |
- data = inv_host(opts.host)
|
| |
- else:
|
| |
- data = inv_list(opts.subjects)
|
| |
- sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))
|
| |
- except RuntimeError as ex:
|
| |
- sys.stderr.write("{0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))
|
| |
- return 1
|
| |
-
|
| |
+ """.format(DEF_USER, DEF_PASSWD, SSH_PUB_KEY)
|
| |
+
|
| |
VM_IPV4_ADDR = "127.0.0.3"  # loopback address qemu hostfwd/monitor bind to
VM_START_TRIES = 5          # max. qemu launch attempts in create_host()
VM_PING_TRIES = 30          # max. Ansible 'raw' pings while waiting for ssh

# Private to manage_debug() and debug(), do not use.
_debug_fmt = 'DEBUG: {0}'
_debug_enabled = False
|
| |
+
|
| |
+
|
| |
def manage_debug(enable=None, debug_fmt=None):
    """
    Change or retrieve state of debugging enablement and message format

    :param enable: When not None, new boolean enablement state.
    :param debug_fmt: When not None, new format string for debug messages.
    :returns: Current (possibly just-updated) enablement state.
    """
    global _debug_enabled, _debug_fmt
    _debug_enabled = _debug_enabled if enable is None else enable
    _debug_fmt = _debug_fmt if debug_fmt is None else debug_fmt
    return _debug_enabled
|
| |
+
|
| |
+
|
| |
def log(msg):
    """
    Format msg with a newline, then write to sys.stderr + flush

    :param msg: Object to render into the message.
    :returns: Whatever ``sys.stderr.write()`` returned (character count).
    """
    written = sys.stderr.write('{0}\n'.format(msg))
    sys.stderr.flush()
    return written
|
| |
+
|
| |
+
|
| |
def debug(msg):
    """
    When enabled by manage_debug, format msg for log()

    :param msg: Object interpolated into the module's debug format string.
    :returns: ``log()``'s return value when debugging is enabled, otherwise 0.
    """
    if not manage_debug():
        return 0
    return log(_debug_fmt.format(msg))
|
| |
|
| |
|
| |
- def inv_list(subjects):
|
| |
- hosts = []
|
| |
- variables = {}
|
| |
- for subject in subjects:
|
| |
- if subject.endswith((".qcow2", ".qcow2c")):
|
| |
- host_vars = inv_host(subject)
|
| |
- if host_vars:
|
| |
- hosts.append(subject)
|
| |
- variables[subject] = host_vars
|
| |
- return {"localhost": {"hosts": hosts, "vars": {}},
|
| |
- "subjects": {"hosts": hosts, "vars": {}},
|
| |
- "_meta": {"hostvars": variables}}
|
| |
class InvCache(object):
    """
    Represents a single-source, on-disk cache of Ansible inventory details

    :param cachefile_basedir: Existing directory path where persistent cache
                              file should live.  If None, ``tempfile.gettempdir()``
                              is used.
    """

    # When non-none, represents the "empty" default contents of newly created cache
    DEFAULT_CACHE = None

    # When non-none, contains the base-directory for the persistent cache file
    basedir = None

    # Private, do not use
    _singleton = None
    _invcache = None

    def __new__(cls, cachefile_basedir=None):
        # Singleton: every instantiation returns the same object so all
        # callers in this process share one cache-file handle.
        if not getattr(cls, '_singleton', None):
            debug("Initializing Inventory Cache")
            cls.reset()
            cls.DEFAULT_CACHE = dict(localhost=dict(hosts=[], vars={}),
                                     subjects=dict(hosts=[], vars={}),
                                     _meta=dict(hostvars={}))
            cls._singleton = super(InvCache, cls).__new__(cls)
        return cls._singleton  # ``__init__()`` runs next

    def __init__(self, cachefile_basedir=None):
        # Don't touch it if already set by class
        if cachefile_basedir and not self.basedir:
            self.basedir = cachefile_basedir
        elif not self.basedir:
            self.basedir = tempfile.gettempdir()
        try:
            debug("Using inventory cache file: {0}".format(self.filepath))
        except AttributeError:
            pass  # don't fail when unittesting
            # '_io.StringIO' object has no attribute 'name'

    def __str__(self):
        return "{0}\n".format(json.dumps(self(), indent=4, separators=(',', ': ')))

    def __call__(self, new_obj=None):
        """
        Replace and/or return current cached JSON object

        :param new_obj: When not None, replaces current cache.
        :returns: Current cache object or None
        """
        if new_obj:
            debug("Replacing persistent inventory cache file contents")
            self.cachefile.seek(0)
            self.cachefile.truncate()
            json.dump(new_obj, self.cachefile)
            return self()
        else:
            self.cachefile.seek(0)
            try:
                debug("Loading inventory cache from persistent file")
                loaded_cache = json.load(self.cachefile)
            except ValueError:
                debug("Persistent cache file is empty/invalid, initializing with defaults")
                try:
                    # Fix: rewind before truncating -- a failed json.load() can
                    # leave the file position mid-file, and truncate() cuts at
                    # the current position, which would leave junk behind.
                    self.cachefile.seek(0)
                    self.cachefile.truncate()
                    loaded_cache = deepcopy(self.DEFAULT_CACHE)
                    json.dump(loaded_cache, self.cachefile)
                except RecursionError:
                    raise RuntimeError("Error loading or parsing default 'empty' cache"
                                       " after writing to disk: {0}".format(str(self.DEFAULT_CACHE)))
            return loaded_cache

    @property
    def cachefile(self):
        """Represents the active file backing the cache"""
        if self._invcache:
            return self._invcache
        # 'a+' creates the file when missing, opens read/write otherwise;
        # it does NOT truncate existing contents.
        self._invcache = open(self.filepath, 'a+')
        return self._invcache

    @property
    def filepath(self):
        """Represents complete path to on-disk cache file"""
        if self._invcache:  # Might not be open yet
            return self._invcache.name
        return os.path.join(self.basedir,
                            self.filename)

    @property
    def filename(self):
        """Represents the filename component of the on-disk cache file"""
        # Fix: was '"standard-inventory.cache".format(...)' -- a no-op format
        # on a placeholder-free literal, plus an unused ``cls`` lookup.
        return "standard-inventory.cache"

    @classmethod
    def reset(cls):
        """
        Wipe-out current cache state, except on-disk file
        """
        if cls._singleton and cls._singleton._invcache:
            try:
                # Fix: actually close the backing file.  The original wrapped
                # a plain attribute assignment (which cannot raise IOError) in
                # try/except and leaked the file descriptor.
                cls._singleton._invcache.close()
            except IOError:
                pass
            cls._singleton._invcache = None
        cls._singleton = None

    @contextmanager
    def locked(self, mode=fcntl.LOCK_EX):
        """
        Context manager protecting returned cache with mode

        :param mode: A value accepted by ``fcntl.flock()``'s ``op`` parameter.
        :returns: Standard Ansible inventory dictionary
        """
        debug("Acquiring Lock for persistent inventory cache file")
        try:
            fcntl.flock(self.cachefile, mode)  # __enter__
            yield self()
        finally:
            debug("Lock released")
            fcntl.flock(self.cachefile, fcntl.LOCK_UN)  # __exit__

    def gethost(self, hostname):
        """
        Look up details about a host from inventory cache.

        :param hostname: Name of host to retrieve.
        :returns: Tuple containing a dictionary of host variables,
                  and a list of groups.  None if host not found.
        """
        debug("Retrieving cached details about {0}.".format(hostname))
        with self.locked(fcntl.LOCK_SH) as inventory:
            groups = []
            for group, hostvars in inventory.items():
                hosts = hostvars.get("hosts", [])
                if hostname in hosts:
                    groups.append(group)
            meta = inventory.get("_meta", dict(hostvars=dict()))
            hostvars = meta["hostvars"].get(hostname, None)
            if hostvars is not None:
                debug("Host found")
                return (hostvars, groups)
            else:
                debug("Host not found")
                return None

    def addhost(self, hostname, hostvars=None, groups=('localhost', 'subjects')):
        """
        Update cache, adding hostname with hostvars to specified groups.

        :param hostname: An Ansible inventory-hostname (may not be actual hostname).
        :param hostvars: A dictionary of host variables to add, or None
        :param groups: A list of groups for the host to join.
        :returns: Tuple containing a dictionary of host variables, and a list of groups.
        """
        debug("Adding host {0} to cache".format(hostname))
        if not groups:
            groups = ('all',)
        if not hostvars:
            hostvars = {}
        with self.locked(fcntl.LOCK_EX) as inventory:
            # Fix: use setdefault() so brand-new groups / _meta are actually
            # stored in the inventory; .get() with a default returned a fresh
            # dictionary that was mutated and then silently discarded.
            meta = inventory.setdefault("_meta", dict(hostvars=dict()))
            meta["hostvars"][hostname] = hostvars
            for group in groups:
                inv_group = inventory.setdefault(group, dict(hosts=[], vars=dict()))
                inv_group["hosts"].append(hostname)
            # Update and write cache to disk
            self(inventory)
        return self.gethost(hostname)

    def delhost(self, hostname, keep_empty=False):
        """
        Remove hostname from inventory, return tuple of host vars. dict and group list

        :param hostname: Ansible hostname to remove from inventory.
        :param keep_empty: If False, remove cache file & reset if no hosts remain in cache.
        :returns: Tuple containing a former dictionary of host variables, and a list of
                  groups or None
        """
        debug("Deleting host {0} from cache".format(hostname))
        with self.locked(fcntl.LOCK_EX) as inventory:
            hostvars_groups = self.gethost(hostname)
            if hostvars_groups:
                hostvars, groups = hostvars_groups
                meta = inventory.get("_meta", dict(hostvars=dict()))
                if hostname in meta["hostvars"]:
                    del meta["hostvars"][hostname]
                for group in groups:
                    inv_group = inventory.get(group, dict(hosts=[], vars=dict()))
                    if hostname in inv_group["hosts"]:
                        inv_group["hosts"].remove(hostname)
                # Write out to disk
                self(inventory)
            # ``or`` short-circuits, so ``meta`` is only read when
            # hostvars_groups was truthy (i.e. when it was assigned above).
            conditions = [not keep_empty,
                          not hostvars_groups or not meta["hostvars"]]
            if all(conditions):  # No more hosts
                debug("Inventory cache is empty, removing file.")
                self.reset()
                try:
                    os.unlink(self.filepath)
                except FileNotFoundError:
                    pass
                else:
                    # NOTE(review): on a successful unlink the former host
                    # details are deliberately not returned -- confirm intent.
                    return None
            return hostvars_groups

    @staticmethod
    def make_hostvars(priv_key_file, port, host=VM_IPV4_ADDR,
                      user=DEF_USER, passwd=DEF_PASSWD):
        """Return dictionary of standard/boiler-plate hostvars"""

        return dict(ansible_ssh_private_key_file=str(priv_key_file),
                    ansible_port=str(port),
                    ansible_host=str(host),
                    ansible_user=str(user),
                    ansible_ssh_pass=str(passwd),
                    ansible_ssh_common_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no")

    def str_hostvars(self, hostname):
        """Return pretty-printed JSON of hostname's variables."""
        # NOTE(review): when gethost() returns None the unpacking below raises
        # TypeError before the ValueError check is reached; main() relies on
        # catching that TypeError, so the behavior is left as-is.
        hostvars, groups = self.gethost(hostname)
        if hostvars == groups == None:
            raise ValueError("Host '{0}' not found in cache file"
                             " '{1}'".format(hostname, self.cachefile.name))
        del groups  # not used
        return "{0}\n".format(json.dumps(hostvars, indent=4, separators=(',', ': ')))
|
| |
+
|
| |
+
|
| |
def try_replace_stderr_devtty():
    """
    Duplicate the controling terminal as if it were stderr to make debugging easier
    """
    try:
        controlling_tty = os.open("/dev/tty", os.O_WRONLY)
        os.dup2(controlling_tty, 2)
    except OSError:
        # No controlling terminal (daemon, CI, etc.) -- silently keep stderr
        pass
|
| |
|
| |
|
| |
- def start_qemu(image, cloudinit, log, portrange=(2222, 5555)):
|
| |
def main(argv=None, environ=None):
    """
    Entry point: print whole inventory (--list) or one host's vars (--host).

    :param argv: Command-line argument list; defaults to ``sys.argv``.
    :param environ: Environment mapping; defaults to ``os.environ``.
    """
    if argv is None:  # Makes unittesting easier
        argv = sys.argv
    if environ is None:
        environ = os.environ  # Side-effects: this isn't a dumb-dictionary
    # Fix: slice ([:1]) instead of index ([0]) so an empty $TEST_DEBUG value
    # does not raise IndexError.
    test_debug_env = environ.get("TEST_DEBUG", "0")[:1].lower() in ['t', 'y', '1']

    parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Ansible inventory 'script' plugin's interface.
    # ref: http://docs.ansible.com/ansible/devel/plugins/inventory/script.html
    parser.add_argument("--list", action="store_true", default=True,
                        help="Get variables for entire inventory")

    # This is not really needed since --list provides '_meta'.
    # It's implemented (below) and functional, for nostalgia's sake.
    parser.add_argument('--host', default=None, help="Get variables for a single host")

    # Auxiliary arguments (not used by Ansible)
    parser.add_argument('--debug', action="store_true", default=test_debug_env,
                        help="Disable qemu process cleanup & inventory cache removal."
                        " As an alternative, set $TEST_DEBUG == true, yes, or 1")
    parser.add_argument("subjects", nargs="*", default=[],
                        help=("With --list, an optional list of file"
                              " path(s) to qcow2 image(s) - in addition to any"
                              " specified in $TEST_SUBJECTS. If --host, "
                              " a single path to a qcow2 image is required."))

    opts = parser.parse_args(args=argv[1:])
    if opts.debug:
        manage_debug(True)
        debug('# Debugging enabled\n')
        try_replace_stderr_devtty()

    # Load / Create cache
    artifacts = artifacts_dirpath(environ)
    invcache = InvCache(artifacts)
    if opts.host:
        opts.list = False
        hostname = os.path.basename(opts.host)
        debug("Operating in --host mode for {0}".format(hostname))

        if not invcache.gethost(hostname):  # Enforce unique hostnames
            try_create_host(opts, opts.host, environ)
        try:
            sys.stdout.write(invcache.str_hostvars(hostname))
        except TypeError:  # try_create_host() failed
            sys.stdout.write('{}\n')

    else:  # opts.list (subjects is optional)
        opts.host = False
        # De-duplicate paths with $TEST_SUBJECTS
        subjects = set(s for s in shlex.split(environ.get("TEST_SUBJECTS", "")) if s.strip())
        subjects |= set(s for s in opts.subjects if s.strip())
        subjects -= set([None, ''])  # Just in case

        # Sets are un-ordered, correspondence is needed between paths and hostnames
        subjects = tuple(subjects)
        hostnames = tuple(os.path.basename(subject.strip())
                          for subject in subjects)
        subject_hostname = dict(zip(subjects, hostnames))
        # Fix: the dict's keys (the subjects) are already unique, so comparing
        # the dict's length to len(subjects) could never detect a clash; two
        # different paths with the same basename must be rejected instead.
        if len(set(hostnames)) != len(subjects):
            # Fatal, do not allow clashes in cache over hostnames, differing paths OK
            log("Error: One or more subjects from parameters"
                " or $TEST_SUBJECTS found with clashing"
                " hostnames (basenames):"
                " {0}\n".format(subjects))
            parser.print_help()  # N.B. print_help() does NOT exit by itself
            sys.exit(1)  # Required for unitesting

        debug("Operating in --list mode for subjects: {0}".format(subject_hostname.keys()))
        for subject, hostname in subject_hostname.items():
            if not invcache.gethost(hostname):  # doesn't already exist
                try_create_host(opts, subject, environ)

        sys.stdout.write(str(invcache))
|
| |
+
|
| |
+
|
| |
def start_qemu(image_path, cloudinit, console_log, portrange=(2222, 5555)):
    """
    Start QEMU, return tuple of subprocess instance, ssh and monitor port numbers.

    :param image_path: Path to the qcow2 image to boot (with -snapshot).
    :param cloudinit: Path to the cloud-init ISO attached as a cdrom.
    :param console_log: File path receiving the qemu command-line and output.
    :param portrange: ``range()`` arguments to probe for a free ssh port; the
                      monitor uses the next port up.
    :returns: (subprocess.Popen, ssh_port, monitor_port)
    :raises RuntimeError: When no usable port pair can be bound.
    """
    ioxcept = None
    for port in range(*portrange):
        ssh_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssh_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        mon_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        mon_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        cmd = ["/usr/bin/qemu-system-x86_64", "-m", "1024", image_path,
               "-cpu", "host", "-smp", "{}".format(multiprocessing.cpu_count()),
               "-enable-kvm", "-snapshot", "-cdrom", cloudinit,
               "-net", "nic,model=virtio", "-net",
               "user,hostfwd=tcp:{0}:{1}-:22".format(VM_IPV4_ADDR, port),
               "-device", "isa-serial,chardev=pts2", "-chardev",
               "file,id=pts2,path=" + console_log,
               # Monitor port allows rebooting VM w/o causing qemu-kvm process to exit
               "-chardev",
               "socket,host={0},port={1},id=mon0,server,nowait"
               "".format(VM_IPV4_ADDR, port + 1),
               "-mon", "chardev=mon0,mode=readline",
               "-display", "none"]
        try:
            ssh_sock.bind((VM_IPV4_ADDR, port))
            mon_sock.bind((VM_IPV4_ADDR, port + 1))
            # Fix: release the probe sockets BEFORE launching qemu, otherwise
            # qemu's own bind of the very same ports conflicts with ours.
            ssh_sock.close()
            mon_sock.close()
            # If launching qemu fails, having this means we get to see why.
            with open(console_log, 'a+') as console_logf:
                console_logf.write("Qemu command-line: {0}\n".format(' '.join(cmd)))
                console_logf.flush()
                return subprocess.Popen(cmd, stdout=console_logf,
                                        stderr=subprocess.STDOUT), port, port + 1
        except IOError as xcpt:
            # Fix: 'except IOError as ioxcept' would unbind ioxcept when the
            # except clause ends (py3), turning 'if ioxcept' below into a
            # NameError; copy into the loop-scoped name instead.
            ioxcept = xcpt
        finally:
            # close() is idempotent; guarantees no socket leak on error paths.
            ssh_sock.close()
            mon_sock.close()
    else:
        msgfmt = "Error launching qemu process: {0}: {1}"
        if ioxcept:
            raise RuntimeError(msgfmt.format(ioxcept.__class__.__name__, ioxcept))
        else:
            raise RuntimeError(msgfmt.format("Could not allocate a ssh & monitor port from range",
                                             portrange))
|
| |
|
| |
- # Use -cpu host and -smp by default.
|
| |
- # virtio-rng-pci: https://wiki.qemu.org/Features/VirtIORNG
|
| |
- return subprocess.Popen(["/usr/bin/qemu-system-x86_64",
|
| |
- "-cpu", "host", "-smp", "{}".format(multiprocessing.cpu_count()),
|
| |
- "-m", "1024", image, "-enable-kvm", "-snapshot", "-cdrom", cloudinit,
|
| |
- "-net", "nic,model=virtio", "-net", "user,hostfwd=tcp:127.0.0.3:{0}-:22".format(port),
|
| |
- "-device", "virtio-rng-pci", "-rtc", "base=utc",
|
| |
- "-device", "isa-serial,chardev=pts2", "-chardev", "file,id=pts2,path=" + log,
|
| |
- "-display", "none"], stdout=open(log, 'a'), stderr=subprocess.STDOUT), port
|
| |
|
| |
class NoQcow2Error(ValueError):
    """Raised for subjects lacking a qcow2 suffix; non-fatal if --list, fatal if --host"""
|
| |
|
| |
- def inv_host(image):
|
| |
- null = open(os.devnull, 'w')
|
| |
|
| |
def artifacts_dirpath(environ=None):
    """
    Return complete path to directory where testing artifacts should be stored

    The path comes from $TEST_ARTIFACTS (default: ./artifacts), is created if
    missing, and is written back into ``environ`` so all callers agree.

    :param environ: Environment mapping; defaults to ``os.environ``.
    :returns: Path string of an existing directory.
    :raises OSError: When the path exists but is not a directory, or cannot
                     be created.
    """
    if environ is None:
        environ = os.environ  # Side-effects: this isn't a dumb-dictionary
    artifacts = environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))
    # exist_ok replaces the manual errno.EEXIST dance; a pre-existing
    # non-directory still raises (FileExistsError is an OSError subclass).
    os.makedirs(artifacts, exist_ok=True)
    # Behave like a singleton: Update environ w/ proper path
    environ["TEST_ARTIFACTS"] = artifacts
    return artifacts
|
| |
+
|
| |
+
|
| |
def try_create_host(opts, image_path, environ):
    """Call create_host(), demoting NoQcow2Error to a logged warning."""
    try:
        create_host(opts, image_path, environ)
    except NoQcow2Error as xcept:  # Not fatal by itself
        # Presume another inventory script will handle it
        log("Warning: {0}".format(xcept))
|
| |
+
|
| |
+
|
| |
def create_host(opts, image_path, environ):
    """
    Boot a qcow2 subject under qemu and register it in the inventory cache.

    :param opts: Parsed argparse namespace; ``opts.debug`` is consulted.
    :param image_path: Path to a ``.qcow2`` / ``.qcow2c`` image file.
    :param environ: Environment mapping, passed to artifacts_dirpath().
    :raises NoQcow2Error: When image_path lacks a qcow2 suffix.
    :raises RuntimeError: When the VM cannot be launched or contacted.
    """
    image_path = image_path.strip()
    # directory-separators in hostnames could be bad
    hostname = os.path.basename(image_path)
    log("Creating host {0}".format(hostname))
    if not image_path.endswith(".qcow2") and not image_path.endswith(".qcow2c"):
        # Ignored in main(), printed if --debug used, required for unittesting
        raise NoQcow2Error("Subject '{0}' / '{1}',"
                           " image does not end in '.qcow2' or '.qcow2c'."
                           "".format(hostname, image_path))

    # temporary until subject destroyed, allow secondary debugging w/o artifacts-clutter
    temp_dir = tempfile.mkdtemp(prefix=".{0}_".format(hostname),
                                suffix='.temp', dir=artifacts_dirpath())
    ssh_priv_key_file = os.path.join(temp_dir, "ssh_priv_key")
    debug("Using ssh key {0}".format(ssh_priv_key_file))
    with open(ssh_priv_key_file, 'w') as f:
        f.write(SSH_PRIV_KEY)
    os.chmod(ssh_priv_key_file, 0o600)
    metadata = os.path.join(temp_dir, "meta-data")
    with open(metadata, 'w') as f:
        f.write("")
    userdata = os.path.join(temp_dir, "user-data")
    with open(userdata, 'w') as f:
        f.write(USER_DATA)

    # Create our cloud init so we can log in
    null = open(os.devnull, 'w')
    try:  # Fix: 'null' was never closed; ensure it cannot leak
        cloudinit = os.path.join(temp_dir, "cloud-init.iso")
        subprocess.check_call(["/usr/bin/genisoimage", "-input-charset", "utf-8",
                               "-volid", "cidata", "-joliet", "-rock", "-quiet",
                               "-output", cloudinit, userdata, metadata], stdout=null)
        debug("Using cloud-init {0}".format(cloudinit))
        # And launch the actual VM
        console_log = os.path.join(artifacts_dirpath(environ), "{0}.log".format(hostname))
        # Fix: was .format(log) -- the log() *function object*, not the path
        debug("Using qemu log {0}".format(console_log))
        proc = None  # for failure detection
        cpe = None  # for exception scoping
        for tries in range(0, VM_START_TRIES):
            log(" Launching {0}, attempt #{1}/{2}"
                "".format(hostname, tries+1, VM_START_TRIES))
            try:
                proc, ssh_port, mon_port = start_qemu(image_path, cloudinit, console_log)
                break
            except subprocess.CalledProcessError as xcpt:
                # Fix: 'except ... as cpe' unbinds cpe when the clause ends
                # (py3); copy into the outer-scoped name instead.
                cpe = xcpt
                time.sleep(1)
                continue
        # Fix: proc is None when every launch attempt raised; calling
        # proc.poll() unconditionally was an AttributeError in that case.
        if proc is None or proc.poll():
            raise RuntimeError("Could not launch VM, exited:{0}: {1}"
                               "".format(proc.returncode if proc else None,
                                         cpe.output if cpe else None))

        # Ansible ping signals VM up, needs temporary inventory
        hostvars = InvCache.make_hostvars(ssh_priv_key_file, int(ssh_port))
        hostvars['qemu_monitor_port'] = mon_port
        inv_vars_sfx = " ".join(tuple("{0}='{1}'".format(k, v) for k, v in hostvars.items()))
        inventory_filepath = os.path.join(temp_dir, "inventory.ini")
        with open(inventory_filepath, "w") as inventory_file:
            # Exclude dictionary prefix/suffix in JSON
            inventory_file.write("{0} {1}\n".format(hostname, inv_vars_sfx))

        ping = [
            "/usr/bin/ansible",
            "--inventory",
            inventory_filepath,
            hostname,
            "--module-name",
            "raw",
            "--args",
            "/bin/true"
        ]

        for tries in range(0, VM_PING_TRIES):
            log(" Contacting {0}, attempt #{1}/{2}"
                "".format(hostname, tries+1, VM_PING_TRIES))
            try:
                if proc.poll():
                    raise RuntimeError("Error launching '{0}' for '{1}',"
                                       " qemu exited {2}.\n".format(hostname, image_path,
                                                                    proc.returncode))
                subprocess.check_call(ping, stdout=null, stderr=null, close_fds=True)
                break
            except subprocess.CalledProcessError:
                time.sleep(3)
        else:  # every ping attempt failed
            raise RuntimeError("Error launching VM '{0}' for '{1}',"
                               " excessive Ansible ping attempts.\n".format(hostname, image_path))
    finally:
        null.close()

    invcache = InvCache()
    invcache.addhost(hostname, hostvars)

    debug("Access host with:")
    debug(" ssh -p {0} -o StrictHostKeyChecking=no"
          " -o UserKnownHostsFile=/dev/null {2}@{1}"
          "".format(ssh_port, VM_IPV4_ADDR, DEF_USER))
    debug(" {0}'s password: {1}".format(DEF_USER, DEF_PASSWD))
    debug("Access host's monitor with:")
    debug(" telnet {0} {1}".format(VM_IPV4_ADDR, mon_port))
    debug("")
    # subject_watcher() will not kill QEMU when --debug was specified
    debug("kill {0} # when finished\n".format(proc.pid))
    # Even if debugging, the inventory cache file needs grooming on qemu exit
    subject_watcher(proc.pid, hostname, temp_dir, environ, opts.debug)
|
| |
+
|
| |
+
|
| |
+ def monitor(pid_or_filepath, qemu_pid):
|
| |
+ """Helper for subject_watcher() to monitor processes"""
|
| |
+
|
| |
+ if isinstance(pid_or_filepath, int):
|
| |
+ expected_errno = (errno.ESRCH, )
|
| |
+ lockonfile = None
|
| |
+ parent_pid = pid_or_filepath
|
| |
+ mon_what = "qemu parent-pid {2}"
|
| |
else:
|
| |
- # Kill the qemu process
|
| |
+ expected_errno = (errno.EACCES, errno.ENOENT, # reg. file
|
| |
+ errno.EEXIST, # pipe/fifo
|
| |
+ errno.ECONNRESET, # socket
|
| |
+ errno.ECONNABORTED, # socket
|
| |
+ errno.ECONNREFUSED) # socket
|
| |
+ lockonfile = pid_or_filepath
|
| |
+ parent_pid = None
|
| |
+ mon_what = "$LOCK_ON_FILE {2}"
|
| |
+
|
| |
+ mon_msg = 'QEMU PID {1} and ' + mon_what + ' at {0}'
|
| |
+ monitor_signaled = False
|
| |
+ qemu_died = False
|
| |
+ # Monitor pid_or_filepath, and qemu-process
|
| |
+ while all([not monitor_signaled, not qemu_died]):
|
| |
+ log(mon_msg.format(int(time.time()), qemu_pid, pid_or_filepath))
|
| |
try:
|
| |
- os.kill(proc.pid, signal.SIGTERM)
|
| |
- except OSError:
|
| |
- pass
|
| |
- raise RuntimeError("could not access launched qcow2 image: {0}".format(image))
|
| |
-
|
| |
- # Process of our parent
|
| |
- ppid = os.getppid()
|
| |
-
|
| |
- child = os.fork()
|
| |
- if child:
|
| |
- return variables
|
| |
+ monitor_signaled = True # If exception thrown
|
| |
+ if parent_pid:
|
| |
+ os.kill(parent_pid, 0) # Exception if process doesn't exist
|
| |
+ else: # lockonfile
|
| |
+ open(lockonfile) # Exception if read not allowed
|
| |
+ monitor_signaled = False # No exception thrown
|
| |
+
|
| |
+ # Monitoring unnecessary if qemu process doesn't exist
|
| |
+ qemu_died = True # If exception thrown
|
| |
+ os.kill(qemu_pid, 0)
|
| |
+ qemu_died = False # No exception thrown
|
| |
+ except (IOError, OSError) as xcpt:
|
| |
+ if xcpt.errno not in expected_errno:
|
| |
+ debug("Unable to handle {0}: {1}".format(xcpt.__class__.__name__, xcpt))
|
| |
+ raise
|
| |
+ continue  # exit loop immediately
|
| |
+ time.sleep(10) # Don't busy-wait
|
| |
+
|
| |
+ return monitor_signaled, qemu_died
|
| |
+
|
| |
+
|
| |
+ def subject_watcher(qemu_pid, hostname, temp_dir, environ, debugging=True):
|
| |
+ """
|
| |
+ Monitor process that called this script, kill off VMs and cleanup on exit.
|
| |
+ """
|
| |
+ # Parent of our pid is Ansible process
|
| |
+ parent_pid = os.getppid()
|
| |
+
|
| |
+ if 1 in (parent_pid, qemu_pid):
|
| |
+ raise RuntimeError("Cowardly refusing to monitor pid 1")
|
| |
+
|
| |
+ # Needed at end of watcher process
|
| |
+ artifacts = artifacts_dirpath(environ)
|
| |
+ lockonfile = environ.get('LOCK_ON_FILE', None)
|
| |
+
|
| |
+ info_msg_pfx = ("Watching VM {0} qemu pid {1} from watcher"
|
| |
+ "".format(hostname, qemu_pid))
|
| |
+ if lockonfile:
|
| |
+ info_msg_fmt = ("{0} pid {1} for $LOCK_ON_FILE {2}"
|
| |
+ "".format(info_msg_pfx, "{0}", lockonfile))
|
| |
+ mon_args = (lockonfile, qemu_pid)
|
| |
+ lockonfile = os.path.realpath(lockonfile)
|
| |
+ try:
|
| |
+ open(lockonfile)
|
| |
+ except IOError as xcpt:
|
| |
+ log("WARNING: Can't open $LOCK_ON_FILE {0}: {1}: {2}.\n"
|
| |
+ "".format(lockonfile, xcpt.__class__.__name__, xcpt))
|
| |
+ if debugging:
|
| |
+ log("WARNING: Qemu process will be killed shortly after creation.\n")
|
| |
+ else:
|
| |
+ info_msg_fmt = ("{0} pid {1} for parent pid {2}"
|
| |
+ "".format(info_msg_pfx, "{0}", parent_pid))
|
| |
+ mon_args = (parent_pid, qemu_pid)
|
| |
+
|
| |
+ watcher_pid = os.fork()
|
| |
+ if watcher_pid: # parent
|
| |
+ log(info_msg_fmt.format(watcher_pid))
|
| |
+ return
|
| |
+ else:
|
| |
+ watcher_pid = os.getpid()
|
| |
|
| |
- # Daemonize and watch the processes
|
| |
+ # Harden child process
|
| |
os.chdir("/")
|
| |
os.setsid()
|
| |
os.umask(0)
|
| |
-
|
| |
- if tty is None:
|
| |
- tty = null.fileno()
|
| |
-
|
| |
- # Duplicate standard input to standard output and standard error.
|
| |
- os.dup2(null.fileno(), 0)
|
| |
- os.dup2(tty, 1)
|
| |
- os.dup2(tty, 2)
|
| |
-
|
| |
- # alternatively, lock on a file
|
| |
- lock_file = os.environ.get("LOCK_ON_FILE", None)
|
| |
- while True:
|
| |
- time.sleep(3)
|
| |
-
|
| |
- if lock_file:
|
| |
- if not os.path.exists(lock_file):
|
| |
- sys.stderr.write("Lock file is gone.")
|
| |
- break
|
| |
- else:
|
| |
- # Now wait for the parent process to go away, then kill the VM
|
| |
+ InvCache.reset() # don't hold onto cachefile object
|
| |
+ for _file in (sys.stdin, sys.stdout, sys.stderr):
|
| |
+ if _file:
|
| |
try:
|
| |
- os.kill(ppid, 0)
|
| |
- os.kill(proc.pid, 0)
|
| |
- except OSError:
|
| |
- sys.stderr.write("Either parent process or VM process is gone.")
|
| |
- break # Either of the processes no longer exist
|
| |
-
|
| |
- if diagnose:
|
| |
- sys.stderr.write("\n")
|
| |
- sys.stderr.write("DIAGNOSE: ssh -p {0} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
|
| |
- "root@{1} # password: {2}\n".format(port, "127.0.0.3", "foobar"))
|
| |
- sys.stderr.write("DIAGNOSE: export ANSIBLE_INVENTORY={0}\n".format(inventory))
|
| |
- sys.stderr.write("DIAGNOSE: kill {0} # when finished\n".format(os.getpid()))
|
| |
-
|
| |
- def _signal_handler(*args):
|
| |
- sys.stderr.write("\nDIAGNOSE ending...\n")
|
| |
-
|
| |
- signal.signal(signal.SIGTERM, _signal_handler)
|
| |
- signal.pause()
|
| |
-
|
| |
- # Kill the qemu process
|
| |
- try:
|
| |
- os.kill(proc.pid, signal.SIGTERM)
|
| |
- except OSError:
|
| |
- pass
|
| |
-
|
| |
- shutil.rmtree(directory)
|
| |
+ _file.close()
|
| |
+ except IOError:
|
| |
+ pass
|
| |
+ os.closerange(0, 255) # Be certain nothing is open, ignore any errors
|
| |
+
|
| |
+ # Make multi-process debugging easier
|
| |
+ logfn = '{0}_watcher.log'.format(os.path.basename(sys.argv[0]).split('.')[0])
|
| |
+ sys.stderr = open(os.path.join(artifacts_dirpath(environ), logfn), 'a')
|
| |
+ os.dup2(sys.stderr.fileno(), 2)
|
| |
+ os.dup2(sys.stderr.fileno(), 1)
|
| |
+ manage_debug(debugging, "WATCHER {0}: {1}".format(watcher_pid, '{0}'))
|
| |
+ log(info_msg_fmt.format(watcher_pid))
|
| |
+ invcache = InvCache(artifacts) # re-opens cachefile
|
| |
+
|
| |
+ # Wait for monitor to signal exit time (blocking)
|
| |
+ monitor_signaled, qemu_died = monitor(*mon_args)
|
| |
+
|
| |
+ oops_msg = ("Monitor function exited unexpectedly "
|
| |
+ "monitor_signaled={0} and qemu_died={1}"
|
| |
+ "".format(monitor_signaled, qemu_died))
|
| |
+
|
| |
+ if debugging:
|
| |
+ msg_sfx = "NOT removing {0}".format(temp_dir)
|
| |
+ if monitor_signaled:
|
| |
+ debug("NOT killing qemu and {0}".format(msg_sfx))
|
| |
+ elif qemu_died:
|
| |
+ debug("Qemu process died, {0}"
|
| |
+ "".format(msg_sfx))
|
| |
+ else:
|
| |
+ debug(oops_msg)
|
| |
+ debug(msg_sfx)
|
| |
+ # Don't remove cache if empty
|
| |
+ invcache.delhost(hostname, keep_empty=True)
|
| |
+ debug("Removed host {0} from inventory cache.".format(hostname))
|
| |
+ else: # debugging==false
|
| |
+ msg_sfx = "removing {0}".format(temp_dir)
|
| |
+ try:
|
| |
+ if monitor_signaled:
|
| |
+ debug("Killing qemu process and {0}".format(msg_sfx))
|
| |
+ os.kill(qemu_pid, signal.SIGTERM) # Ensure it's actually dead
|
| |
+ elif qemu_died:
|
| |
+ debug("Qemu process died, {0}".format(msg_sfx))
|
| |
+ else:
|
| |
+ debug(oops_msg)
|
| |
+ debug(msg_sfx)
|
| |
+
|
| |
+ except OSError as xcpt:
|
| |
+ # Don't care if qemu_pid doesn't exist, that was the goal.
|
| |
+ if xcpt.errno != errno.ESRCH:
|
| |
+ debug("Qemu process dead, but unable to handle {0}: {1}"
|
| |
+ "".format(xcpt.__class__.__name__, xcpt))
|
| |
+ raise
|
| |
+ finally:
|
| |
+ shutil.rmtree(temp_dir, ignore_errors=True)
|
| |
+ invcache.delhost(hostname, keep_empty=False)
|
| |
+ log("Watcher {0} exiting".format(watcher_pid))
|
| |
+ if debugging:
|
| |
+ log("Remember to manually kill {0} if necessary".format(qemu_pid))
|
| |
+ log("FIXME: ignore 'IOError: [Errno 9] Bad file descriptor' coming next...")
|
| |
sys.exit(0)
|
| |
|
| |
|
| |
if __name__ == '__main__':
|
| |
- sys.exit(main(sys.argv))
|
| |
+ main(sys.argv)
|
| |
Fix standard-inventory-qcow2 standards compliance