#39 Fix standard-inventory-qcow2 standards compliance
Opened 4 years ago by cevich. Modified 4 years ago
cevich/standard-test-roles fix_qcow2  into  master

file added
+69
@@ -0,0 +1,69 @@ 

+ # Backup files

+ .*~

+ *~

+ 

+ # Vim swap files

+ .*.swp

+ 

+ # Ansible retry files

+ *.retry

+ 

+ # Byte-compiled / optimized / DLL files

+ __pycache__

+ *.py[cod]

+ *$py.class

+ 

+ # C extensions

+ *.so

+ 

+ # Distribution / packaging

+ .Python

+ env/

+ build/

+ develop-eggs/

+ dist/

+ downloads/

+ eggs/

+ .eggs/

+ lib/

+ lib64/

+ parts/

+ sdist/

+ var/

+ wheels/

+ *.egg-info/

+ .installed.cfg

+ *.egg

+ 

+ # PyInstaller

+ #  Usually these files are written by a python script from a template

+ #  before PyInstaller builds the exe, so as to inject date/other infos into it.

+ *.manifest

+ *.spec

+ 

+ # Installer logs

+ pip-log.txt

+ pip-delete-this-directory.txt

+ 

+ # Unit test / coverage reports

+ htmlcov/

+ .tox/

+ .coverage

+ .coverage.*

+ .cache

+ nosetests.xml

+ coverage.xml

+ *.cover

+ .hypothesis/

+ 

+ # Translations

+ *.mo

+ *.pot

+ 

+ # virtualenv

+ .venv

+ venv/

+ ENV/

+ 

+ # Default location if running from repo-clone

+ artifacts/

file modified
+689 -224
@@ -9,53 +9,50 @@ 

  import signal

  import multiprocessing

  import socket

+ from contextlib import contextmanager

+ import fcntl

  import subprocess

  import sys

  import tempfile

  import time

- import distutils.util

- 

- IDENTITY = """

- -----BEGIN RSA PRIVATE KEY-----

- MIIEpQIBAAKCAQEA1DrTSXQRF8isQQfPfK3U+eFC4zBrjur+Iy15kbHUYUeSHf5S

- jXPYbHYqD1lHj4GJajC9okle9rykKFYZMmJKXLI6987wZ8vfucXo9/kwS6BDAJto

- ZpZSj5sWCQ1PI0Ce8CbkazlTp5NIkjRfhXGP8mkNKMEhdNjaYceO49ilnNCIxhpb

- eH5dH5hybmQQNmnzf+CGCCLBFmc4g3sFbWhI1ldyJzES5ZX3ahjJZYRUfnndoUM/

- TzdkHGqZhL1EeFAsv5iV65HuYbchch4vBAn8jDMmHh8G1ixUCL3uAlosfarZLLyo

- 3HrZ8U/llq7rXa93PXHyI/3NL/2YP3OMxE8baQIDAQABAoIBAQCxuOUwkKqzsQ9W

- kdTWArfj3RhnKigYEX9qM+2m7TT9lbKtvUiiPc2R3k4QdmIvsXlCXLigyzJkCsqp

- IJiPEbJV98bbuAan1Rlv92TFK36fBgC15G5D4kQXD/ce828/BSFT2C3WALamEPdn

- v8Xx+Ixjokcrxrdeoy4VTcjB0q21J4C2wKP1wEPeMJnuTcySiWQBdAECCbeZ4Vsj

- cmRdcvL6z8fedRPtDW7oec+IPkYoyXPktVt8WsQPYkwEVN4hZVBneJPCcuhikYkp

- T3WGmPV0MxhUvCZ6hSG8D2mscZXRq3itXVlKJsUWfIHaAIgGomWrPuqC23rOYCdT

- 5oSZmTvFAoGBAPs1FbbxDDd1fx1hisfXHFasV/sycT6ggP/eUXpBYCqVdxPQvqcA

- ktplm5j04dnaQJdHZ8TPlwtL+xlWhmhFhlCFPtVpU1HzIBkp6DkSmmu0gvA/i07Z

- pzo5Z+HRZFzruTQx6NjDtvWwiXVLwmZn2oiLeM9xSqPu55OpITifEWNjAoGBANhH

- XwV6IvnbUWojs7uiSGsXuJOdB1YCJ+UF6xu8CqdbimaVakemVO02+cgbE6jzpUpo

- krbDKOle4fIbUYHPeyB0NMidpDxTAPCGmiJz7BCS1fCxkzRgC+TICjmk5zpaD2md

- HCrtzIeHNVpTE26BAjOIbo4QqOHBXk/WPen1iC3DAoGBALsD3DSj46puCMJA2ebI

- 2EoWaDGUbgZny2GxiwrvHL7XIx1XbHg7zxhUSLBorrNW7nsxJ6m3ugUo/bjxV4LN

- L59Gc27ByMvbqmvRbRcAKIJCkrB1Pirnkr2f+xx8nLEotGqNNYIawlzKnqr6SbGf

- Y2wAGWKmPyEoPLMLWLYkhfdtAoGANsFa/Tf+wuMTqZuAVXCwhOxsfnKy+MNy9jiZ

- XVwuFlDGqVIKpjkmJyhT9KVmRM/qePwgqMSgBvVOnszrxcGRmpXRBzlh6yPYiQyK

- 2U4f5dJG97j9W7U1TaaXcCCfqdZDMKnmB7hMn8NLbqK5uLBQrltMIgt1tjIOfofv

- BNx0raECgYEApAvjwDJ75otKz/mvL3rUf/SNpieODBOLHFQqJmF+4hrSOniHC5jf

- f5GS5IuYtBQ1gudBYlSs9fX6T39d2avPsZjfvvSbULXi3OlzWD8sbTtvQPuCaZGI

- Df9PUWMYZ3HRwwdsYovSOkT53fG6guy+vElUEDkrpZYczROZ6GUcx70=

- -----END RSA PRIVATE KEY-----

- """

- 

- 

- AUTH_KEY = ("AAAAB3NzaC1yc2EAAAADAQABAAABAQDUOtNJdBEXyKxBB898rdT54ULjMGuO6v4jLX"

-             "mRsdRhR5Id/lKNc9hsdioPWUePgYlqML2iSV72vKQoVhkyYkpcsjr3zvBny9+5xej3"

-             "+TBLoEMAm2hmllKPmxYJDU8jQJ7wJuRrOVOnk0iSNF+FcY/yaQ0owSF02Nphx47j2K"

-             "Wc0IjGGlt4fl0fmHJuZBA2afN/4IYIIsEWZziDewVtaEjWV3InMRLllfdqGMllhFR+"

-             "ed2hQz9PN2QcapmEvUR4UCy/mJXrke5htyFyHi8ECfyMMyYeHwbWLFQIve4CWix9qt"

-             "ksvKjcetnxT+WWrutdr3c9cfIj/c0v/Zg/c4zETxtp")

- 

+ from copy import deepcopy

+ 

+ 

+ SSH_PRIV_KEY = ("-----BEGIN RSA PRIVATE KEY-----"

+                 "MIIEpQIBAAKCAQEA1DrTSXQRF8isQQfPfK3U+eFC4zBrjur+Iy15kbHUYUeSHf5S"

+                 "jXPYbHYqD1lHj4GJajC9okle9rykKFYZMmJKXLI6987wZ8vfucXo9/kwS6BDAJto"

+                 "ZpZSj5sWCQ1PI0Ce8CbkazlTp5NIkjRfhXGP8mkNKMEhdNjaYceO49ilnNCIxhpb"

+                 "eH5dH5hybmQQNmnzf+CGCCLBFmc4g3sFbWhI1ldyJzES5ZX3ahjJZYRUfnndoUM/"

+                 "TzdkHGqZhL1EeFAsv5iV65HuYbchch4vBAn8jDMmHh8G1ixUCL3uAlosfarZLLyo"

+                 "3HrZ8U/llq7rXa93PXHyI/3NL/2YP3OMxE8baQIDAQABAoIBAQCxuOUwkKqzsQ9W"

+                 "kdTWArfj3RhnKigYEX9qM+2m7TT9lbKtvUiiPc2R3k4QdmIvsXlCXLigyzJkCsqp"

+                 "IJiPEbJV98bbuAan1Rlv92TFK36fBgC15G5D4kQXD/ce828/BSFT2C3WALamEPdn"

+                 "v8Xx+Ixjokcrxrdeoy4VTcjB0q21J4C2wKP1wEPeMJnuTcySiWQBdAECCbeZ4Vsj"

+                 "cmRdcvL6z8fedRPtDW7oec+IPkYoyXPktVt8WsQPYkwEVN4hZVBneJPCcuhikYkp"

+                 "T3WGmPV0MxhUvCZ6hSG8D2mscZXRq3itXVlKJsUWfIHaAIgGomWrPuqC23rOYCdT"

+                 "5oSZmTvFAoGBAPs1FbbxDDd1fx1hisfXHFasV/sycT6ggP/eUXpBYCqVdxPQvqcA"

+                 "ktplm5j04dnaQJdHZ8TPlwtL+xlWhmhFhlCFPtVpU1HzIBkp6DkSmmu0gvA/i07Z"

+                 "pzo5Z+HRZFzruTQx6NjDtvWwiXVLwmZn2oiLeM9xSqPu55OpITifEWNjAoGBANhH"

+                 "XwV6IvnbUWojs7uiSGsXuJOdB1YCJ+UF6xu8CqdbimaVakemVO02+cgbE6jzpUpo"

+                 "krbDKOle4fIbUYHPeyB0NMidpDxTAPCGmiJz7BCS1fCxkzRgC+TICjmk5zpaD2md"

+                 "HCrtzIeHNVpTE26BAjOIbo4QqOHBXk/WPen1iC3DAoGBALsD3DSj46puCMJA2ebI"

+                 "2EoWaDGUbgZny2GxiwrvHL7XIx1XbHg7zxhUSLBorrNW7nsxJ6m3ugUo/bjxV4LN"

+                 "L59Gc27ByMvbqmvRbRcAKIJCkrB1Pirnkr2f+xx8nLEotGqNNYIawlzKnqr6SbGf"

+                 "Y2wAGWKmPyEoPLMLWLYkhfdtAoGANsFa/Tf+wuMTqZuAVXCwhOxsfnKy+MNy9jiZ"

+                 "XVwuFlDGqVIKpjkmJyhT9KVmRM/qePwgqMSgBvVOnszrxcGRmpXRBzlh6yPYiQyK"

+                 "2U4f5dJG97j9W7U1TaaXcCCfqdZDMKnmB7hMn8NLbqK5uLBQrltMIgt1tjIOfofv"

+                 "BNx0raECgYEApAvjwDJ75otKz/mvL3rUf/SNpieODBOLHFQqJmF+4hrSOniHC5jf"

+                 "f5GS5IuYtBQ1gudBYlSs9fX6T39d2avPsZjfvvSbULXi3OlzWD8sbTtvQPuCaZGI"

+                 "Df9PUWMYZ3HRwwdsYovSOkT53fG6guy+vElUEDkrpZYczROZ6GUcx70="

+                 "-----END RSA PRIVATE KEY-----")

+ SSH_PUB_KEY = ("AAAAB3NzaC1yc2EAAAADAQABAAABAQDUOtNJdBEXyKxBB898rdT54ULjMGuO6v4jLX"

+                "mRsdRhR5Id/lKNc9hsdioPWUePgYlqML2iSV72vKQoVhkyYkpcsjr3zvBny9+5xej3"

+                "+TBLoEMAm2hmllKPmxYJDU8jQJ7wJuRrOVOnk0iSNF+FcY/yaQ0owSF02Nphx47j2K"

+                "Wc0IjGGlt4fl0fmHJuZBA2afN/4IYIIsEWZziDewVtaEjWV3InMRLllfdqGMllhFR+"

+                "ed2hQz9PN2QcapmEvUR4UCy/mJXrke5htyFyHi8ECfyMMyYeHwbWLFQIve4CWix9qt"

+                "ksvKjcetnxT+WWrutdr3c9cfIj/c0v/Zg/c4zETxtp")

  DEF_USER = "root"

  DEF_PASSWD = "foobar"

- 

  USER_DATA = """#cloud-config

  users:

    - default
@@ -67,232 +64,700 @@ 

    list: |

      {0}:{1}

    expire: False

- """.format(DEF_USER, DEF_PASSWD, AUTH_KEY)

- 

- 

- def main(argv):

-     parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")

-     parser.add_argument("--list", action="store_true", help="Verbose output")

-     parser.add_argument('--host', help="Get host variables")

-     parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

-     opts = parser.parse_args()

- 

-     try:

-         if opts.host:

-             data = inv_host(opts.host)

-         else:

-             data = inv_list(opts.subjects)

-         sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

-     except RuntimeError as ex:

-         sys.stderr.write("{0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

-         return 1

- 

+ """.format(DEF_USER, DEF_PASSWD, SSH_PUB_KEY)

+ 

+ VM_IPV4_ADDR = "127.0.0.3"

+ VM_START_TRIES = 5

+ VM_PING_TRIES = 30

+ 

+ # Private to manage_debug() and debug(), do not use.

+ _debug_fmt = 'DEBUG: {0}'

+ _debug_enabled = False

+ 

+ 

+ def manage_debug(enable=None, debug_fmt=None):

+     """

+     Change or retrieve state of debugging enablement and message format

+     """

+     global _debug_enabled, _debug_fmt

+     if enable is not None:

+         _debug_enabled = enable

+     if debug_fmt is not None:

+         _debug_fmt = debug_fmt

+     return _debug_enabled

+ 

+ 

+ def log(msg):

+     """

+     Format msg with a newline, then write to sys.stderr + flush

+     """

+     msgnl = '{0}\n'.format(msg)

+     result = sys.stderr.write(msgnl)

+     sys.stderr.flush()

+     return result

+ 

+ 

+ def debug(msg):

+     """

+     When enabled by manage_debug, format msg for log()

+     """

+     global _debug_fmt

+     if manage_debug():

+         return log(_debug_fmt.format(msg))

      return 0

  

  

- def inv_list(subjects):

-     hosts = []

-     variables = {}

-     for subject in subjects:

-         if subject.endswith((".qcow2", ".qcow2c")):

-             host_vars = inv_host(subject)

-             if host_vars:

-                 hosts.append(subject)

-                 variables[subject] = host_vars

-     return {"localhost": {"hosts": hosts, "vars": {}},

-             "subjects": {"hosts": hosts, "vars": {}},

-             "_meta": {"hostvars": variables}}

+ class InvCache(object):

+     """

+     Represents a single-source, on-disk cache of Ansible inventory details

+ 

+     :param cachefile_basedir: Existing directory path where persistent cache

+                               file should live.  If None, ``tempfile.gettempdir()``

+                               is used.

+     """

+ 

+     # When non-none, represents the "empty" default contents of newly created cache

+     DEFAULT_CACHE = None

+ 

+     # When non-none, contains the base-directory for the persistent cache file

+     basedir = None

+ 

+     # Private, do not use

+     _singleton = None

+     _invcache = None

+ 

+     def __new__(cls, cachefile_basedir=None):

+         if not getattr(cls, '_singleton', None):

+             debug("Initializing Inventory Cache")

+             cls.reset()

+             cls.DEFAULT_CACHE = dict(localhost=dict(hosts=[], vars={}),

+                                      subjects=dict(hosts=[], vars={}),

+                                      _meta=dict(hostvars={}))

+             cls._singleton = super(InvCache, cls).__new__(cls)

+         return cls._singleton  # ``__init__()`` runs next

+ 

+     def __init__(self, cachefile_basedir=None):

+         # Don't touch it if already set by class

+         if cachefile_basedir and not self.basedir:

+             self.basedir = cachefile_basedir

+         elif not self.basedir:

+             self.basedir = tempfile.gettempdir()

+         try:

+             debug("Using inventory cache file: {0}".format(self.filepath))

+         except AttributeError:

+             pass  # don't fail when unittesting

+                   # '_io.StringIO' object has no attribute 'name'

+ 

+     def __str__(self):

+         return "{0}\n".format(json.dumps(self(), indent=4, separators=(',', ': ')))

+ 

+     def __call__(self, new_obj=None):

+         """

+         Replace and/or return current cached JSON object

+ 

+         :param new_obj: When not None, replaces current cache.

+         :returns: Current cache object or None

+         """

+         if new_obj:

+             debug("Replacing persistent inventory cache file contents")

+             self.cachefile.seek(0)

+             self.cachefile.truncate()

+             json.dump(new_obj, self.cachefile)

+             return self()

+         else:

+             self.cachefile.seek(0)

+             try:

+                 debug("Loading inventory cache from persistent file")

+                 loaded_cache = json.load(self.cachefile)

+             except ValueError as xcpt:

+                 debug("Persistent cache file is empty/invalid, initializing with defaults")

+                 try:

+                     self.cachefile.truncate()

+                     loaded_cache = deepcopy(self.DEFAULT_CACHE)

+                     json.dump(loaded_cache, self.cachefile)

+                 except RecursionError:

+                     raise RuntimeError("Error loading or parsing default 'empty' cache"

+                                        " after writing to disk: {0}".format(str(self.DEFAULT_CACHE)))

+             return loaded_cache

+ 

+     @property

+     def cachefile(self):

+         """Represents the active file backing the cache"""

+         if self._invcache:

+             return self._invcache

+         # Truncate if new, open for r/w otherwise

+         self._invcache = open(self.filepath, 'a+')

+         return self._invcache

+ 

+     @property

+     def filepath(self):

+         """Represents complete path to on-disk cache file"""

+         if self._invcache:  # Might not be open yet

+             return self._invcache.name

+         return os.path.join(self.basedir,

+                             self.filename)

+ 

+     @property

+     def filename(self):

+         """Represents the filename component of the on-disk cache file"""

+         cls = self.__class__  # Can't be classmethod && property

+         return "standard-inventory.cache".format(os.path.basename(sys.argv[0]))

+ 

+     @classmethod

+     def reset(cls):

+         """

+         Wipe-out current cache state, except on-disk file

+         """

+         if cls._singleton and cls._singleton._invcache:

+             try:

+                 cls._singleton._invcache = None

+             except IOError:

+                 pass

+         cls._singleton = None

+ 

+     @contextmanager

+     def locked(self, mode=fcntl.LOCK_EX):

+         """

+         Context manager protecting returned cache with mode

+ 

+         :param mode: A value accepted by ``fcntl.flock()``'s ``op`` parameter.

+         :returns: Standard Ansible inventory dictionary

+         """

+         debug("Acquiring Lock for persistent inventory cache file")

+         try:

+             fcntl.flock(self.cachefile, mode)  # __enter__

+             yield self()

+         finally:

+             debug("Lock released")

+             fcntl.flock(self.cachefile, fcntl.LOCK_UN) # __exit__

+ 

+     def gethost(self, hostname):

+         """

+         Look up details about a host from inventory cache.

+ 

+         :param hostname: Name of host to retrieve.

+         :returns: Tuple containing a dictionary of host variables,

+                   and a list of groups.  None if host not

+                   found.

+         """

+         debug("Retrieving cached details about {0}.".format(hostname))

+         with self.locked(fcntl.LOCK_SH) as inventory:

+             groups = []

+             for group, hostvars in inventory.items():

+                 hosts = hostvars.get("hosts", [])

+                 if hostname in hosts:

+                     groups.append(group)

+             meta = inventory.get("_meta", dict(hostvars=dict()))

+             hostvars = meta["hostvars"].get(hostname, None)

+         if hostvars is not None:

+             debug("Host found")

+             return (hostvars, groups)

+         else:

+             debug("Host not found")

+             return None

+ 

+     def addhost(self, hostname, hostvars=None, groups=('localhost', 'subjects')):

+         """

+         Update cache, adding hostname with hostvars to specified groups.

+ 

+         :param hostname: An Ansible inventory-hostname (may not be actual hostname).

+         :param hostvars: A dictionary of host variables to add, or None

+         :param groups: A list of groups for the host to join.

+         :returns: Tuple containing a dictionary of host variables, and a list of groups.

+         """

+         debug("Adding host {0} to cache".format(hostname))

+         if not groups:

+             groups = ('all',)

+         if not hostvars:

+             hostvars = {}

+         with self.locked(fcntl.LOCK_EX) as inventory:

+             meta = inventory.get("_meta", dict(hostvars=dict()))

+             meta["hostvars"][hostname] = hostvars

+             for group in groups:

+                 inv_group = inventory.get(group, dict(hosts=[], vars=dict()))

+                 inv_group["hosts"].append(hostname)

+             # Update and write cache to disk

+             self(inventory)

+         return self.gethost(hostname)

+ 

+     def delhost(self, hostname, keep_empty=False):

+         """

+         Remove hostname from inventory, return tuple of host vars. dict and group list

+ 

+         :param hostname: Ansible hostname to remove from inventory.

+         :param keep_empty: If False, remove cache file & reset if no hosts remain in cache.

+         :returns: Tuple containing a former dictionary of host variables, and a list of

+                   groups or None

+         """

+         debug("Deleting host {0} from cache".format(hostname))

+         with self.locked(fcntl.LOCK_EX) as inventory:

+             hostvars_groups = self.gethost(hostname)

+             if hostvars_groups:

+                 hostvars, groups = hostvars_groups

+                 meta = inventory.get("_meta", dict(hostvars=dict()))

+                 if hostname in meta["hostvars"]:

+                     del meta["hostvars"][hostname]

+                 for group in groups:

+                     inv_group = inventory.get(group, dict(hosts=[], vars=dict()))

+                     if hostname in inv_group["hosts"]:

+                         inv_group["hosts"].remove(hostname)

+                 # Write out to disk

+                 self(inventory)

+             conditions = [not keep_empty,

+                           not hostvars_groups or not meta["hostvars"]]

+             if all(conditions):  # No more hosts

+                 debug("Inventory cache is empty, removing file.")

+                 self.reset()

+                 try:

+                     os.unlink(self.filepath)

+                 except FileNotFoundError:

+                     pass

+             else:

+                 return None

+         return hostvars_groups

+ 

+     @staticmethod

+     def make_hostvars(priv_key_file, port, host=VM_IPV4_ADDR,

+                       user=DEF_USER, passwd=DEF_PASSWD):

+         """Return dictionary of standard/boilerplate hostvars"""

+ 

+         return dict(ansible_ssh_private_key_file=str(priv_key_file),

+                     ansible_port=str(port),

+                     ansible_host=str(host),

+                     ansible_user=str(user),

+                     ansible_ssh_pass=str(passwd),

+                     ansible_ssh_common_args="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no")

+ 

+     def str_hostvars(self, hostname):

+         hostvars, groups = self.gethost(hostname)

+         if hostvars == groups == None:

+             raise ValueError("Host '{0}' not found in cache file"

+                              " '{1}'".format(hostname, self.cachefile.name))

+         del groups  # not used

+         return "{0}\n".format(json.dumps(hostvars, indent=4, separators=(',', ': ')))

+ 

+ 

+ def try_replace_stderr_devtty():

+     """

+     Duplicate the controlling terminal as if it were stderr to make debugging easier

+     """

+     try:

+         tty = os.open("/dev/tty", os.O_WRONLY)

+         os.dup2(tty, 2)

+     except OSError:

+         pass

  

  

- def start_qemu(image, cloudinit, log, portrange=(2222, 5555)):

+ def main(argv=None, environ=None):

+     if argv is None:  # Makes unittesting easier

+         argv = sys.argv

+     if environ is None:

+         environ = os.environ  # Side-effects: this isn't a dumb-dictionary

+     test_debug_env = environ.get("TEST_DEBUG", "0")[0].lower() in ['t','y','1']

+ 

+     parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image",

+                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)

+ 

+     # Ansible inventory 'script' plugin's interface.

+     # ref: http://docs.ansible.com/ansible/devel/plugins/inventory/script.html

+     parser.add_argument("--list", action="store_true", default=True,

+                        help="Get variables for entire inventory")

+ 

+     # This is not really needed since --list provides '_meta'.

+ # It's implemented (below) and functional, for nostalgia's sake.

+     parser.add_argument('--host', default=None, help="Get variables for a single host")

+ 

+     # Auxiliary arguments (not used by Ansible)

+     parser.add_argument('--debug', action="store_true", default=test_debug_env,

+                         help="Disable qemu process cleanup & inventory cache removal."

+                              " As an alternative, set $TEST_DEBUG == true, yes, or 1")

+     parser.add_argument("subjects", nargs="*", default=[],

+                         help=("With --list, an optional list of file"

+                               " path(s) to qcow2 image(s) - in addition to any"

+                               " specified in $TEST_SUBJECTS.  If --host, "

+                               " a single path to a qcow2 image is required."))

+ 

+     opts = parser.parse_args(args=argv[1:])

+     if opts.debug:

+         manage_debug(True)

+         debug('# Debugging enabled\n')

+     try_replace_stderr_devtty()

+ 

+     # Load / Create cache

+     artifacts = artifacts_dirpath(environ)

+     invcache = InvCache(artifacts)

+     if opts.host:

+         opts.list = False

+         hostname = os.path.basename(opts.host)

+         debug("Operating in --host mode for {0}".format(hostname))

+ 

+         if not invcache.gethost(hostname):  # Enforce unique hostnames

+             try_create_host(opts, opts.host, environ)

+         try:

+             sys.stdout.write(invcache.str_hostvars(hostname))

+         except TypeError:  # try_create_host() failed

+             sys.stdout.write('{}\n')

+ 

+     else:  # opts.list (subjects is optional)

+         opts.host = False

+         # De-duplicate paths with $TEST_SUBJECTS

+         subjects = set(s for s in shlex.split(environ.get("TEST_SUBJECTS", "")) if s.strip())

+         subjects |= set(s for s in opts.subjects if s.strip())

+         subjects -= set([None, ''])  # Just in case

+ 

+         # Sets are unordered; correspondence is needed between paths and hostnames

+         subjects = tuple(subjects)

+         # Catch null/empty paths/subjects while forming dict

+         subject_hostname = dict(zip(subjects,

+                                     tuple(os.path.basename(subject.strip())

+                                           for subject in subjects)))

+         if len(subject_hostname) != len(subjects):

+             # Fatal, do not allow clashes in cache over hostnames, differing paths OK

+             log("Error: One or more subjects from parameters"

+                 " or $TEST_SUBJECTS found with clashing"

+                 " hostnames (basenames):"

+                 " {0}\n".format(subjects))

+             parser.print_help()  # side-effect: exits non-zero

+         sys.exit(1)  # Required for unittesting

+ 

+         debug("Operating in --list mode for subjects: {0}".format(subject_hostname.keys()))

+         for subject, hostname in subject_hostname.items():

+             if not invcache.gethost(hostname):  # doesn't already exist

+                 try_create_host(opts, subject, environ)

+ 

+         sys.stdout.write(str(invcache))

+ 

+ 

+ def start_qemu(image_path, cloudinit, console_log, portrange=(2222, 5555)):

+     """

+     Start QEMU, return tuple of subprocess instance, ssh and monitor port numbers.

+     """

+     ioxcept = None

      for port in range(*portrange):

-         sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

-         sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

+         ssh_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

+         ssh_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

+         mon_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

+         mon_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

+         cmd = ["/usr/bin/qemu-system-x86_64", "-m", "1024", image_path,

+                "-cpu", "host", "-smp", "{}".format(multiprocessing.cpu_count()),

+                "-enable-kvm", "-snapshot", "-cdrom", cloudinit,

+                "-net", "nic,model=virtio", "-net",

+                "user,hostfwd=tcp:{0}:{1}-:22".format(VM_IPV4_ADDR, port),

+                "-device", "isa-serial,chardev=pts2", "-chardev",

+                "file,id=pts2,path=" + console_log,

+                # Monitor port allows rebooting VM w/o causing qemu-kvm process to exit

+                "-chardev",

+                "socket,host={0},port={1},id=mon0,server,nowait"

+                "".format(VM_IPV4_ADDR, port + 1),

+                "-mon", "chardev=mon0,mode=readline",

+                "-display", "none"]

          try:

-             sock.bind(("127.0.0.3", port))

-         except IOError:

+             ssh_sock.bind((VM_IPV4_ADDR, port))

+             mon_sock.bind((VM_IPV4_ADDR, port + 1))

+             # If launching qemu fails, having this means we get to see why.

+             with open(console_log, 'a+') as console_logf:

+                 console_logf.write("Qemu command-line: {0}\n".format(' '.join(cmd)))

+                 console_logf.flush()

+                 return subprocess.Popen(cmd, stdout=console_logf,

+                                         stderr=subprocess.STDOUT), port, port + 1

+         except IOError as ioxcept:

              pass

-         else:

-             break

-         finally:

-             sock.close()

      else:

-         raise RuntimeError("unable to find free local port to map SSH to")

+         msgfmt = "Error launching qemu process: {0}: {1}"

+         if ioxcept:

+             raise RuntimeError(msgfmt.format(ioxcept.__class__.__name__, ioxcept))

+         else:

+             raise RuntimeError(msgfmt.format("Could not allocate a ssh & monitor port from range",

+                                              portrange))

  

-     # Use -cpu host and -smp by default.

-     # virtio-rng-pci: https://wiki.qemu.org/Features/VirtIORNG

-     return subprocess.Popen(["/usr/bin/qemu-system-x86_64",

-                              "-cpu", "host", "-smp", "{}".format(multiprocessing.cpu_count()),

-                              "-m", "1024", image, "-enable-kvm", "-snapshot", "-cdrom", cloudinit,

-                              "-net", "nic,model=virtio", "-net", "user,hostfwd=tcp:127.0.0.3:{0}-:22".format(port),

-                              "-device", "virtio-rng-pci", "-rtc", "base=utc",

-                              "-device", "isa-serial,chardev=pts2", "-chardev", "file,id=pts2,path=" + log,

-                              "-display", "none"], stdout=open(log, 'a'), stderr=subprocess.STDOUT), port

  

+ class NoQcow2Error(ValueError):

+     """A non-fatal exception if --list, fatal if --host"""

+     pass

  

- def inv_host(image):

-     null = open(os.devnull, 'w')

  

+ def artifacts_dirpath(environ=None):

+     """Return complete path to directory where testing artifacts should be stored"""

+     if environ is None:

+         environ = os.environ  # Side-effects: this isn't a dumb-dictionary

      try:

-         tty = os.open("/dev/tty", os.O_WRONLY)

-         os.dup2(tty, 2)

-     except OSError:

-         tty = None

- 

-     # A directory for temporary stuff

-     directory = tempfile.mkdtemp(prefix="inventory-cloud")

-     identity = os.path.join(directory, "identity")

-     with open(identity, 'w') as f:

-         f.write(IDENTITY)

-     os.chmod(identity, 0o600)

-     metadata = os.path.join(directory, "meta-data")

+         artifacts = environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

+         os.makedirs(artifacts)

+     except OSError as exc:

+         if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):

+             raise

+     # Behave like a singleton: Update environ w/ proper path

+     environ["TEST_ARTIFACTS"] = artifacts

+     return artifacts

+ 

+ 

+ def try_create_host(opts, image_path, environ):

+     try:

+         create_host(opts, image_path, environ)

+     except NoQcow2Error as xcept:  # Not fatal by itself

+         log("Warning: {0}".format(xcept))

+         pass  # Presume another inventory script will handle it

+ 

+ 

+ def create_host(opts, image_path, environ):

+     image_path = image_path.strip()

+     # directory-separators in hostnames could be bad

+     hostname = os.path.basename(image_path)

+     log("Creating host {0}".format(hostname))

+     if not image_path.endswith(".qcow2") and not image_path.endswith(".qcow2c"):

+         # Ignored in main(), printed if --debug used, required for unittesting

+         raise NoQcow2Error("Subject '{0}' / '{1}',"

+                            " image does not end in '.qcow2' or '.qcow2c'."

+                            "".format(hostname, image_path))

+ 

+     # temporary until subject destroyed, allow secondary debugging w/o artifacts-clutter

+     temp_dir = tempfile.mkdtemp(prefix=".{0}_".format(hostname),

+                                 suffix='.temp', dir=artifacts_dirpath())

+     ssh_priv_key_file = os.path.join(temp_dir, "ssh_priv_key")

+     debug("Using ssh key {0}".format(ssh_priv_key_file))

+     with open(ssh_priv_key_file, 'w') as f:

+         f.write(SSH_PRIV_KEY)

+         os.chmod(ssh_priv_key_file, 0o600)

+     metadata = os.path.join(temp_dir, "meta-data")

      with open(metadata, 'w') as f:

          f.write("")

-     userdata = os.path.join(directory, "user-data")

+     userdata = os.path.join(temp_dir, "user-data")

      with open(userdata, 'w') as f:

          f.write(USER_DATA)

  

      # Create our cloud init so we can log in

-     cloudinit = os.path.join(directory, "cloud-init.iso")

+     null = open(os.devnull, 'w')

+     cloudinit = os.path.join(temp_dir, "cloud-init.iso")

      subprocess.check_call(["/usr/bin/genisoimage", "-input-charset", "utf-8",

                             "-volid", "cidata", "-joliet", "-rock", "-quiet",

                             "-output", cloudinit, userdata, metadata], stdout=null)

- 

-     # Determine if virtual machine should be kept available for diagnosis after completion

-     try:

-         diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))

-     except ValueError:

-         diagnose = 0

- 

-     sys.stderr.write("Launching virtual machine for {0}\n".format(image))

- 

+     debug("Using cloud-init {0}".format(cloudinit))

      # And launch the actual VM

-     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

-     try:

-         os.makedirs(artifacts)

-     except OSError as exc:

-         if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):

-             raise

-     log = os.path.join(artifacts, "{0}.log".format(os.path.basename(image)))

- 

+     console_log = os.path.join(artifacts_dirpath(environ), "{0}.log".format(hostname))

+     debug("Using qemu log {0}".format(log))

      proc = None  # for failure detection

      cpe = None  # for exception scoping

-     for _ in range(0, 5):

+     for tries in range(0, VM_START_TRIES):

+         log("    Launching {0}, attempt #{1}/{2}"

+             "".format(hostname, tries+1, VM_START_TRIES))

          try:

-             proc, port = start_qemu(image, cloudinit, log)

+             proc, ssh_port, mon_port = start_qemu(image_path, cloudinit, console_log)

              break

          except subprocess.CalledProcessError as cpe:

              time.sleep(1)

              continue

-     if proc is None:

-         raise RuntimeError("Could not launch VM for qcow2 image"

-                            " '{0}':{1}".format(image, cpe.output))

- 

-     for _ in range(0, 30):

+     if proc.poll():

+         raise RuntimeError("Could not launch VM, exited:{0}: {1}"

+                            "".format(proc.returncode, cpe.output))

+ 

+     # Ansible ping signals VM up, needs temporary inventory

+     hostvars = InvCache.make_hostvars(ssh_priv_key_file, int(ssh_port))

+     hostvars['qemu_monitor_port'] = mon_port

+     inv_vars_sfx = " ".join(tuple("{0}='{1}'".format(k,v) for k,v in hostvars.items()))

+     inventory_filepath = os.path.join(temp_dir, "inventory.ini")

+     with open(inventory_filepath, "w") as inventory_file:

+         # Exclude dictionary prefix/suffix in JSON

+         inventory_file.write("{0} {1}\n".format(hostname, inv_vars_sfx))

+ 

+     ping = [

+         "/usr/bin/ansible",

+         "--inventory",

+         inventory_filepath,

+         hostname,

+         "--module-name",

+         "raw",

+         "--args",

+         "/bin/true"

+     ]

+ 

+     for tries in range(0, VM_PING_TRIES):

+         log("    Contacting {0}, attempt #{1}/{2}"

+               "".format(hostname, tries+1, VM_PING_TRIES))

          try:

-             # The variables

-             variables = {

-                 "ansible_port": "{0}".format(port),

-                 "ansible_host": "127.0.0.3",

-                 "ansible_user": "root",

-                 "ansible_ssh_pass": "foobar",

-                 "ansible_ssh_private_key_file": identity,

-                 "ansible_ssh_common_args": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

-             }

- 

-             # Write out a handy inventory file, for our use and for debugging

-             args = " ".join(["{0}='{1}'".format(*item) for item in variables.items()])

-             inventory = os.path.join(directory, "inventory")

-             with open(inventory, "w") as f:

-                 f.write("[subjects]\nlocalhost {0}\n".format(args))

- 

-             # Wait for ssh to come up

-             ping = [

-                 "/usr/bin/ansible",

-                 "--inventory",

-                 inventory,

-                 "localhost",

-                 "--module-name",

-                 "raw",

-                 "--args",

-                 "/bin/true"

-             ]

- 

-             (pid, _) = os.waitpid(proc.pid, os.WNOHANG)

-             if pid != 0:

-                 raise RuntimeError("qemu failed to launch qcow2 image: {0}".format(image))

-             subprocess.check_call(ping, stdout=null, stderr=null)

+             if proc.poll():

+                 raise RuntimeError("Error launching '{0}' for '{1}',"

+                                    " qemu exited {2}.\n".format(hostname, image_path,

+                                                                 proc.returncode))

+             subprocess.check_call(ping, stdout=null, stderr=null, close_fds=True)

              break

          except subprocess.CalledProcessError:

              time.sleep(3)

+     else:  # tries >= 30

+         raise RuntimeError("Error launching VM '{0}' for '{1}',"

+                            " excessive Ansible ping attempts.\n".format(hostname, image_path))

+ 

+     invcache = InvCache()

+     invcache.addhost(hostname, hostvars)

+ 

+     debug("Access host with:")

+     debug("    ssh -p {0} -o StrictHostKeyChecking=no"

+           " -o UserKnownHostsFile=/dev/null {2}@{1}"

+           "".format(ssh_port, VM_IPV4_ADDR, DEF_USER))

+     debug("    {0}'s password: {1}".format(DEF_USER, DEF_PASSWD))

+     debug("Access host's monitor with:")

+     debug("    telnet {0} {1}".format(VM_IPV4_ADDR, mon_port))

+     debug("")

+     # subject_watcher() will not kill QEMU when --debug was specified

+     debug("kill {0} # when finished\n".format(proc.pid))

+     # Even if debugging, the inventory cache file needs grooming on qemu exit

+     subject_watcher(proc.pid, hostname, temp_dir, environ, opts.debug)

+ 

+ 

def monitor(pid_or_filepath, qemu_pid):
    """
    Helper for subject_watcher() to block until a shutdown condition occurs.

    Polls two things every 10 seconds: the "keep-alive" subject (either a
    parent PID that must stay running, or a $LOCK_ON_FILE that must stay
    readable) and the qemu process itself.

    :param pid_or_filepath: int parent PID to watch, or str path of a
                            lock-file whose readability keeps the VM alive.
    :param qemu_pid: PID of the qemu process backing the VM.
    :returns: Tuple ``(monitor_signaled, qemu_died)`` — exactly one will be
              True: the keep-alive subject went away, or qemu exited.
    """

    if isinstance(pid_or_filepath, int):
        # os.kill(pid, 0) raises ESRCH once the process no longer exists
        expected_errno = (errno.ESRCH, )
        lockonfile = None
        parent_pid = pid_or_filepath
        mon_what = "qemu parent-pid {2}"
    else:
        expected_errno = (errno.EACCES, errno.ENOENT,  # reg. file
                          errno.EEXIST,                # pipe/fifo
                          errno.ECONNRESET,            # socket
                          errno.ECONNABORTED,          # socket
                          errno.ECONNREFUSED)          # socket
        lockonfile = pid_or_filepath
        parent_pid = None
        mon_what = "$LOCK_ON_FILE {2}"

    mon_msg = 'QEMU PID {1} and ' + mon_what + ' at {0}'
    monitor_signaled = False
    qemu_died = False
    # Monitor pid_or_filepath, and qemu-process
    while all([not monitor_signaled, not qemu_died]):
        log(mon_msg.format(int(time.time()), qemu_pid, pid_or_filepath))
        try:
            monitor_signaled = True  # If exception thrown
            if parent_pid:
                os.kill(parent_pid, 0)  # Exception if process doesn't exist
            else:  # lockonfile
                # Exception if read not allowed.  Close the probe handle
                # right away: the original left it open, leaking one file
                # descriptor per 10-second poll iteration.
                open(lockonfile).close()
            monitor_signaled = False    # No exception thrown

            # Monitoring unnecessary if qemu process doesn't exist
            qemu_died = True   # If exception thrown
            os.kill(qemu_pid, 0)
            qemu_died = False  # No exception thrown
        except (IOError, OSError) as xcpt:
            if xcpt.errno not in expected_errno:
                debug("Unable to handle {0}: {1}".format(xcpt.__class__.__name__, xcpt))
                raise
            # A flag is already True, so the loop condition fails and the
            # loop exits immediately without sleeping.
            continue
        time.sleep(10)  # Don't busy-wait

    return monitor_signaled, qemu_died

+ 

+ 

def subject_watcher(qemu_pid, hostname, temp_dir, environ, debugging=True):
    """
    Monitor process that called this script, kill off VMs and cleanup on exit.

    Forks a daemonized "watcher" child which blocks in monitor() until either
    the caller (or $LOCK_ON_FILE) goes away or qemu dies, then cleans up the
    inventory cache and, unless debugging, kills qemu and removes temp_dir.

    :param qemu_pid: PID of the running qemu process to watch/kill.
    :param hostname: Inventory hostname to remove from the cache on exit.
    :param temp_dir: Scratch directory to remove (kept when debugging).
    :param environ: Mapping consulted for TEST_ARTIFACTS / LOCK_ON_FILE.
    :param debugging: When True, never kill qemu nor remove temp_dir.
    :returns: None in the parent; the watcher child never returns (sys.exit).
    """
    # Parent of our pid is Ansible process
    parent_pid = os.getppid()

    if 1 in (parent_pid, qemu_pid):
        raise RuntimeError("Cowardly refusing to monitor pid 1")

    # Needed at end of watcher process
    artifacts = artifacts_dirpath(environ)
    lockonfile = environ.get('LOCK_ON_FILE', None)

    info_msg_pfx = ("Watching VM {0} qemu pid {1} from watcher"
                    "".format(hostname, qemu_pid))
    if lockonfile:
        # Leave "{0}" literal in the format so the watcher pid (unknown
        # until after fork) can be filled in later.
        info_msg_fmt = ("{0} pid {1} for $LOCK_ON_FILE {2}"
                        "".format(info_msg_pfx, "{0}", lockonfile))
        mon_args = (lockonfile, qemu_pid)
        lockonfile = os.path.realpath(lockonfile)
        try:
            # Probe readability up-front so a misconfigured lock-file is
            # warned about before the watcher silently tears the VM down.
            open(lockonfile)
        except IOError as xcpt:
            log("WARNING: Can't open $LOCK_ON_FILE {0}: {1}: {2}.\n"
                   "".format(lockonfile, xcpt.__class__.__name__, xcpt))
            if debugging:
                log("WARNING: Qemu process will be killed shortly after creation.\n")
    else:
        info_msg_fmt = ("{0} pid {1} for parent pid {2}"
                        "".format(info_msg_pfx, "{0}", parent_pid))
        mon_args = (parent_pid, qemu_pid)

    watcher_pid = os.fork()
    if watcher_pid:  # parent
        log(info_msg_fmt.format(watcher_pid))
        return
    else:
        watcher_pid = os.getpid()

    # Harden child process
    os.chdir("/")
    os.setsid()
    os.umask(0)
    InvCache.reset()  # don't hold onto cachefile object
    # Close inherited stdio before closerange() so buffered data is flushed.
    for _file in (sys.stdin, sys.stdout, sys.stderr):
        if _file:
            try:
                _file.close()
            except IOError:
                pass
    os.closerange(0, 255)  # Be certain nothing is open, ignore any errors

    # Make multi-process debugging easier
    logfn = '{0}_watcher.log'.format(os.path.basename(sys.argv[0]).split('.')[0])
    sys.stderr = open(os.path.join(artifacts_dirpath(environ), logfn), 'a')
    # Re-point fds 1 and 2 at the log file so stray writes are captured too.
    os.dup2(sys.stderr.fileno(), 2)
    os.dup2(sys.stderr.fileno(), 1)
    manage_debug(debugging, "WATCHER {0}: {1}".format(watcher_pid, '{0}'))
    log(info_msg_fmt.format(watcher_pid))
    invcache = InvCache(artifacts)  # re-opens cachefile

    # Wait for monitor to signal exit time (blocking)
    monitor_signaled, qemu_died = monitor(*mon_args)

    oops_msg = ("Monitor function exited unexpectedly "
                "monitor_signaled={0}  and  qemu_died={1}"
                "".format(monitor_signaled, qemu_died))

    if debugging:
        # Debug mode: leave qemu and temp_dir alone for post-mortem access.
        msg_sfx = "NOT removing {0}".format(temp_dir)
        if monitor_signaled:
            debug("NOT killing qemu and {0}".format(msg_sfx))
        elif qemu_died:
            debug("Qemu process died, {0}"
                   "".format(msg_sfx))
        else:
            debug(oops_msg)
            debug(msg_sfx)
        # Don't remove cache if empty
        invcache.delhost(hostname, keep_empty=True)
        debug("Removed host {0} from inventory cache.".format(hostname))
    else:  # debugging==false
        msg_sfx = "removing {0}".format(temp_dir)
        try:
            if monitor_signaled:
                debug("Killing qemu process and {0}".format(msg_sfx))
                os.kill(qemu_pid, signal.SIGTERM)  # Ensure it's actually dead
            elif qemu_died:
                debug("Qemu process died, {0}".format(msg_sfx))
            else:
                debug(oops_msg)
                debug(msg_sfx)

        except OSError as xcpt:
            # Don't care if qemu_pid doesn't exist, that was the goal.
            if xcpt.errno != errno.ESRCH:
                debug("Qemu process dead, but unable to handle {0}: {1}"
                       "".format(xcpt.__class__.__name__, xcpt))
                raise
        finally:
            # Cleanup happens even if the SIGTERM above raised.
            shutil.rmtree(temp_dir, ignore_errors=True)
            invcache.delhost(hostname, keep_empty=False)
    log("Watcher {0} exiting".format(watcher_pid))
    if debugging:
        log("Remember to manually kill {0} if necessary".format(qemu_pid))
    log("FIXME: ignore 'IOError: [Errno 9] Bad file descriptor' coming next...")
    sys.exit(0)

  

  

  if __name__ == '__main__':

-     sys.exit(main(sys.argv))

+     main(sys.argv)

@@ -0,0 +1,671 @@ 

+ #!/usr/bin/env python3

+ 

+ import sys

+ import os

+ import tempfile

+ import fcntl

+ import json

+ import shutil

+ import subprocess

+ from errno import ESRCH

+ from io import StringIO, SEEK_SET, FileIO

+ from contextlib import contextmanager, redirect_stdout, redirect_stderr

+ import unittest

+ from unittest.mock import MagicMock, patch, mock_open, call, create_autospec, ANY

+ from glob import glob

+ import importlib.machinery

+ from pdb import Pdb

+ 

+ 

+ # Assumes directory structure as-is from repo. clone

+ TEST_FILENAME = os.path.basename(sys.argv[0])

+ TESTS_DIR = os.path.dirname(os.path.realpath(sys.argv[0]))

+ TESTS_DIR_PARENT = os.path.realpath(os.path.join(TESTS_DIR, '../'))

+ 

+ 

class TestCaseBase(unittest.TestCase):
    """Exercise code from file based on TEST_FILENAME in TESTS_DIR_PARENT + SUBJECT_REL_PATH"""

    # Mock'd call for opening files, used for patching
    MockOpen = mock_open()

    # When non-None, the file-like object returned by mock_open()
    cachefile = None

    # When non-None, a stand-in for fcntl module
    mock_fcntl = None

    # repo. relative path containing test subject python file
    SUBJECT_REL_PATH = 'inventory'

    # The name of the loaded code, as if it were a real module
    SUBJECT_NAME = TEST_FILENAME[len('test_'):].split('.',1)[0]

    # The complete path containing SUBJECT_NAME
    SUBJECT_DIR = os.path.realpath(os.path.join(TESTS_DIR_PARENT, SUBJECT_REL_PATH))

    # When non-none, reference to loaded subject as if it were a module
    SUBJECT = None

    # When non-none, complete path to unittest temporary directory
    TEMPDIRPATH = None

    # Fake process ID of parent process
    PPID = 43

    # When non-None, contains a tuple of patcher instances
    patchers = None

    # The complete path to the SUBJECT_NAME
    # NOTE: this loop runs at class-definition time; the subject script is
    # imported once and shared by every TestCase subclass.
    for SUBJECT_PATH in glob(os.path.join(SUBJECT_DIR, '{}*'.format(SUBJECT_NAME))):
        if os.path.isfile(SUBJECT_PATH):
            # The subject may not conform to package.module standards
            loader = importlib.machinery.SourceFileLoader(SUBJECT_NAME, SUBJECT_PATH)
            # After python 3.6: Need loader for files w/o any extension
            # so loader.exec_module() can be used.
            SUBJECT = sys.modules[SUBJECT_NAME] = loader.load_module(SUBJECT_NAME)
            break
    else:
        raise RuntimeError("Could not locate test subject: {} in {}".format(SUBJECT_NAME, SUBJECT_DIR))

    def trace(self, statement=None):
        """Enter the pdb debugger, 'n' will step back to self.trace() caller"""
        # NOTE(review): ``statement`` is accepted but never used — confirm
        # whether it was meant to be passed to Pdb.
        return self._pdb.set_trace()

    def subtests(self, items):
        """Generate items, each wrapped in its own subTest() context."""
        for item in items:
            ctxmgr = self.subTest(item=item)
            with ctxmgr:
                yield item

    def reset(self):
        """Restore subject InvCache, MockOpen, cachefile and mock_fcntl to pristine state."""
        self.SUBJECT.InvCache.reset()
        self.MockOpen.reset_mock()
        self.MockOpen.return_value = self.cachefile = StringIO()
        self.mock_fcntl.reset_mock()
        for attr in dir(fcntl):
            mockattr = MagicMock(spec=getattr(fcntl, attr), spec_set=True)
            # represent int constants properly
            if attr.capitalize() == attr:
                # NOTE(review): assigns the raw int to __int__ rather than a
                # callable returning it — int(mockattr) would fail; verify.
                mockattr.__int__ = getattr(fcntl, attr)
            self.mock_fcntl.attach_mock(mockattr, attr)


    def setUp(self):
        """Create temp dir and patch out all filesystem/process side effects in the subject."""
        super(TestCaseBase, self).setUp()
        self._pdb = Pdb()
        self.TEMPDIRPATH = tempfile.mkdtemp(prefix=os.path.basename(__file__))
        self.mock_fcntl = create_autospec(spec=fcntl, spec_set=True, instance=True)
        self.mock_makedirs = create_autospec(spec=os.makedirs, spec_set=True,
                                               return_value=self.TEMPDIRPATH)
        # TODO: All of ``os`` and ``sys`` should probably just be patched up inside a loop
        if self.patchers is None:
            self.patchers = []
        self.patchers += [patch('{}.try_replace_stderr_devtty'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=None)),
                          patch('{}.tempfile.gettempdir'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=self.TEMPDIRPATH)),
                          patch('{}.tempfile.mkdtemp'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=self.TEMPDIRPATH)),
                          patch('{}.os.makedirs'.format(TestCaseBase.SUBJECT_NAME),
                                self.mock_makedirs),
                          patch('{}.os.getcwd'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=self.TEMPDIRPATH)),
                          patch('{}.fcntl'.format(TestCaseBase.SUBJECT_NAME),
                                self.mock_fcntl),
                          # NOTE(review): format(format(...)) is a harmless
                          # double call — confirm not a typo for one format().
                          patch('{}.open'.format(format(TestCaseBase.SUBJECT_NAME)),
                                self.MockOpen, create=True),
                          patch('{}.InvCache.filename'.format(TestCaseBase.SUBJECT_NAME),
                                'bar'),
                          patch('{}.time.sleep'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(spec=self.SUBJECT.time.sleep)),
                          patch('{}.os.chdir'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(spec=self.SUBJECT.os.chdir)),
                          patch('{}.os.setsid'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(spec=self.SUBJECT.os.setsid)),
                          patch('{}.os.umask'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(spec=self.SUBJECT.os.umask)),
                          patch('{}.os.closerange'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(spec=self.SUBJECT.os.closerange)),
                          # fork "returns" 0 == always behave as the child
                          patch('{}.os.fork'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=0)),
                          patch('{}.os.getpid'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=42)),
                          patch('{}.os.getppid'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=self.PPID)),
                          patch('{}.os.getgid'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=44)),
                          patch('{}.os.unlink'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=45)),
                          patch('{}.os.chmod'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=46)),
                          patch('{}.os.getuid'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=47)),
                          patch('{}.shutil.rmtree'.format(TestCaseBase.SUBJECT_NAME),
                                MagicMock(return_value=48))]
        for patcher in self.patchers:
            patcher.start()
        self.reset()

    def tearDown(self):
        """Stop all patchers and remove any temp directories created by setUp()."""
        if self.patchers:
            for patcher in self.patchers:
                patcher.stop()
        if self.TEMPDIRPATH:  # rm -rf /tmp/test_standard-inventory-qcow2.py*
            for tempdirglob in glob('{}*'.format(self.TEMPDIRPATH)):
                shutil.rmtree(tempdirglob, ignore_errors=True)

    @contextmanager
    def silence_debug(self):
        """Context manager that temporarily no-ops the subject's debug() function."""
        with patch('{}.debug'.format(self.SUBJECT_NAME), MagicMock(return_value=0)):
            yield None

    def validate_mock_fcntl(self):
        """Helper to confirm cachefile-locking/unlocking was done properly"""
        # Make sure unlock happens _after_ locking, and lock is released
        locked = False
        locks = 0
        unlocks = 0
        for args, dargs in self.mock_fcntl.flock.call_args_list:
            if locked:
                # Accept the lock operation positionally or by keyword
                if len(args) >= 2:
                    op = args[1]
                elif 'op' in dargs:
                    op = dargs['op']
                else:
                    continue  # Don't care about this call
                # Is it a locking call?
                if op == self.mock_fcntl.LOCK_UN:
                    locked = False
                    unlocks += 1
            else:
                if len(args) >= 2:
                    op = args[1]
                elif 'op' in dargs:
                    op = dargs['op']
                else:
                    continue  # Don't care about this call
                if op in [self.mock_fcntl.LOCK_EX, self.mock_fcntl.LOCK_SH]:
                    locked = True
                    locks += 1
        self.assertFalse(locked, "Mock cache file locked {} times but"
                                 " unlocked {} times".format(locks, unlocks))

+ 

+ 

class TestToolFunctions(TestCaseBase):
    """Tests for several misc. tooling functions"""

    def validate_artifacts_dirpath(self, test_environ, expected_dirpath):
        """
        Helper for actual tests.

        Calls artifacts_dirpath() twice and asserts: the result matches
        expected_dirpath, is stable across calls, is reflected back into
        test_environ["TEST_ARTIFACTS"], and os.makedirs was called
        identically both times.
        """
        first = self.SUBJECT.artifacts_dirpath(test_environ)
        second = self.SUBJECT.artifacts_dirpath(test_environ)
        self.assertEqual(test_environ["TEST_ARTIFACTS"], first)
        self.assertEqual(first, second)
        self.assertEqual(len(self.mock_makedirs.mock_calls), 2)
        self.assertEqual(self.mock_makedirs.mock_calls[0], self.mock_makedirs.mock_calls[1])
        self.assertEqual(first, expected_dirpath)
        self.mock_makedirs.assert_called_with(expected_dirpath)

    def test_no_env_artifacts_dirpath(self):
        """Verify artifacts_dirpath() returns default path w/o $TEST_ARTIFACTS set."""
        self.validate_artifacts_dirpath({}, os.path.join(self.TEMPDIRPATH, 'artifacts'))

    # BUG FIX: this method was also named test_no_env_artifacts_dirpath,
    # which silently shadowed the method above so the no-environment case
    # never ran.  Renamed so both cases execute.
    def test_env_artifacts_dirpath(self):
        """Verify artifacts_dirpath() honors $TEST_ARTIFACTS from the environment."""
        expected_dirpath = 'foo/../bar / baz../'
        self.validate_artifacts_dirpath(dict(TEST_ARTIFACTS=expected_dirpath), expected_dirpath)

+ 

+ 

class TestInvCache(TestCaseBase):
    """Tests for the InvCache class"""

    def setUp(self):
        super(TestInvCache, self).setUp()

    def test_init_newcache(self):
        """A fresh InvCache starts as DEFAULT_CACHE, both in memory and on disk."""
        cache = self.SUBJECT.InvCache()
        self.assertDictEqual(cache.DEFAULT_CACHE, cache())
        persisted = json.loads(self.cachefile.getvalue())
        self.assertDictEqual(cache.DEFAULT_CACHE, persisted)
        self.validate_mock_fcntl()

    def test_reset(self):
        """After reset(), constructing InvCache yields a brand-new instance."""
        before = self.SUBJECT.InvCache()
        self.validate_mock_fcntl()
        before.reset()
        after = self.SUBJECT.InvCache()
        self.validate_mock_fcntl()
        self.assertNotEqual(id(before), id(after))

    def test_empty_gethost(self):
        """gethost() yields None for unknown hostnames and for None."""
        cache = self.SUBJECT.InvCache()
        for lookup in ('foobar', None):
            self.assertEqual(cache.gethost(lookup), None)
        self.validate_mock_fcntl()

    def test_addgetdelhost(self):
        """addhost(), gethost() and delhost() all agree on the same host record."""
        cache = self.SUBJECT.InvCache()
        results = dict(added=cache.addhost('foobar'),
                       geted=cache.gethost('foobar'))
        self.validate_mock_fcntl()
        results['deled'] = cache.delhost('foobar')
        for result in (results['added'], results['geted'], results['deled']):
            self.assertTrue(result)
        self.assertEqual(results['added'], results['geted'])
        self.assertEqual(results['geted'], results['deled'])
        hostvars, groups = results['geted'][0], results['geted'][1]
        self.assertEqual(hostvars, {})
        self.assertIn('localhost', groups)
        self.assertIn('subjects', groups)
        # Removing the final host unlinks the cache file
        self.SUBJECT.os.unlink.assert_called_once_with(os.path.join(self.TEMPDIRPATH, 'bar'))
        self.assertDictEqual(cache.DEFAULT_CACHE, json.loads(self.cachefile.getvalue()))
        self.validate_mock_fcntl()

    def test_delhost_unlink(self):
        """delhost() on an empty cache yields None and unlinks the cache file."""
        cache = self.SUBJECT.InvCache()
        removed = cache.delhost('foobar')
        self.SUBJECT.os.unlink.assert_called_once_with(os.path.join(self.TEMPDIRPATH, 'bar'))
        self.assertEqual(removed, None)
        self.validate_mock_fcntl()
        self.assertDictEqual(cache.DEFAULT_CACHE, json.loads(self.cachefile.getvalue()))

    def test_delhost_nounlink(self):
        """delhost(keep_empty=True) yields None and leaves the cache file alone."""
        cache = self.SUBJECT.InvCache()
        removed = cache.delhost('foobar', True)
        self.assertFalse(self.SUBJECT.os.unlink.called)
        self.assertEqual(removed, None)
        self.validate_mock_fcntl()
        self.assertDictEqual(cache.DEFAULT_CACHE, json.loads(self.cachefile.getvalue()))

+ 

+ 

+ class TestMain(TestCaseBase):

+     """Tests for the ``main()`` function"""

+ 

+     mock_start_qemu = None

+     mock_create_host = None

+     mock_subject_watcher = None

+     fake_stdout = None

+     fake_stderr = None

+     exit_code = None

+     exit_msg = None

+ 

+     def mock_exit(self, code, msg):

+         self.exit_code = code

+         self.exit_msg = msg

+ 

+     def reset(self):

+         super(TestMain, self).reset()

+         self.fake_stdout = StringIO()

+         self.fake_stderr = StringIO()

+         self.mock_start_qemu.reset_mock()

+         self.mock_create_host.reset_mock()

+         self.mock_subject_watcher.reset_mock()

+         self.exit_code = None

+         self.exit_msg = None

+ 

+     def setUp(self):

+         self.fake_stdout = StringIO()

+         self.fake_stderr = StringIO()

+         self.mock_start_qemu = create_autospec(spec=self.SUBJECT.start_qemu,

+                                                spec_set=True, instance=True,

+                                                return_value=(MagicMock(), MagicMock()))

+         self.mock_create_host = create_autospec(spec=self.SUBJECT.create_host,

+                                                 spec_set=True, instance=True)

+         self.mock_subject_watcher = create_autospec(spec=self.SUBJECT.subject_watcher,

+                                                     spec_set=True, instance=True)

+         self.patchers = [patch('{}.argparse.ArgumentParser.exit'.format(TestCaseBase.SUBJECT_NAME),

+                                MagicMock(side_effect=self.mock_exit)),

+                          patch('{}.start_qemu'.format(TestCaseBase.SUBJECT_NAME),

+                                self.mock_start_qemu),

+                          patch('{}.create_host'.format(TestCaseBase.SUBJECT_NAME),

+                                self.mock_create_host),

+                          patch('{}.subject_watcher'.format(TestCaseBase.SUBJECT_NAME),

+                                self.mock_subject_watcher)]

+         super(TestMain, self).setUp()

+ 

+     def test_noargs(self):

+         """Script w/o args or environment behaves like --list"""

+         with redirect_stdout(self.fake_stdout), redirect_stderr(self.fake_stderr):

+             self.SUBJECT.main(argv=[self.SUBJECT_PATH], environ={})

+         # No exceptions, parser exit, or stderr

+         self.assertEqual(self.exit_code, None)  # mock_exit NOT called

+         # output format checked more thrroughly elsewhere

+         for regex in ('localhost', 'subjects', '_meta'):

+             self.assertRegex(self.fake_stdout.getvalue(), regex)

+ 

+     def test_debug_arg(self):

+         """Script with --debug enables debug mode"""

+         with redirect_stdout(self.fake_stdout), redirect_stderr(self.fake_stderr):

+             self.SUBJECT.main(argv=[self.SUBJECT_PATH, '--debug'], environ={})

+         # No exceptions, output, or input of any kind

+         self.assertRegex(self.fake_stdout.getvalue(), 'localhost')

+         self.assertRegex(self.fake_stderr.getvalue(), r'Debugging enabled')

+ 

+     def test_debug_env(self):

+         """Script with $TEST_DEBUG enables debug mode as expected"""

+         debug_enabled = []

+         for test_debug in ('TRUE', 'True', 'true', 'YES', 'Yes', 'yes', '1'):

+             with redirect_stdout(self.fake_stdout), redirect_stderr(self.fake_stderr):

+                 self.SUBJECT.main(argv=[self.SUBJECT_PATH],

+                                   environ=dict(TEST_DEBUG=test_debug))

+             # No exceptions, output, or input of any kind

+             self.assertRegex(self.fake_stdout.getvalue(), 'localhost')

+             self.assertRegex(self.fake_stderr.getvalue(), r'Debugging enabled')

+ 

+     def test_bad_mmhost(self):

+         """--host w/o any subjects exits non-zero."""

+         argv = [self.SUBJECT_PATH, '--host']

+         environ = {}

+         with redirect_stdout(self.fake_stdout), redirect_stderr(self.fake_stderr):

+             try:

+                 self.SUBJECT.main(argv, environ)

+             except TypeError as xcept:

+                 pass  # No clue why argparse throws this only while under test

+         self.assertFalse(self.fake_stdout.getvalue())

+         self.assertTrue(self.exit_code)

+         self.assertRegex(self.fake_stderr.getvalue(), r'usage:')

+ 

+     def validate_happy_path(self, argv, environ, n_subs=0):

+         """Helper for checking happy-path + negative input == empty cache"""

+         self.MockOpen.return_value = mock_cachefile = StringIO()

+         with redirect_stdout(self.fake_stdout), redirect_stderr(self.fake_stderr), self.silence_debug():

+             # Actual call to main() for test_empty_mmlist() (below)

+             self.SUBJECT.main(argv, environ)

+         self.assertEqual(self.exit_code, None)

+         self.assertFalse(self.fake_stdout.closed)

+         self.assertFalse(self.fake_stderr.closed)

+         # Verify no errors or any other output

+         _stderr = self.fake_stderr.getvalue()

+         try:

+             if 'Debugging enabled' not in _stderr:

+                 self.assertFalse(_stderr)

+         except AssertionError:  # Only happens when '--debug' was specified

+             # A non-fatal warning was issued

+             self.assertRegex(_stderr, r"[Ss]kipping.+qcow2.+does not end in.+")

+         # Verify locking was done correctly

+         self.validate_mock_fcntl()

+         msg_fmt = '\n{} JSON:\n{}\n'

+         jsons = dict(stdout=self.fake_stdout.getvalue().replace('(Pdb) ', ''),

+                      cache=mock_cachefile.getvalue())

+         # In case pdb is in use :D

+         for jsons_name, json_str in jsons.items():

+             try:

+                 jsons[jsons_name] = json.loads(json_str)

+             except ValueError:  # Parsing failed, assist debugging

+                 print(msg_fmt.format(jsons_name, json_str))

+                 raise

+ 

+         # Confirm any/all JSON output parses and always matches DEFAULT_CACHE (i.e. empty)

+         if '--list' in argv:

+             self.assertDictEqual(jsons['stdout'], self.SUBJECT.InvCache.DEFAULT_CACHE)

+             # Cache contents match stdout

+             self.assertDictEqual(jsons['cache'], jsons['stdout'])

+         if '--host' in argv:

+             self.assertDictEqual(jsons['stdout'], dict())

+         count = self.mock_create_host.call_count

+         self.assertEqual(count, n_subs,

+                          'create_host() called {} times, expecting {}'.format(count, n_subs))

+         # Confirm if/that create_host() was called according to it's API

+         if self.mock_create_host.called:

+             for kall in self.mock_create_host.call_args_list:

+                 args = list(kall[0])

+                 dargs = kall[1]

+                 _opts = dargs.get('opts')

+                 _image_path = dargs.get('image_path')

+                 _environ = dargs.get('environ')

+                 if args:

+                     _opts = args.pop(0)

+                 if args:

+                     _image_path = args.pop(0)

+                 if args:

+                     _environ = args.pop(0)

+                 if '--list' in argv:

+                     self.assertTrue(_opts.list)

+                     self.assertFalse(_opts.host)

+                 if '--host' in argv:

+                     self.assertFalse(_opts.list)

+                     self.assertTrue(_opts.host)

+                 self.assertDictEqual(_environ, environ)

+                 # Image must be found at least once

+                 foundit = False

+                 for arg in argv:

+                     if arg.find(_image_path.strip()):

+                         foundit = True

+                         break

+                 if not foundit:

+                     for val in environ.values():

+                         if val.find(_image_path.strip()):

+                             foundit = True

+                             break

+                 self.assertTrue(foundit, 'image_path {} parameter from create_host'

+                                          ' was not found in argv {} or environ {}'

+                                          ''.format(_image_path, argv, environ))

+ 

+     def test_empty_mmlist(self):

+         """--list w/ invalid subjects always results in empty JSON output & empty cache."""

+         # Subtest mesage paired with arguments to validate_happy_path()

+         test_input = {

+                 "Validate output with no subjects":

+                     dict(argv=[self.SUBJECT_PATH, '--list'],

+                          environ=dict()),

+                 "Validate output with non-qcow2 argument":

+                     dict(argv=[self.SUBJECT_PATH, '--list', '/path/to/foobar'],

+                          environ=dict(), n_subs=1),

+                 "Validate output with non-qcow2 argument containing a space":

+                     dict(argv=[self.SUBJECT_PATH, '--list', '/space.qcow2 /foobar'],

+                          environ=dict(), n_subs=1),

+                 "Validate output with whitespace argument":

+                     dict(argv=[self.SUBJECT_PATH, '--list', ' ', '--debug'],

+                          environ=dict()),

+                 "Validate output with multiple non-qcow2 arguments":

+                     dict(argv=[self.SUBJECT_PATH, '--list', '--debug', '/path/to/foobar', '../baz'],

+                          environ=dict(), n_subs=2),

+                 "Validate output with multiple non-qcow2 arguments containing a space":

+                     dict(argv=[self.SUBJECT_PATH, '--list', '/space.qcow2 /foobar', '../baz'],

+                          environ=dict(), n_subs=2),

+                 "Validate output with multiple whitespace arguments":

+                     dict(argv=[self.SUBJECT_PATH, '--list', '', '\n\t\t'],

+                          environ=dict()),

+                 "Validate output with empty TEST_SUBJECTS":

+                     dict(argv=[self.SUBJECT_PATH, '--list'],

+                          environ=dict(TEST_SUBJECTS="")),

+                 "Validate output with non-qcow2 TEST_SUBJECTS":

+                     dict(argv=[self.SUBJECT_PATH, '--list'],

+                          environ=dict(TEST_SUBJECTS="/path/to/foobar"), n_subs=1),

+                 "Validate output with non-qcow2 TEST_SUBJECTS containing a space":