#187 WIP - updated qcow2 and docker inventory to be static inventories
Opened 5 years ago by bgoncalv. Modified 5 years ago
bgoncalv/standard-test-roles yaml-inventory  into  master

provision/create-docker inventory/standard-inventory-docker
file renamed
+47 -110
@@ -25,81 +25,82 @@ 

  

  import argparse

  import errno

- import json

  import os

+ import re

  import shutil

  import shlex

- import signal

  import subprocess

  import sys

  import tempfile

  import time

- import distutils.util

+ import yaml

+ 

+ CLEANUP_CIDS = []

  

  

  def main(argv):

      parser = argparse.ArgumentParser(description="Inventory for a container image in a registry")

-     parser.add_argument("--list", action="store_true", help="Verbose output")

-     parser.add_argument('--host', help="Get host variables")

+     parser.add_argument("--output", "-o", default="inventory.yaml", help="Inventory output file")

      parser.add_argument('--docker-extra-args', help="Extra docker arguments for launching container",

                          default=os.environ.get("TEST_DOCKER_EXTRA_ARGS", ""))

      parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

      opts = parser.parse_args()

  

+     if os.path.isfile("inventory"):

+         with open("inventory") as f:

+             for line in f:

+                 m = re.match("export TEST_DOCKER_EXTRA_ARGS=(.*)", line)

+                 if m:

+                     opts.docker_extra_args = m.group(1).strip("\"")

+ 

      try:

-         if opts.host:

-             _, data = inv_host(opts.host, opts.docker_extra_args)

-         else:

-             data = inv_list(opts.subjects, opts.docker_extra_args)

-         sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

+         data = inv_list(opts.subjects, opts.docker_extra_args)

+         if not data:

+             return 0

+         inv_file = opts.output

+         with open(inv_file, "w") as f:

+             yaml.dump(data, f, default_flow_style=False)

+         print("INFO: Successfully created {0}".format(inv_file))

      except RuntimeError as ex:

-         sys.stderr.write("{0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

+         for cid in CLEANUP_CIDS:

+             subprocess.call(["/usr/bin/docker", "rm", "-f", cid])

+         sys.stderr.write("FAIL {0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

          return 1

  

      return 0

  

  

  def inv_list(subjects, docker_extra_args):

-     hosts = []

-     variables = {}

+     if not subjects:

+         return None

+     hosts = {}

      for subject in subjects:

-         if subject.startswith("docker:"):

-             image = subject[7:]

-             name, host_vars = inv_host(image, docker_extra_args)

-             if host_vars:

-                 hosts.append(name)

-                 variables[name] = host_vars

-     return {"localhost": {"hosts": hosts, "vars": {}},

-             "subjects": {"hosts": hosts, "vars": {}},

-             "_meta": {"hostvars": variables}}

- 

- 

- def inv_host(image, docker_extra_args):

-     null = open(os.devnull, 'w')

+         if not subject.startswith("docker:"):

+             continue

+         image = subject[7:]

+         name, host_vars = create_host(image, docker_extra_args)

+         if host_vars:

+             hosts[name] = host_vars

+     if not hosts:

+         return None

+     inventory = {"localhost": {"hosts": hosts}}

+     return inventory

  

-     try:

-         tty = os.open("/dev/tty", os.O_WRONLY)

-         os.dup2(tty, 2)

-     except OSError:

-         tty = None

-         pass

+ 

+ def create_host(image, docker_extra_args):

+     global CLEANUP_CIDS

+     null = open(os.devnull, 'w')

  

      directory = tempfile.mkdtemp(prefix="inventory-docker")

      cidfile = os.path.join(directory, "cid")

  

-     # Determine if container should be kept available for diagnosis after completion

-     try:

-         diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))

-     except ValueError:

-         diagnose = 0

- 

      # Check for any additional arguments to include when starting docker container

      try:

          extra_arg_list = shlex.split(docker_extra_args)

      except ValueError:

          raise RuntimeError("Could not parse DOCKER_EXTRA_ARGS")

  

-     sys.stderr.write("Launching Docker container for {0}\n".format(image))

+     print("Launching Docker container for {0}".format(image))

  

      # Make sure the docker service is running

      cmd = [
@@ -130,7 +131,9 @@ 

          raise RuntimeError("Could not find container file for launched container")

  

      with open(cidfile, "r") as f:

-         name = f.read().strip()

+         name = f.read().strip()[:12]

+     shutil.rmtree(directory)

+     CLEANUP_CIDS.append(name)

  

      # Need to figure out what python interpreter to use

      interpreters = ["/usr/bin/python2", "/usr/bin/python3"]
@@ -146,89 +149,23 @@ 

          sys.stderr.write("ERROR: Could not set ansible_python_interpreter")

          return None

  

-     python2_deps = ["python2-dnf", "libselinux-python"]

-     python3_deps = ["python3-dnf", "python3-libselinux"]

- 

-     # Now install the necessary stuff in the container :S

-     install = [

-         "/usr/bin/docker", "exec", "--user=root", name, "/usr/bin/yum", "-y", "install"

-     ]

-     if ansible_python_interpreter == "/usr/bin/python2":

-         install.extend(python2_deps)

-     if ansible_python_interpreter == "/usr/bin/python3":

-         install.extend(python3_deps)

-     try:

-         subprocess.check_call(install, stdout=sys.stderr.fileno())

-     except subprocess.CalledProcessError:

-         # Could not install necessary packages to run the tests.

-         # Need to stop and remove the container.

-         subprocess.call(["/usr/bin/docker", "rm", "-f", name], stdout=null)

-         raise RuntimeError("Could not install Ansible dependencies in launched container")

- 

-     # Directory to place artifacts

-     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

- 

      # The variables

      variables = {

          "ansible_connection": "docker",

-         "ansible_python_interpreter": ansible_python_interpreter

+         "ansible_python_interpreter": ansible_python_interpreter,

      }

  

-     # Process of our parent

-     ppid = os.getppid()

- 

-     child = os.fork()

-     if child:

-         return name, variables

- 

-     # Daemonize and watch the processes

-     os.chdir("/")

-     os.setsid()

-     os.umask(0)

- 

-     if tty is None:

-         tty = null.fileno()

- 

-     # Duplicate standard input to standard output and standard error.

-     os.dup2(null.fileno(), 0)

-     os.dup2(tty, 1)

-     os.dup2(tty, 2)

- 

-     # Now wait for the parent process to go away, then kill the VM

-     while True:

-         time.sleep(3)

- 

-         try:

-             os.kill(ppid, 0)

-         except OSError:

-             break  # Either of the processes no longer exist

- 

-     if diagnose:

-         sys.stderr.write("\n")

-         sys.stderr.write("DIAGNOSE: docker exec -it {0} /bin/bash\n".format(name))

-         sys.stderr.write("DIAGNOSE: kill {0} # when finished\n".format(os.getpid()))

- 

-         def _signal_handler(*args):

-             sys.stderr.write("\nDIAGNOSE ending...\n")

- 

-         signal.signal(signal.SIGTERM, _signal_handler)

-         signal.pause()

- 

-     # Dump the container logs

+     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

      try:

          os.makedirs(artifacts)

      except OSError as exc:

          if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):

              raise

-     log = os.path.join(artifacts, "{0}.log".format(os.path.basename(image)))

- 

-     # Kill the container

+     log = os.path.join(artifacts, "docker-{0}.log".format(os.path.basename(name)))

      with open(log, "w") as f:

          subprocess.call(["/usr/bin/docker", "logs", name], stdout=f.fileno())

-     subprocess.call(["/usr/bin/docker", "rm", "-f", name], stdout=null)

  

-     shutil.rmtree(directory)

-     sys.exit(0)

+     return name, variables

  

  

  if __name__ == '__main__':

provision/create-local inventory/standard-inventory-local
file renamed
+24 -20
@@ -24,23 +24,29 @@ 

  # Author: Stef Walter <stefw@redhat.com>

  

  import argparse

- import json

  import os

+ import shlex

  import sys

+ import yaml

  

  

  def main(argv):

      parser = argparse.ArgumentParser(description="Inventory for local")

-     parser.add_argument("--list", action="store_true", help="Verbose output")

-     parser.add_argument('--host', help="Get host variables")

+     parser.add_argument("--output", "-o", default="inventory.yaml", help="Inventory output file")

+     parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

      opts = parser.parse_args()

  

+     if not opts.subjects:

+         opts.subjects = ["local"]

+ 

      try:

-         if opts.host:

-             data = inv_host(opts.host)

-         else:

-             data = inv_list()

-         sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

+         data = inv_list(opts.subjects)

+         if not data:

+             return 0

+         inv_file = opts.output

+         with open(inv_file, "w") as f:

+             yaml.dump(data, f, default_flow_style=False)

+         sys.stdout.write("INFO: Successfully created {0}\n".format(inv_file))

      except RuntimeError as ex:

          sys.stderr.write("{0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

          return 1
@@ -48,20 +54,18 @@ 

      return 0

  

  

- def inv_list():

-     hosts = []

-     variables = {}

-     if os.environ.get("TEST_SUBJECTS", None) == "local":

-         host_vars = inv_host("local")

-         if host_vars:

-             hosts.append("local")

-             variables["local"] = host_vars

-     return {"subjects": {"hosts": hosts, "vars": {}},

-             "localhost": {"hosts": hosts, "vars": {}},

-             "_meta": {"hostvars": variables}}

+ def inv_list(subjects):

+     inventory = {"localhost": {"hosts": {}}}

+     for subject in subjects:

+         if subject == "local":

+             host_vars = create_host("local")

+             if host_vars:

+                 inventory["localhost"]["hosts"]["local"] = host_vars

+             return inventory

+     return None

  

  

- def inv_host(host):

+ def create_host(host):

      if host == "local":

          return {"ansible_connection": "local"}

      return None

provision/create-qcow2 inventory/standard-inventory-qcow2
file renamed
+63 -122
@@ -2,9 +2,7 @@ 

  

  import argparse

  import errno

- import json

  import os

- import shutil

  import shlex

  import signal

  import multiprocessing
@@ -13,7 +11,7 @@ 

  import sys

  import tempfile

  import time

- import distutils.util

+ import yaml

  

  IDENTITY = """

  -----BEGIN RSA PRIVATE KEY-----
@@ -69,39 +67,52 @@ 

    expire: False

  """.format(DEF_USER, DEF_PASSWD, AUTH_KEY)

  

+ CLEANUP_PIDS = []

+ 

  

  def main(argv):

      parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")

-     parser.add_argument("--list", action="store_true", help="Verbose output")

-     parser.add_argument('--host', help="Get host variables")

+     parser.add_argument("--output", "-o", default="inventory.yaml", help="Inventory output file")

      parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

      opts = parser.parse_args()

  

      try:

-         if opts.host:

-             data = inv_host(opts.host)

-         else:

-             data = inv_list(opts.subjects)

-         sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

+         data = inv_list(opts.subjects)

+         if not data:

+             return 0

+         inv_file = opts.output

+         with open(inv_file, "w") as f:

+             yaml.dump(data, f, default_flow_style=False)

+         print("INFO: Successfully created {0}".format(inv_file))

      except RuntimeError as ex:

-         sys.stderr.write("{0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

+         for pid in CLEANUP_PIDS:

+             # Kill the qemu process

+             try:

+                 os.kill(pid, signal.SIGTERM)

+             except OSError:

+                 pass

+         sys.stderr.write("FAIL {0}: {1}\n".format(os.path.basename(sys.argv[0]), str(ex)))

          return 1

  

      return 0

  

  

  def inv_list(subjects):

-     hosts = []

-     variables = {}

+     if not subjects:

+         return None

+     hosts = {}

      for subject in subjects:

-         if subject.endswith((".qcow2", ".qcow2c")):

-             host_vars = inv_host(subject)

-             if host_vars:

-                 hosts.append(subject)

-                 variables[subject] = host_vars

-     return {"localhost": {"hosts": hosts, "vars": {}},

-             "subjects": {"hosts": hosts, "vars": {}},

-             "_meta": {"hostvars": variables}}

+         if not subject.endswith((".qcow2", ".qcow2c")):

+             continue

+         host_vars = create_host(subject)

+         if host_vars:

+             # Use image name and pid info to allow multiple VMs with same image

+             name = "{0}-{1}".format(host_vars["image"], host_vars["host_pid"])

+             hosts[name] = host_vars

+     if not hosts:

+         return None

+     inventory = {"localhost": {"hosts": hosts}}

+     return inventory

  

  

  def start_qemu(image, cloudinit, log, portrange=(2222, 5555)):
@@ -130,15 +141,10 @@ 

                               "-display", "none", "-vnc", ":1"], stdout=open(log, 'a'), stderr=subprocess.STDOUT), port

  

  

- def inv_host(image):

+ def create_host(image):

+     global CLEANUP_PIDS

      null = open(os.devnull, 'w')

  

-     try:

-         tty = os.open("/dev/tty", os.O_WRONLY)

-         os.dup2(tty, 2)

-     except OSError:

-         tty = None

- 

      # A directory for temporary stuff

      directory = tempfile.mkdtemp(prefix="inventory-cloud")

      identity = os.path.join(directory, "identity")
@@ -158,13 +164,7 @@ 

                             "-volid", "cidata", "-joliet", "-rock", "-quiet",

                             "-output", cloudinit, userdata, metadata], stdout=null)

  

-     # Determine if virtual machine should be kept available for diagnosis after completion

-     try:

-         diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))

-     except ValueError:

-         diagnose = 0

- 

-     sys.stderr.write("Launching virtual machine for {0}\n".format(image))

+     print("Launching virtual machine for {0}".format(image))

  

      # And launch the actual VM

      artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))
@@ -187,11 +187,12 @@ 

      if proc is None:

          raise RuntimeError("Could not launch VM for qcow2 image"

                             " '{0}':{1}".format(image, cpe.output))

- 

+     CLEANUP_PIDS.append(proc.pid)

      for _ in range(0, 30):

          try:

              # The variables

              variables = {

+                 "image": "{0}".format(image),

                  "ansible_port": "{0}".format(port),

                  "ansible_host": "127.0.0.3",

                  "ansible_user": "root",
@@ -233,93 +234,33 @@ 

              pass

          raise RuntimeError("could not access launched qcow2 image: {0}".format(image))

  

-     # Process of our parent

-     ppid = os.getppid()

- 

-     child = os.fork()

-     if child:

-         # Need to figure out what python interpreter to use

-         interpreters = ["/usr/bin/python2", "/usr/bin/python3"]

-         for interpreter in interpreters:

-             check_file = [

-                 "/usr/bin/ansible",

-                 "--inventory",

-                 inventory,

-                 "localhost",

-                 "--module-name",

-                 "raw",

-                 "--args",

-                 "ls %s" % interpreter

-             ]

-             try:

-                 subprocess.check_call(check_file, stdout=null, stderr=null)

-                 ansible_python_interpreter = interpreter

-                 break

-             except subprocess.CalledProcessError:

-                 pass

-         else:

-             sys.stderr.write("ERROR: Could not set ansible_python_interpreter")

-             return None

-         variables["ansible_python_interpreter"] = ansible_python_interpreter

-         # Update inventory file

-         args = " ".join(["{0}='{1}'".format(*item) for item in variables.items()])

-         inventory = os.path.join(directory, "inventory")

-         with open(inventory, "w") as f:

-             f.write("[subjects]\nlocalhost {0}\n".format(args))

-         return variables

- 

-     # Daemonize and watch the processes

-     os.chdir("/")

-     os.setsid()

-     os.umask(0)

- 

-     if tty is None:

-         tty = null.fileno()

- 

-     # Duplicate standard input to standard output and standard error.

-     os.dup2(null.fileno(), 0)

-     os.dup2(tty, 1)

-     os.dup2(tty, 2)

- 

-     # alternatively, lock on a file

-     lock_file = os.environ.get("LOCK_ON_FILE", None)

-     while True:

-         time.sleep(3)

- 

-         if lock_file:

-             if not os.path.exists(lock_file):

-                 sys.stderr.write("Lock file is gone.")

-                 break

-         else:

-             # Now wait for the parent process to go away, then kill the VM

-             try:

-                 os.kill(ppid, 0)

-                 os.kill(proc.pid, 0)

-             except OSError:

-                 sys.stderr.write("Either parent process or VM process is gone.")

-                 break  # Either of the processes no longer exist

- 

-     if diagnose:

-         sys.stderr.write("\n")

-         sys.stderr.write("DIAGNOSE: ssh -p {0} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "

-                          "root@{1} # password: {2}\n".format(port, "127.0.0.3", "foobar"))

-         sys.stderr.write("DIAGNOSE: export ANSIBLE_INVENTORY={0}\n".format(inventory))

-         sys.stderr.write("DIAGNOSE: kill {0} # when finished\n".format(os.getpid()))

- 

-         def _signal_handler(*args):

-             sys.stderr.write("\nDIAGNOSE ending...\n")

- 

-         signal.signal(signal.SIGTERM, _signal_handler)

-         signal.pause()

- 

-     # Kill the qemu process

-     try:

-         os.kill(proc.pid, signal.SIGTERM)

-     except OSError:

-         pass

+     # Need to figure out what python interpreter to use

+     interpreters = ["/usr/bin/python2", "/usr/bin/python3"]

+     for interpreter in interpreters:

+         check_file = [

+             "/usr/bin/ansible",

+             "--inventory",

+             inventory,

+             "localhost",

+             "--module-name",

+             "raw",

+             "--args",

+             "ls %s" % interpreter

+         ]

+         try:

+             subprocess.check_call(check_file, stdout=null, stderr=null)

+             ansible_python_interpreter = interpreter

+             break

+         except subprocess.CalledProcessError:

+             pass

+     else:

+         sys.stderr.write("ERROR: Could not set ansible_python_interpreter")

+         return None

+     variables["ansible_python_interpreter"] = ansible_python_interpreter

+     variables["host_pid"] = proc.pid

+     variables["host_directory"] = directory

  

-     shutil.rmtree(directory)

-     sys.exit(0)

+     return variables

  

  

  if __name__ == '__main__':

@@ -0,0 +1,56 @@ 

+ #!/usr/bin/env python

+ 

+ import argparse

+ import os

+ import signal

+ import shutil

+ import subprocess

+ import sys

+ import yaml

+ 

+ 

+ def main():

+     """

+     Clean up everything that was created by the inventory.

+     """

+     parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")

+     parser.add_argument("--inventory", "-i", default="inventory.yaml", help="Inventory file")

+     opts = parser.parse_args()

+ 

+     inv_name = opts.inventory

+     if not os.path.isfile(inv_name):

+         # Nothing to clean up

+         return 0

+     with open(inv_name) as ifile:

+         inventory = yaml.load(ifile)

+ 

+     try:

+         hosts = inventory["localhost"]["hosts"]

+     except Exception:

+         raise RuntimeError("{0} does not seem to be valid".format(inv_name))

+ 

+     for name in hosts:

+         host = hosts[name]

+         if "host_pid" in host:

+             try:

+                 os.kill(host["host_pid"], signal.SIGTERM)

+             except OSError:

+                 pass

+         if "host_directory" in host:

+             try:

+                 shutil.rmtree(host["host_directory"])

+             except OSError:

+                 pass

+         if "ansible_connection" in host and host["ansible_connection"] == "docker":

+             null = open(os.devnull, 'w')

+             try:

+                 subprocess.call(["/usr/bin/docker", "rm", "-f", name], stdout=null, stderr=null)

+             except subprocess.CalledProcessError:

+                 pass

+ 

+     os.remove(inv_name)

+     return 0

+ 

+ 

+ if __name__ == '__main__':

+     sys.exit(main())

@@ -0,0 +1,123 @@ 

+ #!/usr/bin/env python

+ 

+ import os

+ import sys

+ import subprocess

+ import yaml

+ 

+ 

+ def main(argv):

+     """

+     Run all standard inventory scripts and merge their output into a single inventory file.

+     """

+ 

+     # run and merge output from standard inventory scripts

+     merge_standard_inventories(argv[1:])

+ 

+     return 0

+ 

+ 

+ def _run(cmd):

+     # Force command output to be printed while it is executed

+     p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

+     while p.poll() is None:

+         sys.stdout.write(p.stdout.readline())

+         sys.stdout.flush()

+ 

+     return p.returncode

+ 

+ 

+ def merge_standard_inventories(args):

+ 

+     provision_dir = os.environ.get("TEST_PROVISION_DIRECTORY", "/usr/share/sti/provision")

+ 

+     ignore_ext_string = ("~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo")

+     ignore_ext_list = []

+     for s in ignore_ext_string.split(','):

+         if s.strip():

+             ignore_ext_list.append(s.strip())

+     inventory_ignore_extensions = tuple(ignore_ext_list)

+ 

+     merged = Inventory()

+ 

+     inv_name = "inventory.yaml"

+ 

+     for i in os.listdir(provision_dir):

+         ipath = os.path.join(provision_dir, i)

+         if not i.startswith("create-"):

+             continue

+         if i.endswith(inventory_ignore_extensions):

+             continue

+         if not os.access(ipath, os.X_OK):

+             continue

+ 

+         cmd = [ipath] + args

+ 

+         # Make sure old inventory file does not exist

+         if os.path.isfile(inv_name):

+             os.remove(inv_name)

+         try:

+             _run(cmd)

+         except subprocess.CalledProcessError:

+             raise RuntimeError("Could not run: {0}".format(str(cmd)))

+ 

+         if not os.path.isfile(inv_name):

+             continue

+ 

+         merged.merge_file(inv_name)

+         # After merging the inventory file it is not needed any more

+         os.remove(inv_name)

+ 

+     return merged.save(inv_name)

+ 

+ 

+ class Inventory:

+     """

+     Merge YAML data from standard test dynamic inventory scripts.

+ 

+     Note: This class is very specific to the YAML data written by the

+     ansible dynamic inventory scripts that are provided by the

+     standard-test-roles package. In particular, it insists on finding

+     and generating "localhost" group.

+     """

+ 

+     def __init__(self):

+         self.hosts = {}

+         self.inventory = {"localhost": {"hosts": {}}}

+ 

+     def merge_file(self, fname):

+         if not os.path.isfile(fname):

+             raise ValueError("File {0} does not exist".format(fname))

+         with open(fname) as ifile:

+             self.merge(yaml.load(ifile))

+ 

+     def merge(self, inventory):

+         if not isinstance(inventory, dict):

+             raise ValueError(

+                 "inventory does not contain the expected top level dictionary")

+ 

+         if "localhost" not in inventory or not isinstance(inventory["localhost"], dict):

+             raise ValueError(

+                 "inventory does not contain the expected 'localhost' dictionary key")

+         if "hosts" not in inventory["localhost"] or not isinstance(inventory["localhost"]["hosts"], dict):

+             raise ValueError(

+                 "inventory does not contain the expected [localhost][hosts] dict")

+ 

+         for h in inventory["localhost"]["hosts"]:

+             if h in self.hosts:

+                 raise ValueError("host: {0} already exists in inventory".format(h))

+             self.hosts[h] = inventory["localhost"]["hosts"][h]

+ 

+         self.inventory["localhost"]["hosts"].update(self.hosts)

+ 

+     def save(self, fname):

+         if len(self.inventory["localhost"]["hosts"]) == 0:

+             return True

+         with open(fname, "w") as f:

+             yaml.dump(self.inventory, f, default_flow_style=False)

+ 

+         return True

+ 

+ 

+ if __name__ == '__main__':

+     sys.exit(main(sys.argv))

file removed
-30
@@ -1,30 +0,0 @@ 

- # Standard Test Interface Scripts

- 

- ## `merge-standard-inventory`

- 

- `merge-standard-inventory` is a wrapper script that runs _all_ of the

- available Standard Testing Interface dynamic inventory scripts from

- `/usr/share/ansible/inventory` and merges their output. This allows

- an individual test to provide a single customized dynamic inventory

- script that sets up certain environment variables before running all

- the default standard-inventory scripts. To make use of it, simply

- have `ansible` use the custom inventory script (which makes use of

- the `merge-standard-inventory` wrapper) in place of the

- `/usr/share/ansible/inventory` directory.

- 

- The following short example custom inventory script sets the

- `TEST_DOCKER_EXTRA_ARGS` environment variable before running

- `merge-standard-inventory` with any command line arguments provided

- by ansible. By doing this, when `merge-standard-inventory` runs the

- `standard-inventory-docker` script it will launch the docker

- container with `CAP_SYS_ADMIN` capabilities (if the `TEST_SUBJECTS`

- environment variable triggers a container to be launched). Other

- `standard-inventory-*` scripts will simply ignore the

- `TEST_DOCKER_EXTRA_ARGS` environment variable--but may have their own

- environment variables set in the inventory script.

- 

- ```

- #!/bin/bash

- export TEST_DOCKER_EXTRA_ARGS="--cap-add=SYS_ADMIN"

- exec merge-standard-inventory "$@"

- ```

@@ -1,208 +0,0 @@ 

- #!/usr/bin/env python

- 

- import json

- import os

- import sys

- import subprocess

- import time

- 

- 

- def main(argv):

-     """

-     Run all standard inventory scripts and return their merged output.

- 

-     Note the standard inventory scripts clean up their spawned hosts

-     when they detect their parent processes go away. To accomodate

-     that behavior, this script forks a child process to run the

-     inventory scripts, send back the merged inventory, and then wait

-     for the parent of this script (ansible) to die before silently

-     exiting. In the mean time, this script outputs the merged

-     inventory gathered by the child and then exits.

-     """

- 

-     # Keep track of our parent

-     waitpid = os.getppid()

- 

-     tty = err_to_tty()

-     pipein, pipeout = os.pipe()

- 

-     childpid = os.fork()

-     if childpid == 0:

-         # this is the child process

- 

-         # close the inherited input side of the pipe

-         os.close(pipein)

- 

-         # run and merge output from standard inventory scripts

-         merged_data = merge_standard_inventories(argv[1:])

- 

-         # send merged data to parent via output pipe

-         os.write(pipeout, merged_data.encode('utf-8'))

- 

-         # close the pipe so the parent knows we are done

-         os.close(pipeout)

- 

-         # wait for the grandparent process to exit

-         linger(waitpid, tty)

- 

-         # exit cleanly

-         sys.exit(0)

- 

-     # this is the parent process

- 

-     # close the inherited output side of the pipe

-     os.close(pipeout)

- 

-     # send eveything from the child to stdout

-     while True:

-         data = os.read(pipein, 999)

-         if not data:

-             os.close(pipein)

-             break

-         sys.stdout.write(data.decode('utf-8'))

- 

-     return 0

- 

- 

- def merge_standard_inventories(args):

- 

-     inventory_dir = os.environ.get(

-         "TEST_DYNAMIC_INVENTORY_DIRECTORY", "/usr/share/ansible/inventory")

- 

-     ignore_ext_string = os.environ.get(

-         "ANSIBLE_INVENTORY_IGNORE", "~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo")

-     ignore_ext_list = []

-     for s in ignore_ext_string.split(','):

-         if s.strip():

-             ignore_ext_list.append(s.strip())

-     inventory_ignore_extensions = tuple(ignore_ext_list)

- 

-     merged = Inventory()

- 

-     for i in os.listdir(inventory_dir):

-         ipath = os.path.join(inventory_dir, i)

-         if not i.startswith("standard-inventory-"):

-             continue

-         if i.endswith(inventory_ignore_extensions):

-             continue

-         if not os.access(ipath, os.X_OK):

-             continue

- 

-         cmd = [ipath] + args

- 

-         try:

-             inv_out = subprocess.check_output(cmd, stdin=None, close_fds=True)

-         except subprocess.CalledProcessError:

-             raise RuntimeError("Could not run: {0}".format(str(cmd)))

- 

-         merged.merge(inv_out.decode('utf-8'))

- 

-     return merged.dumps()

- 

- 

- def err_to_tty():

-     try:

-         tty = os.open("/dev/tty", os.O_WRONLY)

-         os.dup2(tty, 2)

-     except OSError:

-         tty = None

- 

-     return tty

- 

- 

- def linger(waitpid, tty=None):

-     # Go into daemon mode and watch the process

- 

-     null = open(os.devnull, 'w')

- 

-     os.chdir("/")

-     os.setsid()

-     os.umask(0)

- 

-     if tty is None:

-         tty = null.fileno()

- 

-     # Duplicate standard input to standard output and standard error.

-     os.dup2(null.fileno(), 0)

-     os.dup2(tty, 1)

-     os.dup2(tty, 2)

- 

-     # Now wait for the watched process to go away, then return

-     while True:

-         time.sleep(3)

- 

-         try:

-             os.kill(waitpid, 0)

-         except OSError:

-             break  # The process no longer exists

- 

-     return

- 

- 

- class Inventory:

-     """

-     Merge JSON data from standard test dynamic inventory scripts.

- 

-     Note: This class is very specific to the JSON data written by the

-     ansible dynamic inventory scripts that are provided by the

-     standard-test-roles package. In particular, it insists on finding

-     and generating "subjects" and "localhost" members.

-     """

- 

-     def __init__(self):

-         self.hosts = []

-         self.variables = {}

- 

-     def merge_files(self, files):

-         for f in files:

-             with open(f) as ifile:

-                 s = ifile.read()

-                 self.merge(s)

- 

-     def merge(self, s):

-         # parse provided string as JSON

-         inventory = json.loads(s)

- 

-         if not isinstance(inventory, dict):

-             raise ValueError(

-                 "inventory JSON does not contain the expected top level dictionary")

- 

-         if "subjects" not in inventory or not isinstance(inventory["subjects"], dict):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [subjects] dictionary")

-         if "hosts" not in inventory["subjects"] or not isinstance(inventory["subjects"]["hosts"], list):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [subjects][hosts] list")

- 

-         for h in inventory["subjects"]["hosts"]:

-             self.hosts.append(h)

- 

-         if "_meta" not in inventory or not isinstance(inventory["_meta"], dict):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [_meta] dictionary")

-         if "hostvars" not in inventory["_meta"] or not isinstance(inventory["_meta"]["hostvars"], dict):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [_meta][hostvars] dict")

- 

-         for h in inventory["_meta"]["hostvars"]:

-             if not isinstance(inventory["_meta"]["hostvars"][h], dict):

-                 raise ValueError(

-                     "inventory JSON does not contain the expected [_meta][hostvars][{0}] dict".format(h))

- 

-             self.variables[h] = inventory["_meta"]["hostvars"][h]

- 

-         if "localhost" not in inventory or not isinstance(inventory["localhost"], dict):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [localhost] dictionary")

-         if "hosts" not in inventory["localhost"] or not isinstance(inventory["localhost"]["hosts"], list):

-             raise ValueError(

-                 "inventory JSON does not contain the expected [localhost][hosts] list")

- 

-     def dumps(self):

-         data = {"subjects": {"hosts": self.hosts, "vars": {}}, "localhost": {

-             "hosts": self.hosts, "vars": {}}, "_meta": {"hostvars": self.variables}}

-         return json.dumps(data, indent=4, separators=(',', ': '))

- 

- 

- if __name__ == '__main__':

-     sys.exit(main(sys.argv))

file added
+121
@@ -0,0 +1,121 @@ 

+ #!/usr/bin/env python

+ 

+ import argparse

+ import os

+ import subprocess

+ import sys

+ import traceback

+ import yaml

+ 

+ 

+ SUCCESS = 0       # everything passed

+ FAILURE = 1       # provisioning of the test subject failed

+ TEST_FAILURE = 2  # execution of the test playbook failed

+ 

+ 

class STI:
    """Driver for one Standard Test Interface run.

    Provisions an inventory for the subjects listed in TEST_SUBJECTS, runs
    the ``tests.yml`` playbook for a given tag and, on destruction, cleans
    the provisioned resources up again — unless TEST_DEBUG is set, in which
    case connection hints are printed instead so the test subjects can be
    inspected by hand.
    """

    def __init__(self):
        # Directory holding the provision-inventory / default_cleanup helpers.
        self.provision_directory = os.environ.get("TEST_PROVISION_DIRECTORY", "/usr/share/sti/provision")
        self.test_filename = "tests.yml"
        self.inventory_filename = "inventory.yaml"
        # Truthy when the user wants the subjects kept alive for debugging.
        self.debug = os.environ.get("TEST_DEBUG")

    def __del__(self):
        # getattr() guard: if __init__ did not finish, the attributes may be
        # missing and __del__ must not raise on a partially built object.
        if getattr(self, "debug", None):
            self.print_debug_inv()
        else:
            self._run(["{0}/default_cleanup".format(self.provision_directory)])

    def _run(self, cmd):
        """Run *cmd*, streaming its combined stdout/stderr to our stdout.

        Returns the command's exit status.
        """
        if self.debug:
            print("INFO: Running '{0}'".format(" ".join(cmd)))
        # universal_newlines=True: under Python 3 the pipe then yields str,
        # which is what sys.stdout.write() expects (bytes would raise).
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, universal_newlines=True)
        # Iterate the pipe to EOF instead of poll()+readline(): the old loop
        # could drop output still buffered when the child exited.
        for line in p.stdout:
            sys.stdout.write(line)
            sys.stdout.flush()

        return p.wait()

    def run_test(self, tag):
        """Provision the inventory and run the test playbook for *tag*.

        Returns SUCCESS, FAILURE (setup problem: missing tests.yml or the
        inventory was not created) or TEST_FAILURE (ansible-playbook exited
        non-zero).
        """
        if not os.path.isfile(self.test_filename):
            print("FAIL: {0} does not exist".format(self.test_filename))
            return FAILURE

        # Create all required inventories
        self._run(["{0}/provision-inventory".format(self.provision_directory)])
        if not os.path.isfile(self.inventory_filename):
            print("FAIL: didn't create {0}".format(self.inventory_filename))
            return FAILURE

        # Use self.inventory_filename (was a hard-coded "inventory.yaml").
        args = ["ansible-playbook", "--inventory", self.inventory_filename,
                "--tag", tag, self.test_filename]
        if self._run(args) != 0:
            return TEST_FAILURE

        return SUCCESS

    def print_debug_inv(self):
        """
        Print information how to connect to test subjects and clean up environment
        """
        if not os.path.isfile(self.inventory_filename):
            return
        with open(self.inventory_filename) as ifile:
            # safe_load: the inventory is plain data; yaml.load() without a
            # Loader is deprecated and allows arbitrary object construction.
            inventory = yaml.safe_load(ifile)

        try:
            hosts = inventory["localhost"]["hosts"]
        except Exception:
            raise RuntimeError("{0} does not seem to be valid".format(self.inventory_filename))

        print(80*"#")
        for name, host in hosts.items():
            if "ansible_host" in host:
                # assumes the ssh-style entries always carry port/user/pass,
                # as the qcow2 provisioner writes them — TODO confirm
                port = host["ansible_port"]
                hostname = host["ansible_host"]
                user = host["ansible_user"]
                passwd = host["ansible_ssh_pass"]
                print("DIAGNOSE: ssh -p {0} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null "
                      "{1}@{2} # password: {3}".format(port, user, hostname, passwd))

            if "ansible_connection" in host and host["ansible_connection"] == "docker":
                print("DIAGNOSE: docker exec -it {0} /bin/bash".format(name))

        print("DIAGNOSE: Don't forget to execute '{0}/default_cleanup' when finished".format(self.provision_directory))
        print(80*"#")

+ 

+ 

def main():
    """Parse the CLI options, export the matching TEST_* environment
    variables and run the playbook for the requested tag.

    Returns SUCCESS, FAILURE or TEST_FAILURE (see the module constants).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tag', '-t', choices=['atomic', 'classic', 'container'], required=True)
    parser.add_argument('--test-subjects')
    parser.add_argument('--test-artifacts', default=os.path.join(os.getcwd(), "artifacts"))
    parser.add_argument('--debug', action='store_true')
    opts = parser.parse_args()

    # CLI options only supply defaults — an already exported variable wins.
    # Make sure we know where to create the artifacts.
    if not os.environ.get("TEST_ARTIFACTS"):
        os.environ["TEST_ARTIFACTS"] = opts.test_artifacts

    if opts.test_subjects and "TEST_SUBJECTS" not in os.environ:
        os.environ["TEST_SUBJECTS"] = opts.test_subjects

    if opts.debug and "TEST_DEBUG" not in os.environ:
        os.environ["TEST_DEBUG"] = "1"

    runner = STI()
    try:
        return runner.run_test(opts.tag)
    except Exception as exc:
        print("FAIL: {0}".format(exc))
        traceback.print_exc()
        return FAILURE


if __name__ == '__main__':
    sys.exit(main())

no initial comment

rebased onto f0077f19081ab1ce810f5635beffaa87bf48172a

5 years ago

rebased onto b8f083dab3bbe492caae9fb60a6403e82e42be6f

5 years ago

I have been able to create a single inventory file with 3 different hosts using:

TEST_SUBJECTS="../atomic.qcow2 local docker:docker.io/library/fedora:rawhide" and the merge-standard-inventory script

localhost:
  hosts:
      ../atomic.qcow2-22719:
        ansible_host: 127.0.0.3
        ansible_port: '2222'
        ansible_python_interpreter: /usr/bin/python2
        ansible_ssh_common_args: -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no
        ansible_ssh_pass: foobar
        ansible_ssh_private_key_file: /tmp/inventory-cloudZ3WqeB/identity
        ansible_user: root
        host_directory: /tmp/inventory-cloudZ3WqeB
        host_pid: 22719
        image: ../atomic.qcow2
      551a34441393af4d321b059337556858543f04a8d6a9a3511a895071cc3f6366:
        ansible_connection: docker
        ansible_python_interpreter: /usr/bin/python3
      local:
        ansible_connection: local

rebased onto 3435640adb8ce334880a77dcba13ec0aa48c562b

5 years ago

1 new commit added

  • fixed subjects for create-local
5 years ago

rebased onto 73f92924874bf92db893b3704be9a6d20dd96139

5 years ago

Tested the command below with build https://copr.fedorainfracloud.org/coprs/g/osci/standard-test-roles/build/752735/

TEST_SUBJECTS=../atomic.qcow2 sti --tag atomic
TEST_SUBJECTS=docker:docker.io/library/fedora:rawhide sti --tag container
TEST_SUBJECTS="" sti --tag classic
TEST_SUBJECTS="local" sti --tag classic

sti --tag atomic --test-subjects ../atomic.qcow2
sti --tag container --test-subjects docker:docker.io/library/fedora:rawhide
sti --tag classic --test-subjects local
sti --tag classic

TEST_ARTIFACTS=<path>, TEST_DEBUG=1 and sti with "--debug" options are also supported.

rebased onto f3d37a32c038976f73702ede3c92e1022609f03f

5 years ago

rebased onto 4a153f07a65aa004d6ef32f16b9e7fe488c951d0

5 years ago

Exit codes:
0 - Pass
1 - Provision of test subject failed
2 - Execution of the test playbook failed

rebased onto fe1bbd47d8ef5c2fdb1d8b7730cf617dd1c0ed53

5 years ago

Added support for a local inventory file when starting a container. The local inventory will probably be replaced in the future, but at least we don't break the current tests that use it.

rebased onto 7e35c10

5 years ago