#289 Reformat standard-inventory-qcow2
Merged 5 years ago by astepano. Opened 5 years ago by astepano.

file modified
+135 -170
@@ -1,39 +1,30 @@ 

  #!/usr/bin/python3

  

- import argparse

- import atexit

- import errno

- import json

+ # SPDX-License-Identifier: MIT

+ # Copyright (c) 2017-2018 Red Hat Inc.

+ # Authors: Merlin Mathesius <merlinm@redhat.com>

+ #          Andrei Stepanov <astepano@redhat.com>

+ #          Bruno Goncalves <bgoncalv@redhat.com>

+ 

  import os

  import fmf

- import shutil

+ import sys

+ import json

+ import yaml

+ import time

+ import errno

  import shlex

  import signal

- import logging

- import random

  import socket

- import subprocess

- import sys

+ import atexit

+ import shutil

+ import random

+ import logging

+ import argparse

  import tempfile

- import time

- import distutils.util

  import functools

- 

- import yaml

- 

- 

- def print_bad_inventory(exctype, value, tb):

-     """Print empty inventory on any uncatched exception. This will prevent

-     running playbook on localhost.

-     """

-     hosts = ["fake_hostname_failed_inventory"]

-     bad_inv = {"localhost": {"hosts": hosts, "vars": {}},

-                "subjects": {"hosts": hosts, "vars": {}},

-                "_meta": {"hostvars": {}}}

-     sys.stdout.write(json.dumps(bad_inv, indent=4, separators=(',', ': ')))

- 

- 

- sys.excepthook = print_bad_inventory

+ import subprocess

+ import distutils.util

  

  

  IDENTITY = """
@@ -65,19 +56,15 @@ 

  Df9PUWMYZ3HRwwdsYovSOkT53fG6guy+vElUEDkrpZYczROZ6GUcx70=

  -----END RSA PRIVATE KEY-----

  """

- 

- 

  AUTH_KEY = ("AAAAB3NzaC1yc2EAAAADAQABAAABAQDUOtNJdBEXyKxBB898rdT54ULjMGuO6v4jLX"

              "mRsdRhR5Id/lKNc9hsdioPWUePgYlqML2iSV72vKQoVhkyYkpcsjr3zvBny9+5xej3"

              "+TBLoEMAm2hmllKPmxYJDU8jQJ7wJuRrOVOnk0iSNF+FcY/yaQ0owSF02Nphx47j2K"

              "Wc0IjGGlt4fl0fmHJuZBA2afN/4IYIIsEWZziDewVtaEjWV3InMRLllfdqGMllhFR+"

              "ed2hQz9PN2QcapmEvUR4UCy/mJXrke5htyFyHi8ECfyMMyYeHwbWLFQIve4CWix9qt"

              "ksvKjcetnxT+WWrutdr3c9cfIj/c0v/Zg/c4zETxtp")

- 

  DEF_USER = "root"

  DEF_PASSWD = "foobar"

  DEF_HOST = "127.0.0.3"

- 

  USER_DATA = """#cloud-config

  users:

    - default
@@ -90,51 +77,64 @@ 

      {0}:{1}

    expire: False

  """.format(DEF_USER, DEF_PASSWD, AUTH_KEY)

- 

  EMPTY_INVENTORY = {}

  LOG_FILE = "default_provisioners.log"

  

  

- def get_artifact_path(path=""):

-     """Return path to an artifact file in artifacts directory. If path == ""

-     than return path artifacts dir.  Create artifacts dir if necessary.

+ class AdditionalDrives(object):

+     """Prepare additional drives options for qemu.  Based on FMF config creates

+     temporary sparse files and returns corresponding qemu command options.

+     cleanup() will be called eventually to close the files.

      """

-     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

-     try:

-         os.makedirs(artifacts)

-     except OSError as exc:

-         if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):

-             raise

-     return os.path.join(artifacts, path)

+ 

+     _tempfiles = list()

+ 

+     @classmethod

+     def generate(cls):

+         """Generate sparse files and return drive qemu options

+         Returns

+         -------

+         list of str

+                 qemu -drive options

+         """

+         drives = fmf_get(['qemu', 'drive'], list())

+         result = []

+         for drive in drives:

+             # create temporary sparse file

+             size = int(drive.get('size', 2 * 1024 ** 3))  # default size: 2G

+             path = drive.get('path', None)

+             path = str(path) if path is not None else None

+             drive_file = tempfile.NamedTemporaryFile(dir=path)

+             drive_file.truncate(size)

+             cls._tempfiles.append({'file': drive_file, 'path': path})

+             logger.info("Created temporary sparse file '%s'." % drive_file.name)

+             # translate data into qemu command options

+             result += ["-drive", "file=%s,media=disk,if=virtio" % drive_file.name]

+         atexit.register(cls.cleanup)

+         return result

+ 

+     @classmethod

+     def cleanup(cls):

+         """Close all temporary files created by this class

+         """

+         for tempfile in cls._tempfiles:

+             fullname = os.path.join(tempfile['path'], tempfile['file'].name)

+             logger.info("Closing and removing temporary sparse file '%s'" % fullname)

+             if os.path.isfile(fullname):

+                 tempfile['file'].close()

  

  

- logger = logging.getLogger(__name__)

- logger.setLevel(logging.DEBUG)

- # stderr output

- conhandler = logging.StreamHandler()

- # Print to strerr by default messages with level >= warning, can be changed

- # with setting TEST_DEBUG=1.

- try:

-     diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))

- except ValueError:

-     diagnose = 0

- conhandler.setLevel(logging.WARNING)

- if diagnose:

-     # Collect all messages with any log level to stderr.

-     conhandler.setLevel(logging.NOTSET)

- # Log format for stderr.

- log_format = "[%(levelname)-5.5s] {}: %(message)s".format(os.path.basename(__file__))

- formatter = logging.Formatter(log_format)

- conhandler.setFormatter(formatter)

- logger.addHandler(conhandler)

- 

- # Temporary fix for issue #233.  Strings in Python3 are all unicode.  To get a

- # unicode string in Python2 necessary to use `unicode()`.  fmf.Tree() expects a

- # unicode string even in Python2 module.

- try:

-     UNICODE_EXISTS = bool(type(unicode))

- except NameError:

-     unicode = str

+ def print_bad_inventory():

+     """Print bad inventory on any uncatched exception. This will prevent

+     running playbook on localhost.

+     """

+     fake_host = "fake_host"

+     fake_hostname = "standard-inventory-qcow2_failed_check_logs"

+     hosts = [fake_host]

+     bad_inv = {"localhost": {"hosts": hosts, "vars": {}},

+                "subjects": {"hosts": hosts, "vars": {}},

+                "_meta": {"hostvars": {fake_host: {"ansible_host": fake_hostname}}}}

+     sys.stdout.write(json.dumps(bad_inv, indent=4, separators=(',', ': ')))

  

  

  def which(executable, default=None):
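As context for the print_bad_inventory() helper added in this hunk, here is a minimal, self-contained sketch of the fallback inventory it writes on an unhandled exception. The host alias, group names and hostname mirror the function above; the comment on why the hostname is deliberately unresolvable is an interpretation, not something stated in the diff.

import json

fake_host = "fake_host"
fake_hostname = "standard-inventory-qcow2_failed_check_logs"
bad_inv = {
    "localhost": {"hosts": [fake_host], "vars": {}},
    "subjects": {"hosts": [fake_host], "vars": {}},
    # ansible_host points at a name that should not resolve, so a play run
    # against this inventory fails fast instead of silently hitting localhost.
    "_meta": {"hostvars": {fake_host: {"ansible_host": fake_hostname}}},
}
print(json.dumps(bad_inv, indent=4, separators=(',', ': ')))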
@@ -146,36 +146,17 @@ 

      return default

  

  

- def main(argv):

-     parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")

-     parser.add_argument("--list", action="store_true", help="Verbose output")

-     parser.add_argument('--host', help="Get host variables")

-     parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

-     opts = parser.parse_args()

-     # Send logs to common logfile for all default provisioners.

-     log_file = get_artifact_path(LOG_FILE)

-     fhandler = logging.FileHandler(log_file)

-     # Collect all messages with any log level to log file.

-     fhandler.setLevel(logging.NOTSET)

-     log_format = ("%(asctime)s [{}/%(threadName)-12.12s] [%(levelname)-5.5s]:"

-                   "%(message)s").format(os.path.basename(__file__))

-     logFormatter = logging.Formatter(log_format)

-     fhandler.setFormatter(logFormatter)

-     logger.addHandler(fhandler)

-     logger.info("Start provisioner.")

-     ansibles = ['ansible', 'ansible-3', None]

-     global ansible_bin

-     ansible_bin = functools.reduce(which, ansibles)

-     if not ansible_bin:

-         logger.error("Fail to find ansible.")

-         sys.exit(1)

-     logger.info("Path to ansible: %s", ansible_bin)

-     if opts.host:

-         data = inv_host(opts.host)

-     else:

-         data = inv_list(opts.subjects)

-     # Dump Ansible inventory.

-     sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

+ def get_artifact_path(path=""):

+     """Return path to an artifact file in artifacts directory. If path == ""

+     then return the path to the artifacts dir itself.  Create the artifacts dir if necessary.

+     """

+     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))

+     try:

+         os.makedirs(artifacts)

+     except OSError as exc:

+         if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):

+             raise

+     return os.path.join(artifacts, path)

  

  

  def inv_list(subjects):
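A hedged sketch of the lookup idiom built on which() above and used with functools.reduce() in main() further down. Only the which() signature and the candidate list come from the diff; the stand-in body below uses shutil.which and is an assumption about the real helper's behaviour.

import functools
import shutil

def which(executable, default=None):
    # Stand-in for the script's which(): resolve the candidate on PATH,
    # otherwise fall through to the next candidate, which reduce() passes
    # in as `default` on every step.
    found = shutil.which(executable) if executable else None
    return found or default

# Each reduce() step feeds the previous result back in, so the expression
# yields the first ansible binary that resolves, or None once the trailing
# None sentinel is reached.
ansible_bin = functools.reduce(which, ['ansible', 'ansible-3', None])
print(ansible_bin)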
@@ -240,7 +221,7 @@ 

          if self.path != path or self.tree is None:

              FmfMetadataTree.path = path

              try:

-                 FmfMetadataTree.tree = fmf.Tree(unicode(path))

+                 FmfMetadataTree.tree = fmf.Tree(path)

              except Exception:

                  """Fmf initialization failed. Do not try initialize further for this path.

                  """
@@ -280,52 +261,6 @@ 

      return value

  

  

- class AdditionalDrives(object):

-     """Prepare additional drives options for qemu

- 

-     Based on FMF config creates temporary sparse files and returns

-     corresponding qemu command options.

-     cleanup() will be called eventually to close the files.

-     """

- 

-     _tempfiles = list()

- 

-     @classmethod

-     def generate(cls):

-         """Generate sparse files and return drive qemu options

-         Returns

-         -------

-         list of str

-                 qemu -drive options

-         """

-         drives = fmf_get(['qemu', 'drive'], list())

-         result = []

-         for drive in drives:

-             # create temporary sparse file

-             size = int(drive.get('size', 2 * 1024 ** 3))  # default size: 2G

-             path = drive.get('path', None)

-             path = str(path) if path is not None else None

-             drive_file = tempfile.NamedTemporaryFile(dir=path)

-             drive_file.truncate(size)

-             cls._tempfiles.append({'file': drive_file, 'path': path})

-             logger.info("Created temporary sparse file '%s'." % drive_file.name)

- 

-             # translate data into qemu command options

-             result += ["-drive", "file=%s,media=disk,if=virtio" % drive_file.name]

-         atexit.register(cls.cleanup)

-         return result

- 

-     @classmethod

-     def cleanup(cls):

-         """Close all temporary files created by this class

-         """

-         for tempfile in cls._tempfiles:

-             fullname = os.path.join(tempfile['path'], tempfile['file'].name)

-             logger.info("Closing and removing temporary sparse file '%s'" % fullname)

-             if os.path.isfile(fullname):

-                 tempfile['file'].close()

- 

- 

  def start_qemu(image, cloudinit, portrange=(2222, 5555)):

      for _ in range(10):

          port = random.randint(*portrange)
@@ -341,7 +276,6 @@ 

              sock.close()

      else:

          raise RuntimeError("unable to find free local port to map SSH to")

- 

      # Log all traffic received from the guest to a file.

      log_file = "{0}.guest.log".format(os.path.basename(image))

      log_guest = get_artifact_path(log_file)
@@ -352,7 +286,6 @@ 

      param_net_nic_model = str(fmf_get(['qemu', 'net_nic', 'model'], 'virtio'))

      # Use -cpu host and -smp by default.

      # virtio-rng-pci: https://wiki.qemu.org/Features/VirtIORNG

- 

      qemu_cmd = ["/usr/bin/qemu-system-x86_64",

                  "-cpu", "host", "-smp", get_qemu_smp_arg(),

                  "-m", param_m, image, "-enable-kvm", "-snapshot", "-cdrom", cloudinit,
@@ -360,19 +293,14 @@ 

                  "-device", "virtio-rng-pci", "-rtc", "base=utc",

                  "-device", "isa-serial,chardev=pts2", "-chardev", "file,id=pts2,path=" + log_guest,

                  "-display", "none"]

- 

      qemu_cmd += AdditionalDrives.generate()

- 

      if diagnose:

          qemu_cmd += ["-vnc", DEF_HOST + ":1,to=4095"]

- 

      qemu_proc = subprocess.Popen(qemu_cmd, stdout=open(log_qemu, 'a'), stderr=subprocess.STDOUT)

      time.sleep(5)

- 

      if qemu_proc and diagnose:

          logger.info("qemu-kvm is running with VNC server. PID: {}".format(qemu_proc.pid))

          logger.info("netstat -ltpn4 | grep {0} # to find VNC server port".format(qemu_proc.pid))

- 

      return qemu_proc, port, log_guest
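To make the fmf_get() lookups around the qemu command concrete, here is a sketch of the metadata shape they expect. The qemu/drive entries (optional size and path per drive) and qemu/net_nic/model come from this diff; where the node sits in the FMF tree and the key behind param_m are not shown here, and all values are purely illustrative.

import yaml

node_yaml = """
qemu:
  net_nic:
    model: e1000            # the script defaults to virtio
  drive:
    - size: 4294967296      # 4 GiB sparse file; the script defaults to 2 GiB
    - path: /var/tmp        # directory used for the temporary sparse file
"""
qemu_cfg = yaml.safe_load(node_yaml)["qemu"]
print(qemu_cfg["drive"], qemu_cfg["net_nic"]["model"])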

  

  
@@ -380,15 +308,12 @@ 

      if not image.endswith((".qcow2", ".qcow2c")):

          logger.info("Return empty inventory for image: %s.", image)

          return EMPTY_INVENTORY

- 

      null = open(os.devnull, 'w')

- 

      try:

          tty = os.open("/dev/tty", os.O_WRONLY)

          os.dup2(tty, 2)

      except OSError:

          tty = None

- 

      # A directory for temporary stuff

      directory = tempfile.mkdtemp(prefix="inventory-cloud")

      identity = os.path.join(directory, "identity")
@@ -401,17 +326,13 @@ 

      userdata = os.path.join(directory, "user-data")

      with open(userdata, 'w') as f:

          f.write(USER_DATA)

- 

      # Create our cloud init so we can log in

      cloudinit = os.path.join(directory, "cloud-init.iso")

      subprocess.check_call(["/usr/bin/genisoimage", "-input-charset", "utf-8",

                             "-volid", "cidata", "-joliet", "-rock", "-quiet",

                             "-output", cloudinit, userdata, metadata], stdout=null)

- 

      logger.info("Launching virtual machine for {0}".format(image))

- 

      # And launch the actual VM

- 

      proc = None  # for failure detection

      cpe = None  # for exception scoping

      log = None
@@ -425,7 +346,6 @@ 

      if proc is None:

          raise RuntimeError("Could not launch VM for qcow2 image"

                             " '{0}':{1}".format(image, cpe.output))

- 

      for _ in range(0, 30):

          try:

              # The variables
@@ -437,11 +357,9 @@ 

                  "ansible_ssh_private_key_file": identity,

                  "ansible_ssh_common_args": "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"

              }

- 

              # Write out a handy inventory file, for our use and for debugging

              inventory = os.path.join(directory, "inventory")

              write_debug_inventory(inventory, {image: variables})

- 

              # Wait for ssh to come up

              ping = [

                  ansible_bin,
@@ -475,10 +393,8 @@ 

          except OSError:

              output = ""

          raise RuntimeError("Could not access VM launched from qcow2 image: {0}{1}".format(image,  output))

- 

      # Process of our parent

      ppid = os.getppid()

- 

      child = os.fork()

      if child:

          # Need to figure out what python interpreter to use
@@ -507,20 +423,16 @@ 

          # Update inventory file

          write_debug_inventory(inventory, {image: variables})

          return variables

- 

      # Daemonize and watch the processes

      os.chdir("/")

      os.setsid()

      os.umask(0)

- 

      if tty is None:

          tty = null.fileno()

- 

      # Duplicate standard input to standard output and standard error.

      os.dup2(null.fileno(), 0)

      os.dup2(tty, 1)

      os.dup2(tty, 2)

- 

      # alternatively, lock on a file

      lock_file = os.environ.get("LOCK_ON_FILE", None)

      ssh_cmd = ("ssh -p {port} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i {identity} {user}@{host}"
@@ -532,7 +444,6 @@ 

      logger.info("Wait until parent for provision-script (ansible-playbook) dies or qemu.")

      while True:

          time.sleep(3)

- 

          if lock_file:

              if not os.path.exists(lock_file):

                  logger.error("Lock file is gone.")
@@ -560,14 +471,68 @@ 

      sys.exit(0)

  

  

+ def main(argv):

+     global logger

+     global diagnose

+     logger = logging.getLogger(__name__)

+     logger.setLevel(logging.DEBUG)

+     # stderr output

+     conhandler = logging.StreamHandler()

+     # By default, print messages with level >= WARNING to stderr; this can be changed

+     # by setting TEST_DEBUG=1.

+     try:

+         diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))

+     except ValueError:

+         diagnose = 0

+     conhandler.setLevel(logging.WARNING)

+     if diagnose:

+         # Collect all messages with any log level to stderr.

+         conhandler.setLevel(logging.NOTSET)

+     # Log format for stderr.

+     log_format = "[%(levelname)-5.5s] {}: %(message)s".format(os.path.basename(__file__))

+     formatter = logging.Formatter(log_format)

+     conhandler.setFormatter(formatter)

+     logger.addHandler(conhandler)

+     parser = argparse.ArgumentParser(description="Inventory for a QCow2 test image")

+     parser.add_argument("--list", action="store_true", help="Verbose output")

+     parser.add_argument('--host', help="Get host variables")

+     parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))

+     opts = parser.parse_args()

+     # Send logs to common logfile for all default provisioners.

+     log_file = get_artifact_path(LOG_FILE)

+     fhandler = logging.FileHandler(log_file)

+     # Collect all messages with any log level to log file.

+     fhandler.setLevel(logging.NOTSET)

+     log_format = ("%(asctime)s [{}/%(threadName)-12.12s] [%(levelname)-5.5s]:"

+                   "%(message)s").format(os.path.basename(__file__))

+     logFormatter = logging.Formatter(log_format)

+     fhandler.setFormatter(logFormatter)

+     logger.addHandler(fhandler)

+     logger.info("Start provisioner.")

+     ansibles = ['ansible', 'ansible-3', None]

+     global ansible_bin

+     ansible_bin = functools.reduce(which, ansibles)

+     if not ansible_bin:

+         raise Exception("Failed to find ansible.")

+     logger.info("Path to ansible: %s", ansible_bin)

+     if opts.host:

+         data = inv_host(opts.host)

+     else:

+         data = inv_list(opts.subjects)

+     # Dump Ansible inventory.

+     sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))

+ 

+ 

  if __name__ == '__main__':

      ret = -1

      try:

          main(sys.argv)

          ret = 0

      except RuntimeError as ex:

+         print_bad_inventory()

          logger.error("{0}".format(ex))

      except Exception:

+         print_bad_inventory()

          # Backtrace stack goes to log file. If TEST_DEBUG == 1, it goes to stderr too.

          logger.error("Fatal error in provision script.", exc_info=True)

      sys.exit(ret)
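Finally, a hedged sketch of how this dynamic-inventory script is typically driven. Only the --list flag, the subjects argument and the TEST_SUBJECTS variable come from main() above; the script path, the image name and the expected top-level groups are assumptions for illustration.

import json
import os
import subprocess

# Placeholder image path; any *.qcow2 test subject would do.
env = dict(os.environ, TEST_SUBJECTS="/var/tmp/example.qcow2")
# Placeholder location of this script.
out = subprocess.check_output(["./standard-inventory-qcow2", "--list"], env=env)
inventory = json.loads(out)
# On success the output is an Ansible inventory; judging from the fallback
# inventory above, it carries the groups 'localhost', 'subjects' and '_meta'.
print(sorted(inventory))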

Commit 22f7af3 fixes this pull-request

Pull-Request has been merged by astepano

5 years ago
