@@ -0,0 +1,231 @@
+ #!/usr/bin/python3
+ # SPDX-License-Identifier: MIT
+ # Copyright (c) 2019 Red Hat Inc.
+
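+ # Dynamic Ansible inventory / provisioner: starts each test subject as a
+ # Podman container and prints the matching inventory as JSON on stdout.
+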
+ import argparse
+ import distutils.util
+ import errno
+ import json
+ import logging
+ import os
+ import sys
+ import shlex
+ import signal
+ import subprocess
+ import time
+ from distutils import dir_util
+
+ EMPTY_INVENTORY = {}
+ LOG_FILE = "default_provisioners.log"
+ CONTAINER_TEMP_ARTIFACTS = "/tmp/artifacts"
+ PODMAN_BIN = "/usr/bin/podman"
+
+
+ def print_bad_inventory():
+     """Print a bad inventory on any uncaught exception. This prevents
+     the playbook from being run on localhost.
+     """
+     fake_host = "fake_host"
+     fake_hostname = "standard-inventory-qcow2_failed_check_logs"
+     hosts = [fake_host]
+     bad_inv = {"localhost": {"hosts": hosts, "vars": {}},
+                "subjects": {"hosts": hosts, "vars": {}},
+                "_meta": {"hostvars": {fake_host: {"ansible_host": fake_hostname}}}}
+
+     sys.stdout.write(json.dumps(bad_inv, indent=4, separators=(',', ': ')))
+
+
+ def get_artifact_path(path=""):
+     """Return the path to an artifact file in the artifacts directory. If
+     path == "" then return the artifacts directory itself. Create the
+     artifacts directory if necessary.
+     """
+     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))
+     try:
+         os.makedirs(artifacts)
+     except OSError as exc:
+         if exc.errno != errno.EEXIST or not os.path.isdir(artifacts):
+             raise
+     return os.path.join(artifacts, path)
+
+
+ def copy_artifacts_from_container(name):
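+     """Copy /tmp/artifacts out of the container into the host-side staging
+     directory by mounting the container filesystem with "podman mount".
+     """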
+     container_path = subprocess.check_output([PODMAN_BIN, "mount", name]).rstrip().decode('utf-8')
+     container_artifacts_path = container_path + CONTAINER_TEMP_ARTIFACTS
+
+     dir_util.copy_tree(container_artifacts_path, CONTAINER_TEMP_ARTIFACTS)
+
+     subprocess.call([PODMAN_BIN, "umount", name])
+
+
+ def inv_list(subjects):
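+     """Provision every subject with inv_host() and return the combined
+     Ansible inventory, or EMPTY_INVENTORY if no host could be provisioned.
+     """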
+     hosts = []
+     variables = {}
+
+     for subject in subjects:
+         name, host_vars = inv_host(subject)
+         if host_vars:
+             hosts.append(name)
+             variables[name] = host_vars
+
+     if not hosts:
+         return EMPTY_INVENTORY
+
+     return {"localhost": {"hosts": hosts, "vars": {}},
+             "subjects": {"hosts": hosts, "vars": {}},
+             "_meta": {"hostvars": variables}}
+
+
+ def inv_host(subject):
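+     """Start (or reuse) a Podman container for one "image:name" subject and
+     return its host name and Ansible variables. A forked child keeps running
+     to collect logs/artifacts and stop the container once the parent exits.
+     """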
+     words = subject.split(":")
+     image = words[0]
+     name = words[1]
+
+     null = open(os.devnull, 'w')
+     try:
+         tty = os.open("/dev/tty", os.O_WRONLY)
+         os.dup2(tty, 2)
+     except OSError:
+         tty = None
+
+     logger.info("Launching Podman container for {0}".format(image))
+
+     # Make sure the podman service is running
+     try:
+         subprocess.check_call(["/usr/bin/systemctl", "start", "io.podman.service"])
+     except subprocess.CalledProcessError:
+         raise RuntimeError("Could not start podman service")
+
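+     # Reuse the container if one with this name already exists; otherwise run
+     # a fresh one from the image and keep it alive with "sleep infinity".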
+     try:
+         subprocess.check_call([PODMAN_BIN, "start", name], stdout=sys.stderr.fileno())
+     except subprocess.CalledProcessError:
+         cmd = [PODMAN_BIN, "run", "-d", "--name", name, "--entrypoint=/bin/sh", image, "-c", "sleep infinity"]
+         try:
+             subprocess.check_call(cmd, stdout=sys.stderr.fileno())
+         except subprocess.CalledProcessError:
+             raise RuntimeError("Could not start container image: {0}".format(image))
+
+     # Directory to place artifacts
+     artifacts = os.environ.get("TEST_ARTIFACTS", os.path.join(os.getcwd(), "artifacts"))
+
+     # Ansible connection variables for this host
+     variables = {
+         "ansible_connection": "podman",
+         "ansible_python_interpreter": "/usr/bin/python3"
+     }
+
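+     # Fork: the parent returns the inventory entry immediately, while the
+     # child stays behind to watch the parent and clean up the container.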
+     ppid = os.getppid()
+     pid = os.fork()
+     if pid > 0:
+         return name, variables
+
+     # Daemonize and watch the processes
+     os.chdir("/")
+     os.setsid()
+     os.umask(0)
+     if tty is None:
+         tty = null.fileno()
+
+     # Redirect standard input to /dev/null and standard output/error to the tty.
+     os.dup2(null.fileno(), 0)
+     os.dup2(tty, 1)
+     os.dup2(tty, 2)
+
+     # Now wait for the parent process to go away, then clean up the container
+     while True:
+         time.sleep(3)
+         try:
+             os.kill(ppid, 0)
+         except OSError:
+             break  # The parent process no longer exists
+
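+     # With TEST_DEBUG=1, keep the container alive for debugging until this
+     # watcher process is killed manually.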
+     if diagnose:
+         def _signal_handler(*args):
+             logger.info("Diagnose ending.")
+
+         logger.info("kill {0} # when finished".format(os.getpid()))
+         signal.signal(signal.SIGTERM, _signal_handler)
+         signal.pause()
+
+     # Dump the container logs
+     os.makedirs(artifacts, exist_ok=True)
+     log = os.path.join(artifacts, "{0}.log".format(os.path.basename(image)))
+
+     with open(log, "w") as f:
+         subprocess.call([PODMAN_BIN, "logs", name], stdout=f.fileno())
+
+     # Fetch artifacts from the container
+     copy_artifacts_from_container(name)
+     dir_util.copy_tree(CONTAINER_TEMP_ARTIFACTS, artifacts)
+
+     # Stop the container
+     subprocess.check_call([PODMAN_BIN, "stop", name])
+
+     sys.exit(0)
+
+
+ def main(argv):
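+     """Set up logging, parse the command line and print the requested
+     inventory (--host variables or the full --list) as JSON on stdout.
+     """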
+     global logger
+     global diagnose
+     logger = logging.getLogger(__name__)
+     logger.setLevel(logging.DEBUG)
+
+     # stderr output
+     conhandler = logging.StreamHandler()
+
+     # By default, print messages with level >= warning to stderr; this can be
+     # changed by setting TEST_DEBUG=1.
+     try:
+         diagnose = distutils.util.strtobool(os.getenv("TEST_DEBUG", "0"))
+     except ValueError:
+         diagnose = 0
+     conhandler.setLevel(logging.WARNING)
+
+     if diagnose:
+         # Collect all messages with any log level to stderr.
+         conhandler.setLevel(logging.NOTSET)
+
+     # Log format for stderr.
+     log_format = "[%(levelname)-5.5s] {}: %(message)s".format(os.path.basename(__file__))
+     formatter = logging.Formatter(log_format)
+     conhandler.setFormatter(formatter)
+     logger.addHandler(conhandler)
+
+     parser = argparse.ArgumentParser(description="Inventory for a container image in a registry")
+     parser.add_argument("--list", action="store_true", help="List all hosts")
+     parser.add_argument("--host", help="Get host variables")
+     parser.add_argument("subjects", nargs="*", default=shlex.split(os.environ.get("TEST_SUBJECTS", "")))
+     opts = parser.parse_args()
+
+     # Send logs to common logfile for all default provisioners.
+     log_file = get_artifact_path(LOG_FILE)
+     fhandler = logging.FileHandler(log_file)
+
+     # Collect all messages with any log level to log file.
+     fhandler.setLevel(logging.NOTSET)
+     log_format = ("%(asctime)s [{}/%(threadName)-12.12s] [%(levelname)-5.5s]:"
+                   "%(message)s").format(os.path.basename(__file__))
+     logFormatter = logging.Formatter(log_format)
+     fhandler.setFormatter(logFormatter)
+     logger.addHandler(fhandler)
+
+     logger.info("Start provisioner.")
+     if opts.host:
+         _, data = inv_host(opts.host)
+     else:
+         data = inv_list(opts.subjects)
+
+     sys.stdout.write(json.dumps(data, indent=4, separators=(',', ': ')))
+
+
+ if __name__ == '__main__':
+     ret = -1
+     try:
+         main(sys.argv)
+         ret = 0
+     except Exception:
+         print_bad_inventory()
+         # The backtrace goes to the log file; with TEST_DEBUG=1 it also goes to stderr.
+         logger.info("Fatal error in provision script.", exc_info=True)
+
+     sys.exit(ret)

This change allows tests to be run in a Podman container.
How to test?
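
One way to exercise it, as a sketch only: the script path, image and container name below are placeholders (the file name isn't visible in this hunk), and it assumes podman and the io.podman.service socket are available, most likely running as root.

```python
#!/usr/bin/python3
# Hypothetical smoke test for the new provisioner; adjust SCRIPT and the image.
import json
import os
import subprocess

SCRIPT = "./standard-inventory-podman"  # placeholder path to the script added here
env = dict(os.environ)
env["TEST_SUBJECTS"] = "fedora:podman-inventory-smoke"  # "image:name", as split by inv_host()
env["TEST_ARTIFACTS"] = "/tmp/podman-inventory-artifacts"

out = subprocess.check_output([SCRIPT, "--list"], env=env)
inventory = json.loads(out.decode("utf-8"))
print(json.dumps(inventory, indent=4))

# A successful run lists the container under "subjects" with
# ansible_connection set to "podman"; an empty dict means provisioning failed.
host = inventory.get("_meta", {}).get("hostvars", {}).get("podman-inventory-smoke", {})
assert host.get("ansible_connection") == "podman"
```

After that, the same script should be usable as a dynamic inventory for ansible-playbook (with TEST_SUBJECTS exported and `-i <path-to-script>`), so the test playbook runs against the container instead of localhost.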