#2 Add a runner script to make it easier to deploy in openshift
Merged 4 years ago by asaleh. Opened 4 years ago by pingou.

file modified
+42 -59
@@ -25,6 +25,7 @@ 

  import datetime

  import logging

  import os

+ import sys

  import tempfile

  import time

  
@@ -33,27 +34,8 @@ 

  

  from utils import(

      MonitoringException,

-     print_user,

+     MonitoringUtils,

      run_command,

-     clone_repo,

-     add_remote,

-     switch_branch,

-     bump_release,

-     commit_changes,

-     push_changes,

-     pull_changes,

-     open_pullrequest,

-     get_nevr,

-     build_package,

-     get_build_tags,

-     get_update_id,

-     lookup_results_datagrepper,

-     lookup_ci_resultsdb,

-     waive_update,

-     get_pr_flag,

-     merge_pr,

-     finalize,

-     create_update,

  )

  

  _log = logging.getLogger(__name__)
@@ -63,7 +45,7 @@ 

      """The base class for all exceptions raised by this script."""

  

  

- def get_arguments():

+ def get_arguments(args):

      """ Parse and return the CLI arguments.

      """

      parser = argparse.ArgumentParser(
@@ -108,16 +90,17 @@ 

          help="Configuration file to use, specifying the URLs and all",

      )

  

-     return parser.parse_args()

+     return parser.parse_args(args)

  

  

- def main():

+ def main(args):

      """ Main method used by this script. """

      start = datetime.datetime.utcnow()

  

-     args = get_arguments()

+     args = get_arguments(args)

  

      conf = toml.load(args.conf)

+     utils = MonitoringUtils()

  

      name = conf["name_single"]

      namespace = conf["namespace"]
@@ -127,28 +110,28 @@ 

      with tempfile.TemporaryDirectory(prefix="ci-test-") as folder:

          print(f"Working in {folder}\n")

          if not args.nevr:

-             clone_repo(conf["fedpkg"], namespace, name, folder=folder)

+             utils.clone_repo(conf["fedpkg"], namespace, name, folder=folder)

              gitfolder = os.path.join(folder, name)

-             switch_branch(conf["fedpkg"], branch, folder=gitfolder)

-             bump_release(name, folder=gitfolder)

-             commit_changes("Bump release", folder=gitfolder)

-             nevr = get_nevr(conf["fedpkg"], folder=gitfolder)

+             utils.switch_branch(conf["fedpkg"], branch, folder=gitfolder)

+             utils.bump_release(name, folder=gitfolder)

+             utils.commit_changes("Bump release", folder=gitfolder)

+             nevr = utils.get_nevr(conf["fedpkg"], folder=gitfolder)

              print(f"   Upcoming build : {nevr}")

  

              if args.no_pr:

                  # Push to the main repo

-                 push_changes(gitfolder, "origin", branch)

+                 utils.push_changes(gitfolder, "origin", branch)

              else:

                  # Add the fork as remote, push to the it, open the PR,

                  # wait for CI to flag the PR, twice, merge the PR

-                 add_remote(

+                 utils.add_remote(

                      f"{fas_username}",

                      f"ssh://{fas_username}@{conf['distgit_host']}/"

                      f"forks/{fas_username}/{namespace}/{name}.git",

                      folder=gitfolder,

                  )

-                 push_changes(gitfolder, fas_username, branch, force=True)

-                 pr_created, pr_id, pr_uid = open_pullrequest(

+                 utils.push_changes(gitfolder, fas_username, branch, force=True)

+                 pr_created, pr_id, pr_uid = utils.open_pullrequest(

                      base_url=conf["pagure_dist_git"],

                      username=fas_username,

                      namespace=namespace,
@@ -158,14 +141,14 @@ 

                  )

                  if pr_created:

                      # Check that pr pipeline is running

-                     lookup_results_datagrepper(

+                     utils.lookup_results_datagrepper(

                          base_url=conf["datagrepper"],

                          name="CI (running)",

                          topic=f"org.centos.{conf['_ci_env']}.ci.dist-git-pr.test.running",

                          rev=pr_uid,

                      )

                      # Check that CI flag pending was set

-                     get_pr_flag(

+                     utils.get_pr_flag(

                          base_url=conf["pagure_dist_git"],

                          username=fas_username,

                          namespace=namespace,
@@ -175,14 +158,14 @@ 

                          flag_status="pending",

                      )

                      # Check that pr pipeline has finished

-                     lookup_results_datagrepper(

+                     utils.lookup_results_datagrepper(

                          base_url=conf["datagrepper"],

                          name="CI (complete)",

                          topic=f"org.centos.{conf['_ci_env']}.ci.dist-git-pr.test.error",

                          rev=pr_uid,

                      )

                      # Check that CI flag failure was set

-                     get_pr_flag(

+                     utils.get_pr_flag(

                          base_url=conf["pagure_dist_git"],

                          username=fas_username,

                          namespace=namespace,
@@ -193,7 +176,7 @@ 

                          duration=25,

                      )

                      # Merge the PR: TODO

-                     merge_pr(

+                     utils.merge_pr(

                          base_url=conf["pagure_dist_git"],

                          username=fas_username,

                          namespace=namespace,
@@ -201,15 +184,15 @@ 

                          pr_id=pr_id,

                          token=conf["pagure_token"],

                      )

-                     pull_changes(gitfolder, "origin", branch)

+                     utils.pull_changes(gitfolder, "origin", branch)

                  else:

                      return

  

              # Build the package

-             build_package(conf["fedpkg"], folder=gitfolder)

+             utils.build_package(conf["fedpkg"], folder=gitfolder)

  

              # Check the tag of the build

-             get_build_tags(

+             utils.get_build_tags(

                  conf.get("koji_hub"),

                  nevr,

                  expected_ends=["updates-candidate", "signing-pending"],
@@ -218,9 +201,9 @@ 

              nevr = args.nevr

  

          # Retrieve or create the update

-         updateid = get_update_id(nevr, conf["bodhi"])

+         updateid = utils.get_update_id(nevr, conf["bodhi"])

          if not args.update and not args.auto_update:

-             create_update(conf["bodhi-cli"],

+             utils.create_update(conf["bodhi-cli"],

                  nevr,

                  prod=conf["_env"] == "prod",

                  username=conf.get("bodhi-user"),
@@ -232,18 +215,18 @@ 

              updateid = args.update

  

          # Check the tag of the build

-         get_build_tags(

+         utils.get_build_tags(

              conf.get("koji_hub"),

              nevr,

              expected_ends=["signing-pending", "testing-pending"],

          )

  

          if not updateid:

-             finalize(start)

+             utils.finalize(start)

              return

  

          # Check that bodhi notified the pipeline it can run

-         lookup_results_datagrepper(

+         utils.lookup_results_datagrepper(

              base_url=conf["datagrepper"],

              name="bodhi to CI",

              topic=f"org.fedoraproject.{conf['_env']}.bodhi.update.status."
@@ -252,14 +235,14 @@ 

          )

  

          # Check that the CI pipeline is running

-         lookup_results_datagrepper(

+         utils.lookup_results_datagrepper(

              base_url=conf["datagrepper"],

              name="CI (running)",

              topic=f"org.centos.{conf['_ci_env']}.ci.koji-build.test.running",

              nevr=nevr,

          )

          # Check at the CI pipeline has completed

-         lookup_results_datagrepper(

+         utils.lookup_results_datagrepper(

              base_url=conf["datagrepper"],

              name="CI (complete)",

              topic=f"org.centos.{conf['_ci_env']}.ci.koji-build.test.error",
@@ -268,19 +251,19 @@ 

          )

  

          # Check the tag of the build

-         get_build_tags(

+         utils.get_build_tags(

              conf.get("koji_hub"),

              nevr,

              expected_ends=["testing-pending"],

          )

  

          # Check that the CI results made it to resultsdb

-         lookup_ci_resultsdb(

+         utils.lookup_ci_resultsdb(

              nevr=nevr, name="resultsdb(phx)", url=conf["resultsdb"]

          )

  

          # Check that resultsdb announced the new results

-         lookup_results_datagrepper(

+         utils.lookup_results_datagrepper(

              base_url=conf["datagrepper"],

              name="resultsdb",

              topic=f"org.fedoraproject.{conf['_env']}.resultsdb.result.new",
@@ -288,7 +271,7 @@ 

          )

  

          # Check that greenwave reacted to resultsdb's new results

-         lookup_results_datagrepper(

+         utils.lookup_results_datagrepper(

              base_url=conf["datagrepper"],

              name="greenwave",

              topic=f"org.fedoraproject.{conf['_env']}.greenwave.decision.update",
@@ -296,14 +279,14 @@ 

          )

  

          # Check the tag of the build -- build is blocked but should be signed

-         get_build_tags(

+         utils.get_build_tags(

              conf.get("koji_hub"),

              nevr,

              expected_ends=["testing-pending"],

          )

  

          if not args.no_waive:

-             waive_update(

+             utils.waive_update(

                  conf["bodhi-cli"],

                  updateid,

                  prod=conf["_env"] == "prod",
@@ -312,7 +295,7 @@ 

              )

  

              # Check that waiverdb announced the new waiver

-             lookup_results_datagrepper(

+             utils.lookup_results_datagrepper(

                  base_url=conf["datagrepper"],

                  name="waiverdb",

                  topic=f"org.fedoraproject.{conf['_env']}.waiverdb.waiver.new",
@@ -320,7 +303,7 @@ 

              )

  

              # Check that greenwave reacted to the new waiver

-             lookup_results_datagrepper(

+             utils.lookup_results_datagrepper(

                  base_url=conf["datagrepper"],

                  name="greenwave",

                  topic=f"org.fedoraproject.{conf['_env']}.greenwave.decision.update",
@@ -328,17 +311,17 @@ 

              )

  

              # Check the tag of the build -- build was waived, let is through

-             get_build_tags(

+             utils.get_build_tags(

                  conf.get("koji_hub"),

                  nevr,

                  expected_ends=["f32"],

              )

  

-     finalize(start)

+     utils.finalize(start)

  

  

  if __name__ == "__main__":

      try:

-         main()

+         main(sys.argv[1:])

      except KeyboardInterrupt:

          print("  -- Interupted --")

@@ -25,6 +25,7 @@ 

  import datetime

  import logging

  import os

+ import sys

  import tempfile

  import time

  
@@ -64,7 +65,7 @@ 

      """The base class for all exceptions raised by this script."""

  

  

- def get_arguments():

+ def get_arguments(args):

      """ Parse and return the CLI arguments.

      """

      parser = argparse.ArgumentParser(
@@ -94,7 +95,7 @@ 

          help="Make a build, conflicting in the main tag",

      )

  

-     return parser.parse_args()

+     return parser.parse_args(args)

  

  

  def create_side_tag(command, folder):
@@ -138,11 +139,11 @@ 

      return (nevrs, target)

  

  

- def main():

+ def main(args):

      """ Main method used by this script. """

      start = datetime.datetime.utcnow()

  

-     args = get_arguments()

+     args = get_arguments(args)

  

      conf = toml.load(args.conf)

  
@@ -303,6 +304,6 @@ 

  

  if __name__ == "__main__":

      try:

-         main()

+         main(sys.argv[1:])

      except KeyboardInterrupt:

          print("  -- Interupted --")

file added
+5
@@ -0,0 +1,5 @@ 

+ # Time between two runs in seconds

+ delay = 3600

+ 

+ # CLI arguments to give to the script testing the single package gating workflow

+ workflow_single_gating_args = "--conf monitor_gating_stg.cfg --auto-update --no-pr"

file added
+100
@@ -0,0 +1,100 @@ 

+ """

+ This script is meant to run the different tests that we have sequentially, with

+ a scheduler, i.e., after running all the tests, it will wait for a specified

+ amount of time and then run them again, until it's stopped.

+ """

+ 

+ import argparse

+ import datetime

+ import sched

+ import sys

+ import time

+ import utils

+ 

+ import fedora_messaging.api

+ import fedora_messaging.exceptions

+ import toml

+ 

+ import monitor_gating

+ 

+ s = sched.scheduler(time.time, time.sleep)

+ conf = toml.load

+ 

+ 

+ def get_arguments(args):

+     """ Load and parse the CLI arguments."""

+     parser = argparse.ArgumentParser(

+         description="Runner for the CI canary tests."

+     )

+     parser.add_argument(

+         "conf",

+         help="Configuration file for the different tests",

+     )

+ 

+     return parser.parse_args(args)

+ 

+ 

+ def notify(topic, message):

+     try:

+         msg = fedora_messaging.api.Message(

+             topic="monitor-gating.{}".format(topic),

+             body=message

+         )

+         fedora_messaging.api.publish(msg)

+     except fedora_messaging.exceptions.PublishReturned as err:

+         print(f"Fedora Messaging broker rejected message {msg.id}: {err}")

+     except fedora_messaging.exceptions.ConnectionException as err:

+         print(f"Error sending message {msg.id}: {err}")

+     except Exception as err:

+         print(f"Error sending fedora-messaging message: {err}")

+ 

+ 

+ def schedule(conf):

+     """ Run the test and schedules the next one. """

+     delay = conf["delay"]

+     print("Tests started:", datetime.datetime.utcnow())

+     try:

+         # Run the single-package gating workflow test

+         single_args = conf["workflow_single_gating_args"].split()

+         notify(

+             topic=f"single-build.start",

+             message={

+                 "arguments": single_args,

+             }

+         )

+         output = monitor_gating.main(single_args)

+         output_text="\n".join(output)

+         success="[FAILED]" not in output_text

+         notify(

+             topic=f"single-build.end.{success}",

+             message={

+                 "output": output,

+                 "output_text": output_text,

+                 "success": success,

+             }

+         )

+         print("Tests finished:", datetime.datetime.utcnow())

+     except Exception as err:

+         print(f"Tests failed with: {err}")

+     print(f"Next run in: {delay} seconds")

+     s.enter(delay, 1, schedule, argument=(conf,))

+ 

+ 

+ def main(args):

+     """ Schedule the first test and run the scheduler. """

+     args = get_arguments(args)

+     conf = toml.load(args.conf)

+     s.enter(0, 1, schedule, argument=(conf,))

+     s.run()

+ 

+ 

+ if __name__ == "__main__":

+     try:

+         main(sys.argv[1:])

+     except KeyboardInterrupt:

+         from code import InteractiveConsole

+         InteractiveConsole(

+             locals={"s": s}).interact(

+                 "ENTERING THE DEBUG CONSOLE:\n s is the scheduler\n ^d to quit",

+                 "LEAVING THE DEBUG CONSOLE"

+         )

file modified
+563 -548
@@ -23,630 +23,645 @@ 

      """The base class for all exceptions raised by this script."""

  

  

- def print_user(content, success=None):

-     """ Prints the specified content to the user.

-     """

-     spaces = 90

-     if success is not None:

-         end = None

-         if success:

-             content = "{} {}".format(content.ljust(spaces), "[DONE]")

-         else:

-             content = "{} {}".format(content.ljust(spaces), "[FAILED]")

-     else:

-         end = "\r"

+ class MonitoringUtils:

  

-     now = datetime.datetime.utcnow()

-     time = now.strftime("%H:%M:%S")

-     print(f"{time} - {content}", end=end, flush=True)

+     def __init__(self):

+         """ Instantiate the object. """

+         self.logs = []

  

+     def print_user(self, content, success=None):

+         """ Prints the specified content to the user.

+         """

+         spaces = 90

+         if success is not None:

+             end = None

+             if success:

+                 content = "{} {}".format(content.ljust(spaces), "[DONE]")

+             else:

+                 content = "{} {}".format(content.ljust(spaces), "[FAILED]")

+         else:

+             end = "\r"

  

- def run_command(command, cwd=None):

-     """ Run the specified command in a specific working directory if one

-     is specified.

-     """

-     output = None

-     try:

-         output = subprocess.check_output(

-             command, cwd=cwd, stderr=subprocess.PIPE

-         )

-     except subprocess.CalledProcessError as e:

-         _log.error(

-             "Command `{}` return code: `{}`".format(

-                 " ".join(command), e.returncode

-             )

-         )

-         _log.error("stdout:\n-------\n{}".format(e.stdout))

-         _log.error("stderr:\n-------\n{}".format(e.stderr))

-         raise MonitoringException("Command failed to run")

+         now = datetime.datetime.utcnow()

+         time = now.strftime("%H:%M:%S")

+         self.logs.append(f"{time} - {content}")

+         print(f"{time} - {content}", end=end, flush=True)

  

-     return output

  

- 

- def clone_repo(command, namespace, name, folder):

-     """ Clone the specified git repo into the specified folder.

-     """

-     info_log = f"Cloning the git repo: {namespace}/{name}"

-     print_user(info_log)

-     try:

-         run_command([command, "clone", f"{namespace}/{name}"], cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def clone_repo(self, command, namespace, name, folder):

+         """ Clone the specified git repo into the specified folder.

+         """

+         info_log = f"Cloning the git repo: {namespace}/{name}"

+         self.print_user(info_log)

+         try:

+             run_command([command, "clone", f"{namespace}/{name}"], cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def add_remote(name, url, folder):

-     """ Add the specified remote to the git repo in the folder with the

-     specified url.

-     """

-     info_log = f"Adding remote: {name}"

-     print_user(info_log)

-     try:

-         run_command(["git", "remote", "add", name, url], cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def add_remote(self, name, url, folder):

+         """ Add the specified remote to the git repo in the folder with the

+         specified url.

+         """

+         info_log = f"Adding remote: {name}"

+         self.print_user(info_log)

+         try:

+             run_command(["git", "remote", "add", name, url], cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def switch_branch(command, name, folder):

-     """ Switch to the specified git branch in the specified git repo.

-     """

-     info_log = f"Switching to branch: {name}"

-     print_user(info_log)

-     try:

-         run_command([command, "switch-branch", f"{name}"], cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def switch_branch(self, command, name, folder):

+         """ Switch to the specified git branch in the specified git repo.

+         """

+         info_log = f"Switching to branch: {name}"

+         self.print_user(info_log)

+         try:

+             run_command([command, "switch-branch", f"{name}"], cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def bump_release(name, folder):

-     """ Bump the release of the spec file the specified git repo.

-     """

-     info_log = f"Bumping release of: {name}.spec"

-     print_user(info_log)

-     try:

-         run_command(["rpmdev-bumpspec", f"{name}.spec"], cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def bump_release(self, name, folder):

+         """ Bump the release of the spec file the specified git repo.

+         """

+         info_log = f"Bumping release of: {name}.spec"

+         self.print_user(info_log)

+         try:

+             run_command(["rpmdev-bumpspec", f"{name}.spec"], cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def commit_changes(commit_log, folder):

-     """ Commit all the changes made to *tracked* files in the git repo

-     with the specified commit log.

-     """

-     info_log = f"Commiting changes"

-     print_user(info_log)

-     try:

-         run_command(["git", "commit", "-asm", commit_log], cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def commit_changes(self, commit_log, folder):

+         """ Commit all the changes made to *tracked* files in the git repo

+         with the specified commit log.

+         """

+         info_log = f"Commiting changes"

+         self.print_user(info_log)

+         try:

+             run_command(["git", "commit", "-asm", commit_log], cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def push_changes(folder, target, branch, force=False):

-     """ Push all changes using git.

-     """

-     info_log = f"Pushing changes"

-     print_user(info_log)

-     try:

-         cmd = ["git", "push", target, branch]

-         if force:

-             cmd.append("-f")

-         run_command(cmd, cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def push_changes(self, folder, target, branch, force=False):

+         """ Push all changes using git.

+         """

+         info_log = f"Pushing changes"

+         self.print_user(info_log)

+         try:

+             cmd = ["git", "push", target, branch]

+             if force:

+                 cmd.append("-f")

+             run_command(cmd, cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def pull_changes(folder, target, branch):

-     """ Pull all changes using git.

-     """

-     info_log = f"Pushing changes"

-     print_user(info_log)

-     try:

-         cmd = ["git", "pull", "--rebase", target, branch]

-         run_command(cmd, cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

+     def pull_changes(self, folder, target, branch):

+         """ Pull all changes using git.

+         """

+         info_log = f"Pushing changes"

+         self.print_user(info_log)

+         try:

+             cmd = ["git", "pull", "--rebase", target, branch]

+             run_command(cmd, cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

  

- def open_pullrequest(base_url, username, namespace, name, branch, token):

-     """ Open a pull-request from the user's fork to the main project for

-     the specified branch.

-     """

-     info_log = f"Creating PR from forks/{username}/{namespace}/{name}"

-     print_user(info_log)

-     url = "/".join(

-         [base_url.rstrip("/"), "api/0", namespace, name, "pull-request/new"]

-     )

-     data = {

-         "branch_to": branch,

-         "branch_from": branch,

-         "repo_from": name,

-         "repo_from_username": username,

-         "repo_from_namespace": namespace,

-         "initial_comment": "Testing PR",

-         "title": "Test PR for monitoring",

-     }

-     headers = {"Authorization": f"token {token}"}

-     req = requests.post(url=url, data=data, headers=headers)

-     if not req.ok:

-         print(req.text)

-         success = False

-         pr_id = None

-         pr_uid = None

-     else:

-         output = req.json()

-         pr_id = str(output["id"])

-         pr_uid = output["uid"]

+     def open_pullrequest(self, base_url, username, namespace, name, branch, token):

+         """ Open a pull-request from the user's fork to the main project for

+         the specified branch.

+         """

+         info_log = f"Creating PR from forks/{username}/{namespace}/{name}"

+         self.print_user(info_log)

          url = "/".join(

-             [base_url.rstrip("/"), namespace, name, "pull-request", pr_id]

+             [base_url.rstrip("/"), "api/0", namespace, name, "pull-request/new"]

          )

-         info_log = f"PR created {url}"

-         success = True

-     print_user(info_log, success=success)

-     return (success, pr_id, pr_uid)

- 

+         data = {

+             "branch_to": branch,

+             "branch_from": branch,

+             "repo_from": name,

+             "repo_from_username": username,

+             "repo_from_namespace": namespace,

+             "initial_comment": "Testing PR",

+             "title": "Test PR for monitoring",

+         }

+         headers = {"Authorization": f"token {token}"}

+         req = requests.post(url=url, data=data, headers=headers)

+         if not req.ok:

+             print(req.text)

+             success = False

+             pr_id = None

+             pr_uid = None

+         else:

+             output = req.json()

+             pr_id = str(output["id"])

+             pr_uid = output["uid"]

+             url = "/".join(

+                 [base_url.rstrip("/"), namespace, name, "pull-request", pr_id]

+             )

+             info_log = f"PR created {url}"

+             success = True

+         self.print_user(info_log, success=success)

+         return (success, pr_id, pr_uid)

  

- def get_nevr(command, folder):

-     """ Get the name-epoch-version-release presently in git

-     """

-     info_log = f"Getting nevr"

-     print_user(info_log)

-     try:

-         nevr = run_command([command, "verrel"], cwd=folder)

-         print_user(info_log, success=True)

-         return nevr.strip().decode("utf-8")

-     except MonitoringException:

-         print_user(info_log, success=False)

  

+     def get_nevr(self, command, folder):

+         """ Get the name-epoch-version-release presently in git

+         """

+         info_log = f"Getting nevr"

+         self.print_user(info_log)

+         try:

+             nevr = run_command([command, "verrel"], cwd=folder)

+             self.print_user(info_log, success=True)

+             return nevr.strip().decode("utf-8")

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

- def build_package(command, folder, target=None):

-     """ Build the package in the current branch

-     """

-     info_log = f"Building the package"

-     print_user(info_log)

-     command = [command, "build"]

-     if target:

-         command.extend(["--target", target])

-     try:

-         run_command(command, cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

  

+     def build_package(self, command, folder, target=None):

+         """ Build the package in the current branch

+         """

+         info_log = f"Building the package"

+         self.print_user(info_log)

+         command = [command, "build"]

+         if target:

+             command.extend(["--target", target])

+         try:

+             run_command(command, cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

+ 

+ 

+     def chain_build_packages(self, command, packages, folder, target=None):

+         """ Chain-build the packages in the current branch

+         """

+         if not isinstance(packages, list):

+             packages = [packages]

+         info_log = f"Chain-building the packages: {packages + [os.path.basename(folder)]}"

+         self.print_user(info_log)

+         command = [command, "chain-build"]

+         command.extend(packages)

+         if target:

+             command.extend(["--target", target])

+         try:

+             run_command(command, cwd=folder)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

  

- def chain_build_packages(command, packages, folder, target=None):

-     """ Chain-build the packages in the current branch

-     """

-     if not isinstance(packages, list):

-         packages = [packages]

-     info_log = f"Chain-building the packages: {packages + [os.path.basename(folder)]}"

-     print_user(info_log)

-     command = [command, "chain-build"]

-     command.extend(packages)

-     if target:

-         command.extend(["--target", target])

-     try:

-         run_command(command, cwd=folder)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

  

+     def get_build_tags(self, koji_url, nevr, expected_ends):

+         """ List the tags associated with the specified build.

+         """

+         # return

+         start = datetime.datetime.utcnow()

+         info_log = f"Retrieving koji tags"

+         self.print_user(info_log)

+         command = [

+             "koji",

+         ]

+         if koji_url:

+             command.extend([

+                 "-s",

+                 koji_url,

+             ])

+         command.extend([

+             "call",

+             "listTags",

+             nevr

+         ])

  

- def get_build_tags(koji_url, nevr, expected_ends):

-     """ List the tags associated with the specified build.

-     """

-     # return

-     start = datetime.datetime.utcnow()

-     info_log = f"Retrieving koji tags"

-     print_user(info_log)

-     command = [

-         "koji",

-         "-s",

-         koji_url,

-         "call",

-         "listTags",

-         nevr

-     ]

- 

-     success = False

-     tags = None

-     broke = False

-     while True:

-         try:

-             output = run_command(command)

-             output = output.decode('utf-8')

+         success = False

+         tags = None

+         broke = False

+         while True:

              try:

-                 data = ast.literal_eval(output.strip())

-             except Exception:

-                 print("Could not decode JSON in:")

-                 print(command)

-                 print(output)

-                 broke = True

-                 break

-             tags = [tag.get("name") for tag in data]

-             for tag_name in tags:

-                 for expectation in expected_ends:

-                     if tag_name.endswith(expectation):

-                         success = True

+                 output = run_command(command)

+                 output = output.decode('utf-8')

+                 try:

+                     data = ast.literal_eval(output.strip())

+                 except Exception:

+                     print("Could not decode JSON in:")

+                     print(command)

+                     print(output)

+                     broke = True

+                     break

+                 tags = [tag.get("name") for tag in data]

+                 for tag_name in tags:

+                     for expectation in expected_ends:

+                         if tag_name.endswith(expectation):

+                             success = True

+                             broke = True

+                             break

+                     if success:

                          broke = True

                          break

-                 if success:

-                     broke = True

+                 if broke:

                      break

-             if broke:

-                 break

  

-             if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

+                 if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

+                     success = False

+                     info_log = f"Update for {nevr} not created within 15 minutes"

+                     break

+ 

+                 # Only query koji every 30 seconds

+                 time.sleep(30)

+             except MonitoringException:

                  success = False

-                 info_log = f"Update for {nevr} not created within 15 minutes"

                  break

  

-             # Only query koji every 30 seconds

-             time.sleep(30)

+         info_log = f"Retrieving koji tags: {tags}"

+         self.print_user(info_log, success=success)

+ 

+ 

+     def create_update(self, command, item, prod=True, username=None, password=None, from_tag=False):

+         """ Create the update for the package built.

+         """

+         info_log = f"Creating a bodhi update"

+         self.print_user(info_log)

+         command = [

+             command,

+             "updates",

+             "new",

+             "--notes",

+             "Bump release to test CI",

+             "--type",

+             "bugfix",

+             "--autotime",

+         ]

+         if from_tag:

+             command.append("--from-tag")

+         command.append(item)

+ 

+         if not prod:

+             command.append("--staging")

+         if username:

+             command.extend(["--user", username])

+         if password:

+             command.extend(["--password", password])

+         try:

+             run_command(command)

+             self.print_user(info_log, success=True)

          except MonitoringException:

-             success = False

-             break

+             self.print_user(info_log, success=False)

  

-     info_log = f"Retrieving koji tags: {tags}"

-     print_user(info_log, success=success)

  

+     def get_update_id(self, nevr, url):

+         """ Retrieve the update identifier from bodhi for the given nevr. """

+         start = datetime.datetime.utcnow()

+         info_log = f"Retrieving update created"

+         self.print_user(info_log)

+         url = f"{url}/updates/?builds={nevr}"

+         updateid = None

+         success = True

+         while True:

+             req = requests.get(url)

+             data = req.json()

+             if data["updates"]:

+                 updateid = data["updates"][0]["updateid"]

+             if updateid:

+                 break

  

- def create_update(command, item, prod=True, username=None, password=None, from_tag=False):

-     """ Create the update for the package built.

-     """

-     info_log = f"Creating a bodhi update"

-     print_user(info_log)

-     command = [

-         command,

-         "updates",

-         "new",

-         "--notes",

-         "Bump release to test CI",

-         "--type",

-         "bugfix",

-         "--autotime",

-     ]

-     if from_tag:

-         command.append("--from-tag")

-     command.append(item)

- 

-     if not prod:

-         command.append("--staging")

-     if username:

-         command.extend(["--user", username])

-     if password:

-         command.extend(["--password", password])

-     try:

-         run_command(command)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

- 

- 

- def get_update_id(nevr, url):

-     """ Retrieve the update identifier from bodhi for the given nevr. """

-     start = datetime.datetime.utcnow()

-     info_log = f"Retrieving update created"

-     print_user(info_log)

-     url = f"{url}/updates/?builds={nevr}"

-     updateid = None

-     success = True

-     while True:

-         req = requests.get(url)

-         data = req.json()

-         if data["updates"]:

-             updateid = data["updates"][0]["updateid"]

-         if updateid:

-             break

- 

-         if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

-             success = False

-             info_log = f"Update for {nevr} not created within 15 minutes"

-             break

+             if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

+                 success = False

+                 info_log = f"Update for {nevr} not created within 15 minutes"

+                 break

  

-         # Only query bodhi every 30 seconds

-         time.sleep(30)

+             # Only query bodhi every 30 seconds

+             time.sleep(30)

  

-     print_user(info_log, success=success)

-     return updateid

+         self.print_user(info_log, success=success)

+         return updateid

+ 

+ 

+     def lookup_results_datagrepper(

+         self, base_url, name, topic, nevr=None, nevrs=None, rev=None,

+         bodhi_id=None, start=None, duration=15

+     ):

+         """ Check the CI results in datagrepper for results about our specified

+         build.

+         """

+         if start is None:

+             start = datetime.datetime.utcnow()

+         info_log = f"Checking datagrepper for {name} messages"

+         self.print_user(info_log)

+         # Start pulling messages 10 minutes before now

+         start_time = start - datetime.timedelta(minutes=10)

+         # Limiting the number of row per page to 10 allows for quicker results

+         url = (

+             base_url + f"?topic={topic}"

+             f"&start={start_time.timestamp()}&row_per_page=10"

+         )

  

+         success = None

+         returned_status = None

+         info_log = None

+         nevrs = nevrs or []

+         while True:

+             # We're assuming here that there won't be more than 100 messages for

+             # that topic coming in between the one we're interested in and when we

+             # are looking for it (10*10 == 100)

+             for page in range(1, 11):

+                 end_url = url

+                 end_url += f"&page={page}"

+                 data = requests.get(end_url).json()

+                 for message in data["raw_messages"]:

+ 

+                     # Old message format from the CI pipeline

+                     if "ci.pipeline" in message["topic"] and (

+                         message["msg"]["nvr"] == nevr

+                         or message["msg"]["nvr"] in nevrs

+                         or message["msg"]["rev"] == rev

+                     ):

+                         success = True

+                         returned_status = message["msg"]["status"]

+                         break

  

- def lookup_results_datagrepper(

-     base_url, name, topic, nevr=None, nevrs=None, rev=None, bodhi_id=None,

-     start=None, duration=15

- ):

-     """ Check the CI results in datagrepper for results about our specified

-     build.

-     """

-     if start is None:

-         start = datetime.datetime.utcnow()

-     info_log = f"Checking datagrepper for {name} messages"

-     print_user(info_log)

-     # Start pulling messages 10 minutes before now

-     start_time = start - datetime.timedelta(minutes=10)

-     # Limiting the number of row per page to 10 allows for quicker results

-     url = (

-         base_url + f"?topic={topic}"

-         f"&start={start_time.timestamp()}&row_per_page=10"

-     )

- 

-     success = None

-     returned_status = None

-     info_log = None

-     nevrs = nevrs or []

-     while True:

-         # We're assuming here that there won't be more than 100 messages for

-         # that topic coming in between the one we're interested in and when we

-         # are looking for it (10*10 == 100)

-         for page in range(1, 11):

-             end_url = url

-             end_url += f"&page={page}"

-             data = requests.get(end_url).json()

-             for message in data["raw_messages"]:

- 

-                 # Old message format from the CI pipeline

-                 if "ci.pipeline" in message["topic"] and (

-                     message["msg"]["nvr"] == nevr

-                     or message["msg"]["nvr"] in nevrs

-                     or message["msg"]["rev"] == rev

-                 ):

-                     success = True

-                     returned_status = message["msg"]["status"]

-                     break

+                     # New message format from the CI pipeline for koji builds

+                     if (

+                         "ci.koji-build" in message["topic"] and

+                         message["msg"]["artifact"]["nvr"] == nevr

+                     ):

+                         if message["topic"].endswith("test.complete"):

+                             success = True

+                             returned_status = message["msg"]["test"]["result"]

+                         elif message["topic"].endswith("test.error"):

+                             success = True

+                             returned_status = "error"

+                         elif message["topic"].endswith("test.running"):

+                             success = True

+                             returned_status = "running"

+                         break

  

-                 # New message format from the CI pipeline for koji builds

-                 if (

-                     "ci.koji-build" in message["topic"] and

-                     message["msg"]["artifact"]["nvr"] == nevr

-                 ):

-                     if message["topic"].endswith("test.complete"):

-                         success = True

-                         returned_status = message["msg"]["test"]["result"]

-                     elif message["topic"].endswith("test.error"):

-                         success = True

-                         returned_status = "error"

-                     elif message["topic"].endswith("test.running"):

-                         success = True

-                         returned_status = "running"

-                     break

+                     # New message format from the CI pipeline for dist-git PR

+                     if (

+                         "ci.dist-git-pr" in message["topic"]

+                         and message["msg"]["artifact"]["type"] == "pull-request"

+                         and message["msg"]["artifact"]["uid"] == rev

+                     ):

+                         if message["topic"].endswith("test.complete"):

+                             success = True

+                             returned_status = message["msg"]["test"]["result"]

+                         elif message["topic"].endswith("test.error"):

+                             success = True

+                             returned_status = "error"

+                         elif message["topic"].endswith("test.running"):

+                             success = True

+                             returned_status = "running"

+                         break

  

-                 # New message format from the CI pipeline for dist-git PR

-                 if (

-                     "ci.dist-git-pr" in message["topic"]

-                     and message["msg"]["artifact"]["type"] == "pull-request"

-                     and message["msg"]["artifact"]["uid"] == rev

-                 ):

-                     if message["topic"].endswith("test.complete"):

-                         success = True

-                         returned_status = message["msg"]["test"]["result"]

-                     elif message["topic"].endswith("test.error"):

+                     # resultsdb messages

+                     if (

+                         "resultsdb" in message["topic"]

+                         and "nvr" in message["msg"]["data"]

+                         and (nevr in message["msg"]["data"]["nvr"]

+                         or message["msg"]["data"]["nvr"] in nevrs)

+                     ):

                          success = True

-                         returned_status = "error"

-                     elif message["topic"].endswith("test.running"):

-                         success = True

-                         returned_status = "running"

-                     break

- 

-                 # resultsdb messages

-                 if (

-                     "resultsdb" in message["topic"]

-                     and "nvr" in message["msg"]["data"]

-                     and (nevr in message["msg"]["data"]["nvr"]

-                     or message["msg"]["data"]["nvr"] in nevrs)

-                 ):

-                     success = True

-                     returned_status = message["msg"]["outcome"]

-                     break

+                         returned_status = message["msg"]["outcome"]

+                         break

  

-                 # greenwave messages

-                 if (

-                     "greenwave" in message["topic"]

-                     and (

-                     message["msg"]["subject_identifier"] == nevr

-                     or

-                     message["msg"]["subject_identifier"] in nevrs

-                     )

-                 ):

-                     success = True

-                     returned_status = message["msg"]["policies_satisfied"]

-                     break

+                     # greenwave messages

+                     if (

+                         "greenwave" in message["topic"]

+                         and (

+                         message["msg"]["subject_identifier"] == nevr

+                         or

+                         message["msg"]["subject_identifier"] in nevrs

+                         )

+                     ):

+                         success = True

+                         returned_status = message["msg"]["policies_satisfied"]

+                         break

  

-                 # waiverdb messages

-                 if (

-                     "waiverdb" in message["topic"]

-                     and (

-                     message["msg"]["subject_identifier"] == nevr

-                     or

-                     message["msg"]["subject_identifier"] in nevrs

-                     )

-                 ):

-                     success = True

-                     returned_status = ""

-                     break

+                     # waiverdb messages

+                     if (

+                         "waiverdb" in message["topic"]

+                         and (

+                         message["msg"]["subject_identifier"] == nevr

+                         or

+                         message["msg"]["subject_identifier"] in nevrs

+                         )

+                     ):

+                         success = True

+                         returned_status = ""

+                         break

  

-                 # bodhi messages

-                 if (

-                     "bodhi.update.status.testing" in message["topic"]

-                     and message["msg"]["artifact"]["id"].startswith(bodhi_id)

-                 ):

-                     success = True

-                     returned_status = ""

+                     # bodhi messages

+                     if (

+                         "bodhi.update.status.testing" in message["topic"]

+                         and message["msg"]["artifact"]["id"].startswith(bodhi_id)

+                     ):

+                         success = True

+                         returned_status = ""

+                         break

+                 if success is not None:

                      break

              if success is not None:

                  break

-         if success is not None:

-             break

  

-         if (datetime.datetime.utcnow() - start).seconds > (duration * 60):

-             success = False

-             info_log = f"{name} results not found in datagrepper"

-             break

+             if (datetime.datetime.utcnow() - start).seconds > (duration * 60):

+                 success = False

+                 info_log = f"{name} results not found in datagrepper"

+                 break

  

-         # Only query datagrepper every 30 seconds

-         time.sleep(30)

+             # Only query datagrepper every 30 seconds

+             time.sleep(30)

  

-     if info_log is None:

-         info_log = f"{name} results in datagrepper returned {returned_status}"

+         if info_log is None:

+             info_log = f"{name} results in datagrepper returned {returned_status}"

  

-     end = datetime.datetime.utcnow()

-     info_log += f" - ran for: {(end - start).seconds}s"

-     print_user(info_log, success=success)

+         end = datetime.datetime.utcnow()

+         info_log += f" - ran for: {(end - start).seconds}s"

+         self.print_user(info_log, success=success)

  

  

- def lookup_ci_resultsdb(nevr, name, url):

-     """ Check the CI results in the specified resultsdb for results about

-     our specified build.

-     """

-     start = datetime.datetime.utcnow()

-     info_log = f"Checking {name} for CI results "

-     print_user(info_log)

-     topic = "org.centos.prod.ci.pipeline.allpackages-build.complete"

-     if ".stg" in url:

-         topic = "org.centos.stage.ci.pipeline.allpackages-build.complete"

-     url = f"{url}?testcases={topic}"

- 

-     success = False

-     returned_status = None

-     info_log = None

-     while True:

-         # Assume we won't have more than 3 pages of results coming in b/w

-         # our checks

-         for page in [0, 1, 2]:

-             end_url = url

-             end_url += f"&page={page}"

-             data = requests.get(end_url).json()

-             for result in data["data"]:

-                 if nevr in result["data"]["nvr"]:

-                     success = True

-                     returned_status = result["data"]["status"][0]

+     def lookup_ci_resultsdb(self, nevr, name, url):

+         """ Check the CI results in the specified resultsdb for results about

+         our specified build.

+         """

+         start = datetime.datetime.utcnow()

+         info_log = f"Checking {name} for CI results "

+         self.print_user(info_log)

+         topic = "org.centos.prod.ci.pipeline.allpackages-build.complete"

+         if ".stg" in url:

+             topic = "org.centos.stage.ci.pipeline.allpackages-build.complete"

+         url = f"{url}?testcases={topic}"

+ 

+         success = False

+         returned_status = None

+         info_log = None

+         while True:

+             # Assume we won't have more than 3 pages of results coming in b/w

+             # our checks

+             for page in [0, 1, 2]:

+                 end_url = url

+                 end_url += f"&page={page}"

+                 data = requests.get(end_url).json()

+                 for result in data["data"]:

+                     if nevr in result["data"]["nvr"]:

+                         success = True

+                         returned_status = result["data"]["status"][0]

+                         break

+                 if success:

                      break

              if success:

                  break

-         if success:

-             break

  

-         if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

-             success = False

-             info_log = f"CI results did not show in {name} for {nevr} within 15 minutes"

-             break

+             if (datetime.datetime.utcnow() - start).seconds > (15 * 60):

+                 success = False

+                 info_log = f"CI results did not show in {name} for {nevr} within 15 minutes"

+                 break

  

-         # Only query datagrepper every 30 seconds

-         time.sleep(30)

+             # Only query datagrepper every 30 seconds

+             time.sleep(30)

  

-     if info_log is None:

-         info_log = f"CI results in {name} returned {returned_status}"

+         if info_log is None:

+             info_log = f"CI results in {name} returned {returned_status}"

+ 

+         end = datetime.datetime.utcnow()

+         info_log += f" - ran for: {(end - start).seconds}s"

+         self.print_user(info_log, success=success)

+ 

+ 

+     def waive_update(self, command, updateid, prod=True, username=None, password=None):

+         """ Waive all the tests results for the specified update using bodhi's

+         CLI.

+         """

+         info_log = f"Waiving test results for bodhi update"

+         self.print_user(info_log)

+         command = [

+             command,

+             "updates",

+             "waive",

+             updateid,

+             "'This is fine, we are testing the workflow'",

+             "--debug",

+         ]

+         if not prod:

+             command.append("--staging")

+         if username:

+             command.extend(["--user", username])

+         if password:

+             command.extend(["--password", password])

+         try:

+             run_command(command)

+             self.print_user(info_log, success=True)

+         except MonitoringException:

+             self.print_user(info_log, success=False)

+ 

+ 

+     def get_pr_flag(

+         self,

+         base_url,

+         username,

+         namespace,

+         name,

+         pr_id,

+         flag_username,

+         flag_status,

+         duration=10,

+     ):

+         """ Retrieve the flags of the PR and assert the last one from the

+         specified flag_username has the given status.

+         """

+         pr = "/".join([namespace, name, "pull-request", pr_id])

+         info_log = f"Retreiving flags for PR: {pr}"

+         self.print_user(info_log)

+         url = "/".join([base_url.rstrip("/"), "api/0", pr, "flag"])

  

-     end = datetime.datetime.utcnow()

-     info_log += f" - ran for: {(end - start).seconds}s"

-     print_user(info_log, success=success)

+         start = datetime.datetime.utcnow()

+         success = False

  

+         while True:

+             try:

+                 req = requests.get(url=url)

+             except requests.exceptions.ConnectionError:

+                 continue

  

- def waive_update(command, updateid, prod=True, username=None, password=None):

-     """ Waive all the tests results for the specified update using bodhi's

-     CLI.

-     """

-     info_log = f"Waiving test results for bodhi update"

-     print_user(info_log)

-     command = [

-         command,

-         "updates",

-         "waive",

-         updateid,

-         "'This is fine, we are testing the workflow'",

-         "--debug",

-     ]

-     if not prod:

-         command.append("--staging")

-     if username:

-         command.extend(["--user", username])

-     if password:

-         command.extend(["--password", password])

-     try:

-         run_command(command)

-         print_user(info_log, success=True)

-     except MonitoringException:

-         print_user(info_log, success=False)

- 

- 

- def get_pr_flag(

-     base_url,

-     username,

-     namespace,

-     name,

-     pr_id,

-     flag_username,

-     flag_status,

-     duration=10,

- ):

-     """ Retrieve the flags of the PR and assert the last one from the

-     specified flag_username has the given status.

-     """

-     pr = "/".join([namespace, name, "pull-request", pr_id])

-     info_log = f"Retreiving flags for PR: {pr}"

-     print_user(info_log)

-     url = "/".join([base_url.rstrip("/"), "api/0", pr, "flag"])

+             if req.ok:

+                 break

  

-     start = datetime.datetime.utcnow()

-     success = False

+             if (datetime.datetime.utcnow() - start).seconds > (duration * 60):

+                 success = False

+                 info_log = f"Failed to retrieve flags for PR: {pr}"

+                 break

  

-     while True:

-         try:

-             req = requests.get(url=url)

-         except requests.exceptions.ConnectionError:

-             continue

+             # Only query pagure every 30 seconds

+             time.sleep(30)

  

-         if req.ok:

-             break

+         if not req.ok:

+             print(req.text)

+             self.logs.append(f"Error retrieving PR flags: {req.text}")

+             raise MonitoringException("Error retrieving PR flags")

+         else:

+             output = req.json()

+             for flag in output["flags"]:

+                 if flag["username"] == flag_username:

+                     info_log = f"Retreived flag {flag['status']} on PR"

+                     success = flag["status"] == flag_status

+                     break

+ 

+         self.print_user(info_log, success=success)

  

-         if (datetime.datetime.utcnow() - start).seconds > (duration * 60):

-             success = False

-             info_log = f"Failed to retrieve flags for PR: {pr}"

-             break

- 

-         # Only query pagure every 30 seconds

-         time.sleep(30)

- 

-     if not req.ok:

-         print(req.text)

-         raise MonitoringException("Error retrieving PR flags")

-     else:

-         output = req.json()

-         for flag in output["flags"]:

-             if flag["username"] == flag_username:

-                 info_log = f"Retreived flag {flag['status']} on PR"

-                 success = flag["status"] == flag_status

-                 break

  

-     print_user(info_log, success=success)

+     def merge_pr(self, base_url, username, namespace, name, pr_id, token):

+         """ Merge the specified PR

+         """

+         pr = "/".join([namespace, name, "pull-request", pr_id])

+         info_log = f"Merge PR: {pr}"

+         self.print_user(info_log)

+         url = "/".join([base_url.rstrip("/"), "api/0", pr, "merge"])

+         headers = {"Authorization": f"token {token}"}

+         req = requests.post(url=url, data={"wait": True}, headers=headers)

+         success = False

+         if not req.ok:

+             print(req.text)

+             self.logs(f"Error Merging flags: {req.text}")

+             raise MonitoringException("Error merging flags")

+         else:

+             success = True

  

+         self.print_user(info_log, success=success)

  

- def merge_pr(base_url, username, namespace, name, pr_id, token):

-     """ Merge the specified PR

-     """

-     pr = "/".join([namespace, name, "pull-request", pr_id])

-     info_log = f"Merge PR: {pr}"

-     print_user(info_log)

-     url = "/".join([base_url.rstrip("/"), "api/0", pr, "merge"])

-     headers = {"Authorization": f"token {token}"}

-     req = requests.post(url=url, data={"wait": True}, headers=headers)

-     success = False

-     if not req.ok:

-         print(req.text)

-         raise MonitoringException("Error merging flags")

-     else:

-         success = True

  

-     print_user(info_log, success=success)

+     def finalize(self, start):

+         """ End data returned. """

+         end = datetime.datetime.utcnow()

+         delta = (end - start).seconds

+         self.logs.append(f"Ran for {delta} seconds ({delta/60:.2f} minutes)")

+         print(f"Ran for {delta} seconds ({delta/60:.2f} minutes)")

  

  

- def finalize(start):

-     """ End data returned. """

-     end = datetime.datetime.utcnow()

-     delta = (end - start).seconds

-     print(f"Ran for {delta} seconds ({delta/60:.2f} minutes)")

+ def run_command(command, cwd=None):

+     """ Run the specified command in a specific working directory if one

+     is specified.

+     """

+     output = None

+     try:

+         output = subprocess.check_output(

+             command, cwd=cwd, stderr=subprocess.PIPE

+         )

+     except subprocess.CalledProcessError as e:

+         _log.error(

+             "Command `{}` return code: `{}`".format(

+                 " ".join(command), e.returncode

+             )

+         )

+         _log.error("stdout:\n-------\n{}".format(e.stdout))

+         _log.error("stderr:\n-------\n{}".format(e.stderr))

+         raise MonitoringException("Command failed to run")

  

+     return output

no initial comment

This PR is still a little bit WIP but it should give us a basis to test things :)

3 new commits added

  • Add notifications upon test start and end
  • Move the utility functions to a class so logs can be recorded
  • Catch exceptions when running the tests so the runner can keep running
4 years ago

5 new commits added

  • Add notifications upon test start and end
  • Move the utility functions to a class so logs can be recorded
  • Catch exceptions when running the tests so the runner can keep running
  • Add a runner script to run all the tests sequentially and in a scheduler
  • Allow specifying the CLI arguments to the main functions
4 years ago

I like the refactor, but the monitor_gating_multi_builds.py is not working now.
The runner looks nice.

I will merge this and try to fix the multi-gate in another PR,
at least as far as the refactor goes :)

Pull-Request has been merged by asaleh

4 years ago

I like the refactor, but the monitor_gating_multi_builds.py is not working now.
The runner looks nice.

Yes, the multi_build isn't part of this PR. I wanted to do this in separate
steps — first the single build, then multi — and this PR is long enough
without the multi-build changes, which can easily come in another PR :)