From 55b100f8d823b4f427580673a2181eb8f16a93e6 Mon Sep 17 00:00:00 2001 From: Pierre-Yves Chibon Date: Jan 21 2020 10:39:59 +0000 Subject: Move the utility functions to a class so logs can be recorded This allows to record the logs as they are printed which allows the scheduler/runner to access these logs and do something with them, such as publishing them via fedora-messaging. Signed-off-by: Pierre-Yves Chibon --- diff --git a/monitor_gating.py b/monitor_gating.py index f728dca..2c30613 100644 --- a/monitor_gating.py +++ b/monitor_gating.py @@ -34,27 +34,8 @@ import requests from utils import( MonitoringException, - print_user, + MonitoringUtils, run_command, - clone_repo, - add_remote, - switch_branch, - bump_release, - commit_changes, - push_changes, - pull_changes, - open_pullrequest, - get_nevr, - build_package, - get_build_tags, - get_update_id, - lookup_results_datagrepper, - lookup_ci_resultsdb, - waive_update, - get_pr_flag, - merge_pr, - finalize, - create_update, ) _log = logging.getLogger(__name__) @@ -119,6 +100,7 @@ def main(args): args = get_arguments(args) conf = toml.load(args.conf) + utils = MonitoringUtils() name = conf["name_single"] namespace = conf["namespace"] @@ -128,28 +110,28 @@ def main(args): with tempfile.TemporaryDirectory(prefix="ci-test-") as folder: print(f"Working in {folder}\n") if not args.nevr: - clone_repo(conf["fedpkg"], namespace, name, folder=folder) + utils.clone_repo(conf["fedpkg"], namespace, name, folder=folder) gitfolder = os.path.join(folder, name) - switch_branch(conf["fedpkg"], branch, folder=gitfolder) - bump_release(name, folder=gitfolder) - commit_changes("Bump release", folder=gitfolder) - nevr = get_nevr(conf["fedpkg"], folder=gitfolder) + utils.switch_branch(conf["fedpkg"], branch, folder=gitfolder) + utils.bump_release(name, folder=gitfolder) + utils.commit_changes("Bump release", folder=gitfolder) + nevr = utils.get_nevr(conf["fedpkg"], folder=gitfolder) print(f" Upcoming build : {nevr}") if args.no_pr: # Push 
to the main repo - push_changes(gitfolder, "origin", branch) + utils.push_changes(gitfolder, "origin", branch) else: # Add the fork as remote, push to the it, open the PR, # wait for CI to flag the PR, twice, merge the PR - add_remote( + utils.add_remote( f"{fas_username}", f"ssh://{fas_username}@{conf['distgit_host']}/" f"forks/{fas_username}/{namespace}/{name}.git", folder=gitfolder, ) - push_changes(gitfolder, fas_username, branch, force=True) - pr_created, pr_id, pr_uid = open_pullrequest( + utils.push_changes(gitfolder, fas_username, branch, force=True) + pr_created, pr_id, pr_uid = utils.open_pullrequest( base_url=conf["pagure_dist_git"], username=fas_username, namespace=namespace, @@ -159,14 +141,14 @@ def main(args): ) if pr_created: # Check that pr pipeline is running - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="CI (running)", topic=f"org.centos.{conf['_ci_env']}.ci.dist-git-pr.test.running", rev=pr_uid, ) # Check that CI flag pending was set - get_pr_flag( + utils.get_pr_flag( base_url=conf["pagure_dist_git"], username=fas_username, namespace=namespace, @@ -176,14 +158,14 @@ def main(args): flag_status="pending", ) # Check that pr pipeline has finished - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="CI (complete)", topic=f"org.centos.{conf['_ci_env']}.ci.dist-git-pr.test.error", rev=pr_uid, ) # Check that CI flag failure was set - get_pr_flag( + utils.get_pr_flag( base_url=conf["pagure_dist_git"], username=fas_username, namespace=namespace, @@ -194,7 +176,7 @@ def main(args): duration=25, ) # Merge the PR: TODO - merge_pr( + utils.merge_pr( base_url=conf["pagure_dist_git"], username=fas_username, namespace=namespace, @@ -202,15 +184,15 @@ def main(args): pr_id=pr_id, token=conf["pagure_token"], ) - pull_changes(gitfolder, "origin", branch) + utils.pull_changes(gitfolder, "origin", branch) else: return # Build the package - 
build_package(conf["fedpkg"], folder=gitfolder) + utils.build_package(conf["fedpkg"], folder=gitfolder) # Check the tag of the build - get_build_tags( + utils.get_build_tags( conf.get("koji_hub"), nevr, expected_ends=["updates-candidate", "signing-pending"], @@ -219,9 +201,9 @@ def main(args): nevr = args.nevr # Retrieve or create the update - updateid = get_update_id(nevr, conf["bodhi"]) + updateid = utils.get_update_id(nevr, conf["bodhi"]) if not args.update and not args.auto_update: - create_update(conf["bodhi-cli"], + utils.create_update(conf["bodhi-cli"], nevr, prod=conf["_env"] == "prod", username=conf.get("bodhi-user"), @@ -233,18 +215,18 @@ def main(args): updateid = args.update # Check the tag of the build - get_build_tags( + utils.get_build_tags( conf.get("koji_hub"), nevr, expected_ends=["signing-pending", "testing-pending"], ) if not updateid: - finalize(start) + utils.finalize(start) return # Check that bodhi notified the pipeline it can run - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="bodhi to CI", topic=f"org.fedoraproject.{conf['_env']}.bodhi.update.status." 
@@ -253,14 +235,14 @@ def main(args): ) # Check that the CI pipeline is running - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="CI (running)", topic=f"org.centos.{conf['_ci_env']}.ci.koji-build.test.running", nevr=nevr, ) # Check at the CI pipeline has completed - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="CI (complete)", topic=f"org.centos.{conf['_ci_env']}.ci.koji-build.test.error", @@ -269,19 +251,19 @@ def main(args): ) # Check the tag of the build - get_build_tags( + utils.get_build_tags( conf.get("koji_hub"), nevr, expected_ends=["testing-pending"], ) # Check that the CI results made it to resultsdb - lookup_ci_resultsdb( + utils.lookup_ci_resultsdb( nevr=nevr, name="resultsdb(phx)", url=conf["resultsdb"] ) # Check that resultsdb announced the new results - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="resultsdb", topic=f"org.fedoraproject.{conf['_env']}.resultsdb.result.new", @@ -289,7 +271,7 @@ def main(args): ) # Check that greenwave reacted to resultsdb's new results - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="greenwave", topic=f"org.fedoraproject.{conf['_env']}.greenwave.decision.update", @@ -297,14 +279,14 @@ def main(args): ) # Check the tag of the build -- build is blocked but should be signed - get_build_tags( + utils.get_build_tags( conf.get("koji_hub"), nevr, expected_ends=["testing-pending"], ) if not args.no_waive: - waive_update( + utils.waive_update( conf["bodhi-cli"], updateid, prod=conf["_env"] == "prod", @@ -313,7 +295,7 @@ def main(args): ) # Check that waiverdb announced the new waiver - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="waiverdb", topic=f"org.fedoraproject.{conf['_env']}.waiverdb.waiver.new", @@ -321,7 +303,7 @@ def main(args): ) # Check that greenwave 
reacted to the new waiver - lookup_results_datagrepper( + utils.lookup_results_datagrepper( base_url=conf["datagrepper"], name="greenwave", topic=f"org.fedoraproject.{conf['_env']}.greenwave.decision.update", @@ -329,13 +311,13 @@ def main(args): ) # Check the tag of the build -- build was waived, let is through - get_build_tags( + utils.get_build_tags( conf.get("koji_hub"), nevr, expected_ends=["f32"], ) - finalize(start) + utils.finalize(start) if __name__ == "__main__": diff --git a/utils.py b/utils.py index 4c3e8dd..afd629c 100644 --- a/utils.py +++ b/utils.py @@ -23,630 +23,645 @@ class MonitoringException(Exception): """The base class for all exceptions raised by this script.""" -def print_user(content, success=None): - """ Prints the specified content to the user. - """ - spaces = 90 - if success is not None: - end = None - if success: - content = "{} {}".format(content.ljust(spaces), "[DONE]") - else: - content = "{} {}".format(content.ljust(spaces), "[FAILED]") - else: - end = "\r" +class MonitoringUtils: - now = datetime.datetime.utcnow() - time = now.strftime("%H:%M:%S") - print(f"{time} - {content}", end=end, flush=True) + def __init__(self): + """ Instanciate the object. """ + self.logs = [] + def print_user(self, content, success=None): + """ Prints the specified content to the user. + """ + spaces = 90 + if success is not None: + end = None + if success: + content = "{} {}".format(content.ljust(spaces), "[DONE]") + else: + content = "{} {}".format(content.ljust(spaces), "[FAILED]") + else: + end = "\r" -def run_command(command, cwd=None): - """ Run the specified command in a specific working directory if one - is specified. 
- """ - output = None - try: - output = subprocess.check_output( - command, cwd=cwd, stderr=subprocess.PIPE - ) - except subprocess.CalledProcessError as e: - _log.error( - "Command `{}` return code: `{}`".format( - " ".join(command), e.returncode - ) - ) - _log.error("stdout:\n-------\n{}".format(e.stdout)) - _log.error("stderr:\n-------\n{}".format(e.stderr)) - raise MonitoringException("Command failed to run") + now = datetime.datetime.utcnow() + time = now.strftime("%H:%M:%S") + self.logs.append(f"{time} - {content}") + print(f"{time} - {content}", end=end, flush=True) - return output - -def clone_repo(command, namespace, name, folder): - """ Clone the specified git repo into the specified folder. - """ - info_log = f"Cloning the git repo: {namespace}/{name}" - print_user(info_log) - try: - run_command([command, "clone", f"{namespace}/{name}"], cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def clone_repo(self, command, namespace, name, folder): + """ Clone the specified git repo into the specified folder. + """ + info_log = f"Cloning the git repo: {namespace}/{name}" + self.print_user(info_log) + try: + run_command([command, "clone", f"{namespace}/{name}"], cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def add_remote(name, url, folder): - """ Add the specified remote to the git repo in the folder with the - specified url. - """ - info_log = f"Adding remote: {name}" - print_user(info_log) - try: - run_command(["git", "remote", "add", name, url], cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def add_remote(self, name, url, folder): + """ Add the specified remote to the git repo in the folder with the + specified url. 
+ """ + info_log = f"Adding remote: {name}" + self.print_user(info_log) + try: + run_command(["git", "remote", "add", name, url], cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def switch_branch(command, name, folder): - """ Switch to the specified git branch in the specified git repo. - """ - info_log = f"Switching to branch: {name}" - print_user(info_log) - try: - run_command([command, "switch-branch", f"{name}"], cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def switch_branch(self, command, name, folder): + """ Switch to the specified git branch in the specified git repo. + """ + info_log = f"Switching to branch: {name}" + self.print_user(info_log) + try: + run_command([command, "switch-branch", f"{name}"], cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def bump_release(name, folder): - """ Bump the release of the spec file the specified git repo. - """ - info_log = f"Bumping release of: {name}.spec" - print_user(info_log) - try: - run_command(["rpmdev-bumpspec", f"{name}.spec"], cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def bump_release(self, name, folder): + """ Bump the release of the spec file the specified git repo. + """ + info_log = f"Bumping release of: {name}.spec" + self.print_user(info_log) + try: + run_command(["rpmdev-bumpspec", f"{name}.spec"], cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def commit_changes(commit_log, folder): - """ Commit all the changes made to *tracked* files in the git repo - with the specified commit log. 
- """ - info_log = f"Commiting changes" - print_user(info_log) - try: - run_command(["git", "commit", "-asm", commit_log], cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def commit_changes(self, commit_log, folder): + """ Commit all the changes made to *tracked* files in the git repo + with the specified commit log. + """ + info_log = f"Commiting changes" + self.print_user(info_log) + try: + run_command(["git", "commit", "-asm", commit_log], cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def push_changes(folder, target, branch, force=False): - """ Push all changes using git. - """ - info_log = f"Pushing changes" - print_user(info_log) - try: - cmd = ["git", "push", target, branch] - if force: - cmd.append("-f") - run_command(cmd, cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def push_changes(self, folder, target, branch, force=False): + """ Push all changes using git. + """ + info_log = f"Pushing changes" + self.print_user(info_log) + try: + cmd = ["git", "push", target, branch] + if force: + cmd.append("-f") + run_command(cmd, cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def pull_changes(folder, target, branch): - """ Pull all changes using git. - """ - info_log = f"Pushing changes" - print_user(info_log) - try: - cmd = ["git", "pull", "--rebase", target, branch] - run_command(cmd, cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def pull_changes(self, folder, target, branch): + """ Pull all changes using git. 
+ """ + info_log = f"Pushing changes" + self.print_user(info_log) + try: + cmd = ["git", "pull", "--rebase", target, branch] + run_command(cmd, cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def open_pullrequest(base_url, username, namespace, name, branch, token): - """ Open a pull-request from the user's fork to the main project for - the specified branch. - """ - info_log = f"Creating PR from forks/{username}/{namespace}/{name}" - print_user(info_log) - url = "/".join( - [base_url.rstrip("/"), "api/0", namespace, name, "pull-request/new"] - ) - data = { - "branch_to": branch, - "branch_from": branch, - "repo_from": name, - "repo_from_username": username, - "repo_from_namespace": namespace, - "initial_comment": "Testing PR", - "title": "Test PR for monitoring", - } - headers = {"Authorization": f"token {token}"} - req = requests.post(url=url, data=data, headers=headers) - if not req.ok: - print(req.text) - success = False - pr_id = None - pr_uid = None - else: - output = req.json() - pr_id = str(output["id"]) - pr_uid = output["uid"] + def open_pullrequest(self, base_url, username, namespace, name, branch, token): + """ Open a pull-request from the user's fork to the main project for + the specified branch. 
+ """ + info_log = f"Creating PR from forks/{username}/{namespace}/{name}" + self.print_user(info_log) url = "/".join( - [base_url.rstrip("/"), namespace, name, "pull-request", pr_id] + [base_url.rstrip("/"), "api/0", namespace, name, "pull-request/new"] ) - info_log = f"PR created {url}" - success = True - print_user(info_log, success=success) - return (success, pr_id, pr_uid) - + data = { + "branch_to": branch, + "branch_from": branch, + "repo_from": name, + "repo_from_username": username, + "repo_from_namespace": namespace, + "initial_comment": "Testing PR", + "title": "Test PR for monitoring", + } + headers = {"Authorization": f"token {token}"} + req = requests.post(url=url, data=data, headers=headers) + if not req.ok: + print(req.text) + success = False + pr_id = None + pr_uid = None + else: + output = req.json() + pr_id = str(output["id"]) + pr_uid = output["uid"] + url = "/".join( + [base_url.rstrip("/"), namespace, name, "pull-request", pr_id] + ) + info_log = f"PR created {url}" + success = True + self.print_user(info_log, success=success) + return (success, pr_id, pr_uid) -def get_nevr(command, folder): - """ Get the name-epoch-version-release presently in git - """ - info_log = f"Getting nevr" - print_user(info_log) - try: - nevr = run_command([command, "verrel"], cwd=folder) - print_user(info_log, success=True) - return nevr.strip().decode("utf-8") - except MonitoringException: - print_user(info_log, success=False) + def get_nevr(self, command, folder): + """ Get the name-epoch-version-release presently in git + """ + info_log = f"Getting nevr" + self.print_user(info_log) + try: + nevr = run_command([command, "verrel"], cwd=folder) + self.print_user(info_log, success=True) + return nevr.strip().decode("utf-8") + except MonitoringException: + self.print_user(info_log, success=False) -def build_package(command, folder, target=None): - """ Build the package in the current branch - """ - info_log = f"Building the package" - print_user(info_log) - command = 
[command, "build"] - if target: - command.extend(["--target", target]) - try: - run_command(command, cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def build_package(self, command, folder, target=None): + """ Build the package in the current branch + """ + info_log = f"Building the package" + self.print_user(info_log) + command = [command, "build"] + if target: + command.extend(["--target", target]) + try: + run_command(command, cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) + + + def chain_build_packages(self, command, packages, folder, target=None): + """ Chain-build the packages in the current branch + """ + if not isinstance(packages, list): + packages = [packages] + info_log = f"Chain-building the packages: {packages + [os.path.basename(folder)]}" + self.print_user(info_log) + command = [command, "chain-build"] + command.extend(packages) + if target: + command.extend(["--target", target]) + try: + run_command(command, cwd=folder) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) -def chain_build_packages(command, packages, folder, target=None): - """ Chain-build the packages in the current branch - """ - if not isinstance(packages, list): - packages = [packages] - info_log = f"Chain-building the packages: {packages + [os.path.basename(folder)]}" - print_user(info_log) - command = [command, "chain-build"] - command.extend(packages) - if target: - command.extend(["--target", target]) - try: - run_command(command, cwd=folder) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) + def get_build_tags(self, koji_url, nevr, expected_ends): + """ List the tags associated with the specified build. 
+ """ + # return + start = datetime.datetime.utcnow() + info_log = f"Retrieving koji tags" + self.print_user(info_log) + command = [ + "koji", + ] + if koji_url: + command.extend([ + "-s", + koji_url, + ]) + command.extend([ + "call", + "listTags", + nevr + ]) -def get_build_tags(koji_url, nevr, expected_ends): - """ List the tags associated with the specified build. - """ - # return - start = datetime.datetime.utcnow() - info_log = f"Retrieving koji tags" - print_user(info_log) - command = [ - "koji", - "-s", - koji_url, - "call", - "listTags", - nevr - ] - - success = False - tags = None - broke = False - while True: - try: - output = run_command(command) - output = output.decode('utf-8') + success = False + tags = None + broke = False + while True: try: - data = ast.literal_eval(output.strip()) - except Exception: - print("Could not decode JSON in:") - print(command) - print(output) - broke = True - break - tags = [tag.get("name") for tag in data] - for tag_name in tags: - for expectation in expected_ends: - if tag_name.endswith(expectation): - success = True + output = run_command(command) + output = output.decode('utf-8') + try: + data = ast.literal_eval(output.strip()) + except Exception: + print("Could not decode JSON in:") + print(command) + print(output) + broke = True + break + tags = [tag.get("name") for tag in data] + for tag_name in tags: + for expectation in expected_ends: + if tag_name.endswith(expectation): + success = True + broke = True + break + if success: broke = True break - if success: - broke = True + if broke: break - if broke: - break - if (datetime.datetime.utcnow() - start).seconds > (15 * 60): + if (datetime.datetime.utcnow() - start).seconds > (15 * 60): + success = False + info_log = f"Update for {nevr} not created within 15 minutes" + break + + # Only query koji every 30 seconds + time.sleep(30) + except MonitoringException: success = False - info_log = f"Update for {nevr} not created within 15 minutes" break - # Only query koji 
every 30 seconds - time.sleep(30) + info_log = f"Retrieving koji tags: {tags}" + self.print_user(info_log, success=success) + + + def create_update(self, command, item, prod=True, username=None, password=None, from_tag=False): + """ Create the update for the package built. + """ + info_log = f"Creating a bodhi update" + self.print_user(info_log) + command = [ + command, + "updates", + "new", + "--notes", + "Bump release to test CI", + "--type", + "bugfix", + "--autotime", + ] + if from_tag: + command.append("--from-tag") + command.append(item) + + if not prod: + command.append("--staging") + if username: + command.extend(["--user", username]) + if password: + command.extend(["--password", password]) + try: + run_command(command) + self.print_user(info_log, success=True) except MonitoringException: - success = False - break + self.print_user(info_log, success=False) - info_log = f"Retrieving koji tags: {tags}" - print_user(info_log, success=success) + def get_update_id(self, nevr, url): + """ Retrieve the update identifier from bodhi for the given nevr. """ + start = datetime.datetime.utcnow() + info_log = f"Retrieving update created" + self.print_user(info_log) + url = f"{url}/updates/?builds={nevr}" + updateid = None + success = True + while True: + req = requests.get(url) + data = req.json() + if data["updates"]: + updateid = data["updates"][0]["updateid"] + if updateid: + break -def create_update(command, item, prod=True, username=None, password=None, from_tag=False): - """ Create the update for the package built. 
- """ - info_log = f"Creating a bodhi update" - print_user(info_log) - command = [ - command, - "updates", - "new", - "--notes", - "Bump release to test CI", - "--type", - "bugfix", - "--autotime", - ] - if from_tag: - command.append("--from-tag") - command.append(item) - - if not prod: - command.append("--staging") - if username: - command.extend(["--user", username]) - if password: - command.extend(["--password", password]) - try: - run_command(command) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) - - -def get_update_id(nevr, url): - """ Retrieve the update identifier from bodhi for the given nevr. """ - start = datetime.datetime.utcnow() - info_log = f"Retrieving update created" - print_user(info_log) - url = f"{url}/updates/?builds={nevr}" - updateid = None - success = True - while True: - req = requests.get(url) - data = req.json() - if data["updates"]: - updateid = data["updates"][0]["updateid"] - if updateid: - break - - if (datetime.datetime.utcnow() - start).seconds > (15 * 60): - success = False - info_log = f"Update for {nevr} not created within 15 minutes" - break + if (datetime.datetime.utcnow() - start).seconds > (15 * 60): + success = False + info_log = f"Update for {nevr} not created within 15 minutes" + break - # Only query bodhi every 30 seconds - time.sleep(30) + # Only query bodhi every 30 seconds + time.sleep(30) - print_user(info_log, success=success) - return updateid + self.print_user(info_log, success=success) + return updateid + + + def lookup_results_datagrepper( + self, base_url, name, topic, nevr=None, nevrs=None, rev=None, + bodhi_id=None, start=None, duration=15 + ): + """ Check the CI results in datagrepper for results about our specified + build. 
+ """ + if start is None: + start = datetime.datetime.utcnow() + info_log = f"Checking datagrepper for {name} messages" + self.print_user(info_log) + # Start pulling messages 10 minutes before now + start_time = start - datetime.timedelta(minutes=10) + # Limiting the number of row per page to 10 allows for quicker results + url = ( + base_url + f"?topic={topic}" + f"&start={start_time.timestamp()}&row_per_page=10" + ) + success = None + returned_status = None + info_log = None + nevrs = nevrs or [] + while True: + # We're assuming here that there won't be more than 100 messages for + # that topic coming in between the one we're interested in and when we + # are looking for it (10*10 == 100) + for page in range(1, 11): + end_url = url + end_url += f"&page={page}" + data = requests.get(end_url).json() + for message in data["raw_messages"]: + + # Old message format from the CI pipeline + if "ci.pipeline" in message["topic"] and ( + message["msg"]["nvr"] == nevr + or message["msg"]["nvr"] in nevrs + or message["msg"]["rev"] == rev + ): + success = True + returned_status = message["msg"]["status"] + break -def lookup_results_datagrepper( - base_url, name, topic, nevr=None, nevrs=None, rev=None, bodhi_id=None, - start=None, duration=15 -): - """ Check the CI results in datagrepper for results about our specified - build. 
- """ - if start is None: - start = datetime.datetime.utcnow() - info_log = f"Checking datagrepper for {name} messages" - print_user(info_log) - # Start pulling messages 10 minutes before now - start_time = start - datetime.timedelta(minutes=10) - # Limiting the number of row per page to 10 allows for quicker results - url = ( - base_url + f"?topic={topic}" - f"&start={start_time.timestamp()}&row_per_page=10" - ) - - success = None - returned_status = None - info_log = None - nevrs = nevrs or [] - while True: - # We're assuming here that there won't be more than 100 messages for - # that topic coming in between the one we're interested in and when we - # are looking for it (10*10 == 100) - for page in range(1, 11): - end_url = url - end_url += f"&page={page}" - data = requests.get(end_url).json() - for message in data["raw_messages"]: - - # Old message format from the CI pipeline - if "ci.pipeline" in message["topic"] and ( - message["msg"]["nvr"] == nevr - or message["msg"]["nvr"] in nevrs - or message["msg"]["rev"] == rev - ): - success = True - returned_status = message["msg"]["status"] - break + # New message format from the CI pipeline for koji builds + if ( + "ci.koji-build" in message["topic"] and + message["msg"]["artifact"]["nvr"] == nevr + ): + if message["topic"].endswith("test.complete"): + success = True + returned_status = message["msg"]["test"]["result"] + elif message["topic"].endswith("test.error"): + success = True + returned_status = "error" + elif message["topic"].endswith("test.running"): + success = True + returned_status = "running" + break - # New message format from the CI pipeline for koji builds - if ( - "ci.koji-build" in message["topic"] and - message["msg"]["artifact"]["nvr"] == nevr - ): - if message["topic"].endswith("test.complete"): - success = True - returned_status = message["msg"]["test"]["result"] - elif message["topic"].endswith("test.error"): - success = True - returned_status = "error" - elif 
message["topic"].endswith("test.running"): - success = True - returned_status = "running" - break + # New message format from the CI pipeline for dist-git PR + if ( + "ci.dist-git-pr" in message["topic"] + and message["msg"]["artifact"]["type"] == "pull-request" + and message["msg"]["artifact"]["uid"] == rev + ): + if message["topic"].endswith("test.complete"): + success = True + returned_status = message["msg"]["test"]["result"] + elif message["topic"].endswith("test.error"): + success = True + returned_status = "error" + elif message["topic"].endswith("test.running"): + success = True + returned_status = "running" + break - # New message format from the CI pipeline for dist-git PR - if ( - "ci.dist-git-pr" in message["topic"] - and message["msg"]["artifact"]["type"] == "pull-request" - and message["msg"]["artifact"]["uid"] == rev - ): - if message["topic"].endswith("test.complete"): - success = True - returned_status = message["msg"]["test"]["result"] - elif message["topic"].endswith("test.error"): + # resultsdb messages + if ( + "resultsdb" in message["topic"] + and "nvr" in message["msg"]["data"] + and (nevr in message["msg"]["data"]["nvr"] + or message["msg"]["data"]["nvr"] in nevrs) + ): success = True - returned_status = "error" - elif message["topic"].endswith("test.running"): - success = True - returned_status = "running" - break - - # resultsdb messages - if ( - "resultsdb" in message["topic"] - and "nvr" in message["msg"]["data"] - and (nevr in message["msg"]["data"]["nvr"] - or message["msg"]["data"]["nvr"] in nevrs) - ): - success = True - returned_status = message["msg"]["outcome"] - break + returned_status = message["msg"]["outcome"] + break - # greenwave messages - if ( - "greenwave" in message["topic"] - and ( - message["msg"]["subject_identifier"] == nevr - or - message["msg"]["subject_identifier"] in nevrs - ) - ): - success = True - returned_status = message["msg"]["policies_satisfied"] - break + # greenwave messages + if ( + "greenwave" in 
message["topic"] + and ( + message["msg"]["subject_identifier"] == nevr + or + message["msg"]["subject_identifier"] in nevrs + ) + ): + success = True + returned_status = message["msg"]["policies_satisfied"] + break - # waiverdb messages - if ( - "waiverdb" in message["topic"] - and ( - message["msg"]["subject_identifier"] == nevr - or - message["msg"]["subject_identifier"] in nevrs - ) - ): - success = True - returned_status = "" - break + # waiverdb messages + if ( + "waiverdb" in message["topic"] + and ( + message["msg"]["subject_identifier"] == nevr + or + message["msg"]["subject_identifier"] in nevrs + ) + ): + success = True + returned_status = "" + break - # bodhi messages - if ( - "bodhi.update.status.testing" in message["topic"] - and message["msg"]["artifact"]["id"].startswith(bodhi_id) - ): - success = True - returned_status = "" + # bodhi messages + if ( + "bodhi.update.status.testing" in message["topic"] + and message["msg"]["artifact"]["id"].startswith(bodhi_id) + ): + success = True + returned_status = "" + break + if success is not None: break if success is not None: break - if success is not None: - break - if (datetime.datetime.utcnow() - start).seconds > (duration * 60): - success = False - info_log = f"{name} results not found in datagrepper" - break + if (datetime.datetime.utcnow() - start).seconds > (duration * 60): + success = False + info_log = f"{name} results not found in datagrepper" + break - # Only query datagrepper every 30 seconds - time.sleep(30) + # Only query datagrepper every 30 seconds + time.sleep(30) - if info_log is None: - info_log = f"{name} results in datagrepper returned {returned_status}" + if info_log is None: + info_log = f"{name} results in datagrepper returned {returned_status}" - end = datetime.datetime.utcnow() - info_log += f" - ran for: {(end - start).seconds}s" - print_user(info_log, success=success) + end = datetime.datetime.utcnow() + info_log += f" - ran for: {(end - start).seconds}s" + 
self.print_user(info_log, success=success) -def lookup_ci_resultsdb(nevr, name, url): - """ Check the CI results in the specified resultsdb for results about - our specified build. - """ - start = datetime.datetime.utcnow() - info_log = f"Checking {name} for CI results " - print_user(info_log) - topic = "org.centos.prod.ci.pipeline.allpackages-build.complete" - if ".stg" in url: - topic = "org.centos.stage.ci.pipeline.allpackages-build.complete" - url = f"{url}?testcases={topic}" - - success = False - returned_status = None - info_log = None - while True: - # Assume we won't have more than 3 pages of results coming in b/w - # our checks - for page in [0, 1, 2]: - end_url = url - end_url += f"&page={page}" - data = requests.get(end_url).json() - for result in data["data"]: - if nevr in result["data"]["nvr"]: - success = True - returned_status = result["data"]["status"][0] + def lookup_ci_resultsdb(self, nevr, name, url): + """ Check the CI results in the specified resultsdb for results about + our specified build. 
+ """ + start = datetime.datetime.utcnow() + info_log = f"Checking {name} for CI results " + self.print_user(info_log) + topic = "org.centos.prod.ci.pipeline.allpackages-build.complete" + if ".stg" in url: + topic = "org.centos.stage.ci.pipeline.allpackages-build.complete" + url = f"{url}?testcases={topic}" + + success = False + returned_status = None + info_log = None + while True: + # Assume we won't have more than 3 pages of results coming in b/w + # our checks + for page in [0, 1, 2]: + end_url = url + end_url += f"&page={page}" + data = requests.get(end_url).json() + for result in data["data"]: + if nevr in result["data"]["nvr"]: + success = True + returned_status = result["data"]["status"][0] + break + if success: break if success: break - if success: - break - if (datetime.datetime.utcnow() - start).seconds > (15 * 60): - success = False - info_log = f"CI results did not show in {name} for {nevr} within 15 minutes" - break + if (datetime.datetime.utcnow() - start).seconds > (15 * 60): + success = False + info_log = f"CI results did not show in {name} for {nevr} within 15 minutes" + break - # Only query datagrepper every 30 seconds - time.sleep(30) + # Only query datagrepper every 30 seconds + time.sleep(30) - if info_log is None: - info_log = f"CI results in {name} returned {returned_status}" + if info_log is None: + info_log = f"CI results in {name} returned {returned_status}" + + end = datetime.datetime.utcnow() + info_log += f" - ran for: {(end - start).seconds}s" + self.print_user(info_log, success=success) + + + def waive_update(self, command, updateid, prod=True, username=None, password=None): + """ Waive all the tests results for the specified update using bodhi's + CLI. 
+ """ + info_log = f"Waiving test results for bodhi update" + self.print_user(info_log) + command = [ + command, + "updates", + "waive", + updateid, + "'This is fine, we are testing the workflow'", + "--debug", + ] + if not prod: + command.append("--staging") + if username: + command.extend(["--user", username]) + if password: + command.extend(["--password", password]) + try: + run_command(command) + self.print_user(info_log, success=True) + except MonitoringException: + self.print_user(info_log, success=False) + + + def get_pr_flag( + self, + base_url, + username, + namespace, + name, + pr_id, + flag_username, + flag_status, + duration=10, + ): + """ Retrieve the flags of the PR and assert the last one from the + specified flag_username has the given status. + """ + pr = "/".join([namespace, name, "pull-request", pr_id]) + info_log = f"Retreiving flags for PR: {pr}" + self.print_user(info_log) + url = "/".join([base_url.rstrip("/"), "api/0", pr, "flag"]) - end = datetime.datetime.utcnow() - info_log += f" - ran for: {(end - start).seconds}s" - print_user(info_log, success=success) + start = datetime.datetime.utcnow() + success = False + while True: + try: + req = requests.get(url=url) + except requests.exceptions.ConnectionError: + continue -def waive_update(command, updateid, prod=True, username=None, password=None): - """ Waive all the tests results for the specified update using bodhi's - CLI. 
- """ - info_log = f"Waiving test results for bodhi update" - print_user(info_log) - command = [ - command, - "updates", - "waive", - updateid, - "'This is fine, we are testing the workflow'", - "--debug", - ] - if not prod: - command.append("--staging") - if username: - command.extend(["--user", username]) - if password: - command.extend(["--password", password]) - try: - run_command(command) - print_user(info_log, success=True) - except MonitoringException: - print_user(info_log, success=False) - - -def get_pr_flag( - base_url, - username, - namespace, - name, - pr_id, - flag_username, - flag_status, - duration=10, -): - """ Retrieve the flags of the PR and assert the last one from the - specified flag_username has the given status. - """ - pr = "/".join([namespace, name, "pull-request", pr_id]) - info_log = f"Retreiving flags for PR: {pr}" - print_user(info_log) - url = "/".join([base_url.rstrip("/"), "api/0", pr, "flag"]) + if req.ok: + break - start = datetime.datetime.utcnow() - success = False + if (datetime.datetime.utcnow() - start).seconds > (duration * 60): + success = False + info_log = f"Failed to retrieve flags for PR: {pr}" + break - while True: - try: - req = requests.get(url=url) - except requests.exceptions.ConnectionError: - continue + # Only query pagure every 30 seconds + time.sleep(30) - if req.ok: - break + if not req.ok: + print(req.text) + self.logs.append(f"Error retrieving PR flags: {req.text}") + raise MonitoringException("Error retrieving PR flags") + else: + output = req.json() + for flag in output["flags"]: + if flag["username"] == flag_username: + info_log = f"Retreived flag {flag['status']} on PR" + success = flag["status"] == flag_status + break + + self.print_user(info_log, success=success) - if (datetime.datetime.utcnow() - start).seconds > (duration * 60): - success = False - info_log = f"Failed to retrieve flags for PR: {pr}" - break - - # Only query pagure every 30 seconds - time.sleep(30) - - if not req.ok: - print(req.text) 
- raise MonitoringException("Error retrieving PR flags")
- else:
- output = req.json()
- for flag in output["flags"]:
- if flag["username"] == flag_username:
- info_log = f"Retreived flag {flag['status']} on PR"
- success = flag["status"] == flag_status
- break
- print_user(info_log, success=success)
+ def merge_pr(self, base_url, username, namespace, name, pr_id, token):
+ """ Merge the specified PR
+ """
+ pr = "/".join([namespace, name, "pull-request", pr_id])
+ info_log = f"Merge PR: {pr}"
+ self.print_user(info_log)
+ url = "/".join([base_url.rstrip("/"), "api/0", pr, "merge"])
+ headers = {"Authorization": f"token {token}"}
+ req = requests.post(url=url, data={"wait": True}, headers=headers)
+ success = False
+ if not req.ok:
+ print(req.text)
+ self.logs.append(f"Error Merging flags: {req.text}")
+ raise MonitoringException("Error merging flags")
+ else:
+ success = True
+ self.print_user(info_log, success=success)
-def merge_pr(base_url, username, namespace, name, pr_id, token):
- """ Merge the specified PR
- """
- pr = "/".join([namespace, name, "pull-request", pr_id])
- info_log = f"Merge PR: {pr}"
- print_user(info_log)
- url = "/".join([base_url.rstrip("/"), "api/0", pr, "merge"])
- headers = {"Authorization": f"token {token}"}
- req = requests.post(url=url, data={"wait": True}, headers=headers)
- success = False
- if not req.ok:
- print(req.text)
- raise MonitoringException("Error merging flags")
- else:
- success = True
- print_user(info_log, success=success)
+ def finalize(self, start):
+ """ End data returned. """
+ end = datetime.datetime.utcnow()
+ delta = (end - start).seconds
+ self.logs.append(f"Ran for {delta} seconds ({delta/60:.2f} minutes)")
+ print(f"Ran for {delta} seconds ({delta/60:.2f} minutes)")
-def finalize(start):
- """ End data returned. 
""" - end = datetime.datetime.utcnow() - delta = (end - start).seconds - print(f"Ran for {delta} seconds ({delta/60:.2f} minutes)") +def run_command(command, cwd=None): + """ Run the specified command in a specific working directory if one + is specified. + """ + output = None + try: + output = subprocess.check_output( + command, cwd=cwd, stderr=subprocess.PIPE + ) + except subprocess.CalledProcessError as e: + _log.error( + "Command `{}` return code: `{}`".format( + " ".join(command), e.returncode + ) + ) + _log.error("stdout:\n-------\n{}".format(e.stdout)) + _log.error("stderr:\n-------\n{}".format(e.stderr)) + raise MonitoringException("Command failed to run") + return output