From 3b1fbabd35cad3537da5e3bf6a171a3a08a38518 Mon Sep 17 00:00:00 2001 From: Martin Krizek Date: Nov 11 2011 11:50:10 +0000 Subject: Use standard logging facilities This patch adds support for logging messages from the autoqa script and watchers. There are two new cmd line options controlling what's printed out on stdout: --debug (print everything) and --quiet (print only error messages). --verbose option was removed and is now default one. Everything is logged into the log file (specified in autoqa.conf) except debug messages. Logging debug messages into the log file can be turned on by setting debug option in autoqa.conf. Fixes: #196 --- diff --git a/autoqa b/autoqa index 619a5b9..561ee52 100755 --- a/autoqa +++ b/autoqa @@ -32,6 +32,8 @@ import copy import fnmatch import traceback from subprocess import call +import logging +import autoqa.logger from autoqa.config import autoqa_conf, getbool # Hardcoded defaults for the 'general' section @@ -90,14 +92,13 @@ def prep_controlfile(controlfile, extradata, arch): return name def maybe_call(cmdlist, dryrun=False, verbose=False): - if dryrun or verbose: - print ' '.join(cmdlist) if dryrun: + print ' '.join(cmdlist) return 0 try: return call(cmdlist) except OSError, e: - print "Error: Command failed: %s\n %s" % (' '.join(cmdlist), e) + logging.error("Command failed: %s\n %s" % (' '.join(cmdlist), e)) return None def schedule_job(controlfile, required_arch=None, email=None, name=None, dryrun=False, labels=[]): @@ -184,7 +185,7 @@ def resolve_tests(opts, autoqa_args, event, parser): try: test_vars[test] = eval_test_vars(test, default_test_vars) except StandardError, e: - print "Error: Can't evaluate test '%s': %s" % (test, e) + logging.error("Can't evaluate test '%s': %s" % (test, e)) traceback.print_tb(sys.exc_traceback) tests.remove(test) testlist = [test for test,vars in test_vars.iteritems() if vars['execute'] == True] @@ -202,7 +203,7 @@ def resolve_tests(opts, autoqa_args, event, parser): def parse_params(): # Sanity check our installation if not os.path.isdir(conf['eventdir']): - print "Can't find events in %s. Check your installation." % conf['eventdir'] + logging.error("Can't find events in %s. Check your installation." % conf['eventdir']) sys.exit(1) # known events = dirs in eventdir # FIXME - would be nice to have some way to determine what makes a event valid @@ -211,34 +212,45 @@ def parse_params(): # Set up the option parser parser = optparse.OptionParser(usage="%prog EVENT [options] ...", add_help_option=False) + parser.add_option('-h', '--help', action='help', help='show this help message (or event help message if EVENT given) and \ exit') + parser.add_option('-a', '--arch', action='append', default=[], help="arch to run the test(s) on; can be used multiple times; by default \ 'noarch' arch is used") + parser.add_option('-t', '--test', action='append', help="run only the given test(s) instead of all relevant ones; can be used \ multiple times; if you specify a test that wouldn't be run by default it will \ be forced to run") + # XXX --skiptest/--exclude? 
parser.add_option('--keep-control-file', action='store_true', help='do not delete generated control files') + parser.add_option('--dryrun', '--dry-run', action='store_true', dest='dryrun', help='do not actually execute commands, just show what would be done \ (implies --keep-control-file)') + parser.add_option('--local', action='store_true', dest='local', help='do not schedule jobs - run test(s) directly on the local machine') + parser.add_option('-l', '--list-tests', action='store_true', dest='listtests', help='list the tests for the given event - do not run any tests') + parser.add_option('--autotest-server', action='store', default=None, help='sets the autotest-server hostname used for creating URLs to results;\ hostname of the local machine is used by default') + + autoqa.logger.add_options(parser) + # Read and validate the event name # Check for no args, or just -h/--help if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help'): parser.print_help() - print 'No event name given.\nKnown events: %s' % ' '.join(known_events) + logging.error('No event given.\nKnown events: %s' % ' '.join(known_events)) parser.exit() event_name = sys.argv[1] if not event_name in known_events: @@ -253,6 +265,9 @@ def parse_params(): (opts, args) = parser.parse_args() args.pop(0) # dump event name + # Set up a logger + autoqa.logger.setup_logger(file_logger=not opts.dryrun, opts=opts) + # Run the tests locally, or schedule them through autotest? run_local = (opts.local or getbool(conf['local'])) @@ -284,18 +299,19 @@ def main(): # Print testlist, if requested if opts.listtests: - print ' '.join(testlist) + logging.info(' '.join(testlist)) sys.exit(0) # We're ready to run/queue tests now. exit_status = 0 for test in testlist: template = os.path.join(conf['testdir'], test, 'control') + for arch in test_vars[test]['archs']: try: control = prep_controlfile(template, test_vars[test]['autoqa_args'], arch) except IOError, e: - print "WARNING: could not process control file for %s: %s" % (test, str(e)) + logging.warn("Could not process control file for %s: %s" % (test, str(e))) continue testname='%s:%s.%s' % (event, test, arch) email = conf['notification_email'] @@ -308,11 +324,11 @@ def main(): required_arch=arch, dryrun=opts.dryrun, labels=test_vars[test]['labels']) if retval != 0: - print "ERROR: failed to schedule job %s" % testname + logging.error("Failed to schedule job %s" % testname) exit_status = 10 if opts.keep_control_file or opts.dryrun: - print "keeping %s at user request" % control + logging.info("Keeping %s at user request" % control) else: os.remove(control) diff --git a/conf/autoqa.conf b/conf/autoqa.conf index 792357c..d1eddfc 100644 --- a/conf/autoqa.conf +++ b/conf/autoqa.conf @@ -23,6 +23,13 @@ local = false # test results. By default current hostname is used. autotest_server = +# Path containing log file for the autoqa script and watchers +logfile = /var/log/autoqa.log + +# Set to "true" if you want to have debug messages from the autoqa script and +# watchers logged in the logfile +debug = false + [notifications] # The 'notifications' section controls which notifications about which events diff --git a/lib/autoqa/logger.py b/lib/autoqa/logger.py new file mode 100644 index 0000000..5de7392 --- /dev/null +++ b/lib/autoqa/logger.py @@ -0,0 +1,107 @@ +#!/usr/bin/python +# +# Copyright 2011, Red Hat, Inc. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Author: Martin Krizek + +'''This is a simple module providing a logger to autoqa script and watchers.''' + +import logging +import ConfigParser +import optparse +from autoqa.config import getbool, autoqa_conf + +def _get_level(opts): + ''' + Get log level according to the cmd line options. + + Args: + opts - optparse.Values object + ''' + loglevel = logging.INFO # verbose is default + if opts.debug: + loglevel = logging.DEBUG + elif opts.quiet: + loglevel = logging.ERROR + return loglevel + + +def add_options(parser): + ''' + Add command line options controlling log level. + + Args: + parser - optparse.OptionParser instance + ''' + log = optparse.OptionGroup(parser, "Verbose options") + log.add_option('-d', '--debug', action="store_true", + help='Show debug messages') + log.add_option('-q', '--quiet', action="store_true", + help='Suppress messages except errors') + parser.add_option_group(log) + + +def setup_logger(level=logging.INFO, file_logger=True, opts=None): + ''' Return a logger instance. + + Args: + level - minimum log level to be printed, provide + one of @attr logging.{DEBUG,INFO,WARN,ERROR,CRITICAL} + file_logger - indicates whether write into the log file or not + opts - optparse.Values object. If provided, the chosen log level + will be parsed from it and @param level will be ignored. + See also @method add_options. + ''' + + if opts is not None: + level = _get_level(opts) + + root = logging.getLogger() + root.setLevel(logging.NOTSET) # clear the existing value + + # Handler for standard output + # StreamHandler uses default formatter - just plain message + sh = logging.StreamHandler() + sh.setLevel(level) + root.addHandler(sh) + + if not file_logger: + return + + # Handler for a log file + # Everything is logged in the log file except debug msgs + # unless debug option is set in autoqa.conf, then also + # debug msgs are logged. 
+ try: + file_lvl = logging.DEBUG if getbool(autoqa_conf.get('general', 'debug')) else logging.INFO + logfile = autoqa_conf.get('general', 'logfile') + except ConfigParser.Error, e: + # default, in case autoqa.conf is broken + file_lvl = logging.INFO + logfile = '/var/log/autoqa.log' + + try: + fh = logging.FileHandler(logfile) + except IOError, e: + logging.warning("%s" % e) + return + fh.setLevel(file_lvl) + log_format = '[%(filename)s:%(lineno)d] ' if level == logging.DEBUG else '%(module)-12s ' + log_format += '%(asctime)s %(levelname)-7s %(message)s' + formatter = logging.Formatter(fmt=log_format, datefmt='%Y-%m-%d %H:%M:%S') + fh.setFormatter(formatter) + root.addHandler(fh) diff --git a/watchers/compose/watcher.py b/watchers/compose/watcher.py index 57d8c4a..6c8123f 100755 --- a/watchers/compose/watcher.py +++ b/watchers/compose/watcher.py @@ -29,14 +29,20 @@ import subprocess from autoqa.repoinfo import repoinfo import optparse from autoqa.config import getbool +import logging +import autoqa.logger # Set up the option parser parser = optparse.OptionParser(description='A utility to watch a set of \ compose trees for changes and to kick off tests if the compose changes.') parser.add_option('--dryrun', '--dry-run', action='store_true', help='Do not actually execute commands, just show what would be done') +autoqa.logger.add_options(parser) (opts, args) = parser.parse_args() +# Set up a logger +autoqa.logger.setup_logger(file_logger=not opts.dryrun, opts=opts) + # Set the default arch to our placeholder '$ARCH' repoinfo.setarch('$ARCH') @@ -75,7 +81,7 @@ for reponame in watchcomposes: remote = urlgrabber.urlread(urlpath) # XXX maybe less broad exception? except Exception, e: - print "Error from %s: %s" % (urlpath, e) + logging.error("Error from %s: %s" % (urlpath, e)) continue # Now compare the remote to the cache if cache != remote: @@ -91,7 +97,7 @@ for reponame, arches in sorted(testable.items()): '--arch', arch] harnesscall.append(repo['url'].replace('$ARCH', arch)) if opts.dryrun: - print ' '.join(harnesscall) + logging.info(' '.join(harnesscall)) continue # update cache diff --git a/watchers/git-post-receive/autoqa-git-hook.wsgi b/watchers/git-post-receive/autoqa-git-hook.wsgi index 8dae59a..214638a 100755 --- a/watchers/git-post-receive/autoqa-git-hook.wsgi +++ b/watchers/git-post-receive/autoqa-git-hook.wsgi @@ -8,12 +8,15 @@ import time import subprocess import urlgrabber import logging -# XXX when completed, import autoqa.logger +import autoqa.logger from autoqa.repoinfo import repoinfo from autoqa.config import autoqa_conf from wsgiref.validate import validator from wsgiref.simple_server import make_server +# Set up a logger +autoqa.logger.setup_logger(logging.INFO, True) + # Setup a cache path cachedir = '/var/cache/autoqa/git-post-receive' try: diff --git a/watchers/koji-bodhi/watcher.py b/watchers/koji-bodhi/watcher.py index 13a426c..17c66b1 100755 --- a/watchers/koji-bodhi/watcher.py +++ b/watchers/koji-bodhi/watcher.py @@ -29,6 +29,8 @@ import time, calendar import pickle import subprocess import optparse +import logging +import autoqa.logger from autoqa import koji_utils from autoqa import bodhi_utils from autoqa.repoinfo import repoinfo @@ -41,23 +43,20 @@ class KojiWatcher(object): cachedir = '/var/cache/autoqa/watch-koji-builds/' cachefile = os.path.join(cachedir, 'koji_history_prevtimes.cache') - def __init__(self, verbose = False, dry_run = False, prevtime = None, only_tags = []): + def __init__(self, dry_run = False, prevtime = None, only_tags = []): self.dry_run 
= dry_run - self.verbose = verbose if not os.path.isdir(self.cachedir): - if self.verbose: print "Initializing cache" + logging.info("Initializing cache") os.makedirs(self.cachedir) - if self.verbose: - print "Connecting to koji:", self.kojiserver + logging.info("Connecting to koji: %s" % self.kojiserver) self.session = koji_utils.SimpleKojiClientSession(self.kojiserver, self.kojiopts) self.session.check_connection() # If user did not set the -prevtime parameter, use cached if prevtime is None: - if self.verbose: - print "Loading prevtimes from cache" + logging.info("Loading prevtimes from cache") self.default_prevtime = calendar.timegm(time.gmtime()) - 3*60*60 # now - 3 hours in UTC self.prevtimes = self._load_prevtimes_from_cache() # returns dict # self.prevtimes is a dict. if the respective key is not found, @@ -67,8 +66,7 @@ class KojiWatcher(object): else: self.default_prevtime = prevtime self.prevtimes = {} # if this is empty, default_prevtime is used (see list_new_builds) - if self.verbose: - print "Loading taglist" + logging.info("Loading taglist") self.taglist, self.master_repos, self.pending_repos = self._load_repoinfo(only_tags) # returns tuple of sets def _load_repoinfo(self, only_tags=[]): @@ -126,8 +124,7 @@ class KojiWatcher(object): taglist.add(tagname) pending_repos.add(tagname) - if self.verbose: - print "\ntaglist:", sorted(taglist), "\n\n", "pending_repos:", sorted(pending_repos), "\n" + logging.info("\ntaglist: %s\n\n pending_repos: %s\n", sorted(taglist), sorted(pending_repos)) return taglist, master_repos, pending_repos def _load_prevtimes_from_cache(self): @@ -164,19 +161,15 @@ class KojiWatcher(object): """ updates = [] # get history from koji & filter everything newer than prevtime - if self.verbose: - print " Fetching tagHistory from koji" + logging.info(" Fetching tagHistory from koji") updates = filter(lambda pkg: pkg['create_ts'] > prevtime, self.session.tagHistory(tag = tag)) - if self.verbose: - print " Sorting according to create_ts" + logging.info(" Sorting according to create_ts") updates.sort(key = lambda pkg: pkg['create_ts']) # sanity_check - filter just "active" builds - if self.verbose: - print " Filtering out non-active builds" + logging.info(" Filtering out non-active builds") non_active = filter(lambda pkg: pkg['active'] is None, updates) #TODO: print non-active builds to stderr - these skipped our attention somehow - if self.verbose: - print " Filtering active builds" + logging.info(" Filtering active builds") updates = filter(lambda pkg: pkg['active'] is not None, updates) # update the appropriate prevtime in self.prevtimes @@ -185,8 +178,7 @@ class KojiWatcher(object): # sadly the tagHistory does not all the required information # we need to run the tests, so let's fetch the 'good' info - if self.verbose: - print " Getting real info about builds" + logging.info(" Getting real info about builds") for i in range(len(updates)): b = updates[i] updates[i] = self.session.getBuild("%s-%s-%s" % (b['name'], b['version'], b['release'])) @@ -215,32 +207,26 @@ class KojiWatcher(object): for tag in taglist: tagged_builds[tag] = [] - if self.verbose: - print " Fetching builds since:", prevtime + logging.info(" Fetching builds since: %s", prevtime) builds = self.session.list_builds_since(prevtime) try: builds += self.prevtimes['untagged_builds'] # untagged builds from previous run - if self.verbose: - print " Adding previously stored untagged builds" + logging.info(" Adding previously stored untagged builds") except KeyError: # no previously stored undagged 
builds, pass - if self.verbose: - print " No previously stored untagged builds" + logging.info(" No previously stored untagged builds") pass - if self.verbose: - print " Sorting according to completion_ts" + logging.info(" Sorting according to completion_ts") builds.sort(key = lambda pkg: pkg['completion_ts']) try: self.prevtimes['updates_candidate'] = builds[-1]['completion_ts'] except IndexError: - if self.verbose: - print " No new builds since last check" + logging.info(" No new builds since last check") pass - if self.verbose: - print " Assorting according to tags" + logging.info(" Assorting according to tags") for nvr in builds: tags = set(self.session.tag_history(nvr)) if tags: @@ -250,8 +236,7 @@ class KojiWatcher(object): elif not self.session.build_failed(nvr): # No tag yet but the build is still OK - check again later untagged_builds.append(nvr) - if self.verbose: - print " Assorting done" + logging.info(" Assorting done") self.prevtimes['untagged_builds'] = untagged_builds @@ -285,29 +270,25 @@ class KojiWatcher(object): # ... taglist = list(self.taglist) for tag in filter(lambda x: x.endswith("-pending"), taglist): - if self.verbose: - print "Checking", tag + logging.info("Checking %s" % tag) try: prevtime = self.prevtimes[tag] except KeyError: prevtime = self.default_prevtime - if self.verbose: - print 'Searching for new builds since %s UTC' % time.asctime(time.gmtime(prevtime)) + logging.info('Searching for new builds since %s UTC' % time.asctime(time.gmtime(prevtime))) history[tag] = self.new_builds_since(tag, prevtime) # checking the non-pending tags the old way taglist = filter(lambda x: not x.endswith("-pending"), taglist) - if self.verbose: - print "Checking the old way:", sorted(taglist) + logging.info("Checking the old way: %s" % sorted(taglist)) try: prevtime = self.prevtimes['updates_candidate'] except KeyError: prevtime = self.default_prevtime - if self.verbose: - print 'Searching for new builds since %s UTC' % time.asctime(time.gmtime(prevtime)) + logging.info('Searching for new builds since %s UTC' % time.asctime(time.gmtime(prevtime))) # fetch the new builds new_builds = self.new_builds_in_updates_candidate(set(taglist), prevtime) # and store them to the history dict the 'right' way @@ -335,8 +316,7 @@ class KojiWatcher(object): # schedule koji events for tag in sorted(new_builds.keys()): bodhi_skip = [] - if self.verbose: - print "Scheduling tests for", tag + logging.info("Scheduling tests for %s" % tag) for b in new_builds[tag]: # Get a list of all package arches in this build arches = [r['arch'] for r in self.session.listRPMs(b['build_id'])] @@ -356,14 +336,13 @@ class KojiWatcher(object): # the autoqa bodhi-update events for this particular nvrs if b['nvr'] in bodhi_skip: - if self.verbose: - print "Skipping %s since it's a part of already scheduled bodhi update" % b['nvr'] + logging.info("Skipping %s since it's a part of already scheduled bodhi update" % b['nvr']) continue bodhi_info = bodhi_utils.query_update(b['nvr']) if bodhi_info is None: - print >> sys.stderr, "Update %s not found in Bodhi" % b['nvr'] + logging.error("Update %s not found in Bodhi" % b['nvr']) continue # inspect the 'builds' part of bodhi info. 
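(Illustration only, not part of the patch.) The converted calls in this watcher mix two equivalent logging styles: most pass a pre-built string ("Checking %s" % tag), while a few pass the format string and its arguments separately ("Fetching builds since: %s", prevtime). Both produce the same record text; the second defers interpolation until the record is actually emitted. A minimal standalone sketch, with an illustrative tag value that is not taken from the patch:

    import logging

    logging.basicConfig(level=logging.INFO)   # standalone setup for this sketch only
    tag = 'dist-f16-updates-candidate'        # illustrative example value

    logging.info("Checking %s" % tag)         # eager: the string is built before the call
    logging.info("Checking %s", tag)          # lazy: interpolated only if the record is emitted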
@@ -397,8 +376,7 @@ class KojiWatcher(object): # add the nvrs to the respective call harnesscall.extend(stashed_nvrs) - if self.dry_run or self.verbose: - print " ".join(harnesscall) + logging.info(" ".join(harnesscall)) # do not schedule jobs on dry_run if self.dry_run: @@ -420,8 +398,7 @@ class KojiWatcher(object): 'batch' jobs using the data gathered once. """ exit_status = 0 - if self.verbose: - print "Scheduling batch tests" + logging.info("Scheduling batch tests") for tag in sorted(new_builds.keys()): # Skip tags with no new entries if len(new_builds[tag]) == 0: @@ -456,8 +433,8 @@ class KojiWatcher(object): harnesscall.extend(harness_nvrs) - if self.dry_run or self.verbose: - print " ".join(harnesscall) + logging.info(" ".join(harnesscall)) + # do not schedule jobs on dry_run if self.dry_run: continue @@ -478,16 +455,13 @@ class KojiWatcher(object): """ - if self.verbose: - print "Looking for new builds" + logging.info("Looking for new builds") new_builds = self.list_new_builds() # this updates self.prevtimes if not self.dry_run: - if self.verbose: - print "Saving prevtimes to cache" + logging.info("Saving prevtimes to cache") self._save_prevtimes_to_cache() - if self.verbose: - print "Scheduling jobs" + logging.info("Scheduling jobs") exit_status = [] exit_status.append(self.schedule_jobs(new_builds)) @@ -512,10 +486,11 @@ tags for new builds and kick off tests when new builds/packages are found.') parser.add_option('-p', '--prevtime', action='store', type='float', default=None, help='How far back to look for new builds (seconds since last epoch)') - parser.add_option('--verbose', action='store_true', - help='Print extra information') + autoqa.logger.add_options(parser) (opts, args) = parser.parse_args() + # Set up a logger + autoqa.logger.setup_logger(file_logger=not opts.dryrun, opts=opts) # Because paralel runs of koji watcher caused problems with not-updated # cache file (prevtimes), we use locking mechanism to prevent paralel runs. @@ -537,15 +512,15 @@ tags for new builds and kick off tests when new builds/packages are found.') # cachedir is created in KojiWatcher.__init__(), checking for the # presence of cachedir below should offer additional protection. if os.path.exists(KojiWatcher.cachedir) and lock.acquire() is not True: - print "Another instance of this watcher is already running. Exiting..." + logging.warn("Another instance of this watcher is already running. Exiting...") sys.exit(0) try: - watcher = KojiWatcher(verbose = opts.verbose, dry_run = opts.dryrun, prevtime = opts.prevtime, only_tags = opts.tags) + watcher = KojiWatcher(dry_run = opts.dryrun, prevtime = opts.prevtime, only_tags = opts.tags) exit_status = watcher.run() except KeyboardInterrupt: - print "Exiting on keyboard interrupt." 
+ logging.warn("Exiting on keyboard interrupt.") lock.release() sys.exit(1) diff --git a/watchers/yum-repo/watcher.py b/watchers/yum-repo/watcher.py index 46bbb13..13b8a2a 100755 --- a/watchers/yum-repo/watcher.py +++ b/watchers/yum-repo/watcher.py @@ -28,14 +28,20 @@ import sys import subprocess from autoqa.repoinfo import repoinfo import optparse +import logging +import autoqa.logger # Set up the option parser parser = optparse.OptionParser(description='A utility to watch a set of repos \ for changes and to kick off tests if the repo changes.') parser.add_option('--dryrun', '--dry-run', action='store_true', help='Do not actually execute commands, just show what would be done') +autoqa.logger.add_options(parser) (opts, args) = parser.parse_args() +# Set up a logger +autoqa.logger.setup_logger(file_logger=not opts.dryrun, opts=opts) + # Set the default arch to our placeholder '$ARCH' repoinfo.setarch('$ARCH') @@ -69,7 +75,7 @@ for reponame in repoinfo.repos(): 'repodata', 'repomd.xml') remote = urlgrabber.urlread(urlpath) except Exception, e: - print "Error from %s: %s" % (urlpath, e) + logging.error("Error from %s: %s" % (urlpath, e)) continue # Now compare the remote to the cache if cache != remote: @@ -89,7 +95,7 @@ for reponame, arches in sorted(testable.items()): harnesscall.append(repo['url'].replace('$ARCH', arch)) if opts.dryrun: - print ' '.join(harnesscall) + logging.info(' '.join(harnesscall)) continue # Update the cache files