#2049 extending flake8 rules
Merged 4 years ago by mikem. Opened 4 years ago by julian8628.
julian8628/koji flake8 into master

file modified: .flake8
+31 -17
@@ -1,20 +1,34 @@ 

  [flake8]

- select = I,C,F4

- ignore = F

- exclude = .git,

-           __pycache__,

-           tests,

-           docs,

-           ./koji-*

- filename = *.py

-            ./cli/koji

-            ./builder/kojid

-            ./builder/mergerepos

-            ./hub/rpmdiff

-            ./util/kojira

-            ./util/koji-gc

-            ./util/koji-shadow

-            ./util/koji-sweep-db

-            ./vm/kojivmd

+ select = E,F,W,C,I

+ ignore =

+     # too many leading '#' for block comment

+     E266,

+     # do not assign a lambda expression, use a def

+     E731,

+     # [PY2] list comprehension redefines `name` from line `N`

+     F812,

+     # line break after binary operator

+     W504

+ max_line_length = 99

+ exclude =

+     .git,

+     __pycache__,

+     tests,

+     docs,

+     ./koji-*/*

+ 

+ filename =

+     *.py,

+     ./cli/koji,

+     ./builder/kojid,

+     ./builder/mergerepos,

+     ./hub/rpmdiff,

+     ./util/kojira,

+     ./util/koji-gc,

+     ./util/koji-shadow,

+     ./util/koji-sweep-db,

+     ./util/koji-sidetag-cleanup,

+     ./vm/kojivmd

+ 

  application_import_names = koji,koji_cli,kojihub,kojiweb,__main__

  import_order_style = pep8
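
For context: `select = E,F,W,C,I` turns on the full pycodestyle error and warning sets (`E`, `W`) and all pyflakes checks (`F`) in addition to the complexity (`C`) and import-order (`I`) checks the old config already used, while the explicit `ignore` list replaces flake8's built-in default ignores with just the four codes shown. The `filename` list is needed because executables such as `./cli/koji` and `./builder/kojid` have no `.py` suffix and would otherwise be skipped. A minimal sketch of two of the ignored codes:

```python
## E266 ("too many leading '#' for block comment") is ignored, so
## double-hash banner comments like this one stay legal.

# E731 ("do not assign a lambda expression, use a def") is ignored too,
# so short lambda helpers survive the stricter select list:
to_list = lambda it: list(it)
print(to_list(range(3)))  # [0, 1, 2]
```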

file modified: builder/kojid
+794 -646
@@ -60,8 +60,12 @@ 

  import koji.tasks

  import koji.util

  from koji.daemon import SCM, TaskManager, incremental_upload, log_output

- from koji.tasks import (BaseTaskHandler, MultiPlatformTask, ServerExit,

-                         ServerRestart)

+ from koji.tasks import (

+     BaseTaskHandler,

+     MultiPlatformTask,

+     ServerExit,

+     ServerRestart

+ )

  from koji.util import dslice, dslice_ex, isSuccess, parseStatus, to_list

  

  try:
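
The import reformatting above trades the aligned hanging indent for one name per line inside parentheses; under the new 99-character limit this keeps future additions down to a one-line diff. The same style shown with a stdlib module, as a sketch:

```python
# One name per line inside parentheses: adding or removing a name
# touches a single line and never re-flows its neighbours.
from os.path import (
    basename,
    dirname,
    join,
)

print(join(dirname("/a/b/c"), basename("/x/y")))  # /a/b/y
```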
@@ -101,7 +105,7 @@ 

      import pykickstart.parser as ksparser

      import pykickstart.handlers.control as kscontrol

      import pykickstart.errors as kserrors

-     import iso9660 # from pycdio

+     import iso9660  # from pycdio

      image_enabled = True

  except ImportError:  # pragma: no cover

      image_enabled = False
@@ -123,6 +127,7 @@ 

  except ImportError:  # pragma: no cover

      ozif_enabled = False

  

+ 

  def main(options, session):

      logger = logging.getLogger("koji.build")

      logger.info('Starting up')
@@ -131,25 +136,27 @@ 

      tm.findHandlers(globals())

      tm.findHandlers(vars(koji.tasks))

      if options.plugin:

-         #load plugins

+         # load plugins

          pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))

          for name in options.plugin:

              logger.info('Loading plugin: %s' % name)

              tm.scanPlugin(pt.load(name))

+ 

      def shutdown(*args):

          raise SystemExit

+ 

      def restart(*args):

          logger.warn("Initiating graceful restart")

          tm.restart_pending = True

-     signal.signal(signal.SIGTERM,shutdown)

-     signal.signal(signal.SIGUSR1,restart)

-     while 1:

+     signal.signal(signal.SIGTERM, shutdown)

+     signal.signal(signal.SIGUSR1, restart)

+     while True:

          try:

              taken = False

              tm.updateBuildroots()

              tm.updateTasks()

              taken = tm.getNextTask()

-         except (SystemExit,ServerExit,KeyboardInterrupt):

+         except (SystemExit, ServerExit, KeyboardInterrupt):

              logger.warn("Exiting")

              break

          except ServerRestart:
@@ -160,7 +167,7 @@ 

              break

          except koji.RetryError:

              raise

-         except:

+         except Exception:

              # XXX - this is a little extreme

              # log the exception and continue

              logger.error(''.join(traceback.format_exception(*sys.exc_info())))
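
The `except:` to `except Exception:` changes here and below fix E722. A bare `except` also traps `SystemExit` and `KeyboardInterrupt`, which is exactly wrong for a daemon loop that must stay stoppable; `except Exception` lets those `BaseException` subclasses propagate to the explicit handler above. A runnable sketch of the distinction:

```python
# "except Exception" does not catch KeyboardInterrupt, so the interrupt
# escapes the inner handler and reaches the outer one.
try:
    try:
        raise KeyboardInterrupt
    except Exception:
        print("handled")            # never runs
except KeyboardInterrupt:
    print("interrupt propagated")   # this runs
```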
@@ -170,7 +177,7 @@ 

                  # The load-balancing code in getNextTask() will prevent a single builder

                  # from getting overloaded.

                  time.sleep(options.sleeptime)

-         except (SystemExit,KeyboardInterrupt):

+         except (SystemExit, KeyboardInterrupt):

              logger.warn("Exiting")

              break

      logger.warn("Shutting down, please wait...")
@@ -181,20 +188,20 @@ 

  

  class BuildRoot(object):

  

-     def __init__(self,session,options,*args,**kwargs):

+     def __init__(self, session, options, *args, **kwargs):

          self.logger = logging.getLogger("koji.build.buildroot")

          self.session = session

          self.options = options

          if len(args) + len(kwargs) == 1:

              # manage an existing mock buildroot

-             self._load(*args,**kwargs)

+             self._load(*args, **kwargs)

          else:

-             self._new(*args,**kwargs)

+             self._new(*args, **kwargs)

  

      def _load(self, data):

-         #manage an existing buildroot

+         # manage an existing buildroot

          if isinstance(data, dict):

-             #assume data already pulled from db

+             # assume data already pulled from db

              self.id = data['id']

          else:

              self.id = data
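
Much of the churn in this file is E231 ("missing whitespace after ','"), as in the `_load`/`_new` calls above; the fix is purely cosmetic and changes no behavior:

```python
def load(*args, **kwargs):
    return args, kwargs

load(1,2)    # old style: flagged as E231
load(1, 2)   # new style: clean
```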
@@ -226,8 +233,8 @@ 

          self.tag_id = self.config['id']

          self.tag_name = self.config['name']

          if self.config['id'] != repo_info['tag_id']:

-             raise koji.BuildrootError("tag/repo mismatch: %s vs %s" \

-                     % (self.config['name'], repo_info['tag_name']))

+             raise koji.BuildrootError("tag/repo mismatch: %s vs %s"

+                                       % (self.config['name'], repo_info['tag_name']))

          repo_state = koji.REPO_STATES[repo_info['state']]

          if repo_state == 'EXPIRED':

              # This should be ok. Expired repos are still intact, just not
@@ -259,14 +266,15 @@ 

      def _writeMockConfig(self):

          # mock config

          configdir = '/etc/mock/koji'

-         configfile = "%s/%s.cfg" % (configdir,self.name)

+         configfile = "%s/%s.cfg" % (configdir, self.name)

          self.mockcfg = "koji/%s" % self.name

  

          opts = {}

          for k in ('repoid', 'tag_name'):

              if hasattr(self, k):

                  opts[k] = getattr(self, k)

-         for k in ('mockdir', 'topdir', 'topurl', 'topurls', 'packager', 'vendor', 'distribution', 'mockhost', 'yum_proxy', 'rpmbuild_timeout'):

+         for k in ('mockdir', 'topdir', 'topurl', 'topurls', 'packager', 'vendor',

+                   'distribution', 'mockhost', 'yum_proxy', 'rpmbuild_timeout'):

              if hasattr(self.options, k):

                  opts[k] = getattr(self.options, k)

          opts['buildroot_id'] = self.id
@@ -291,8 +299,8 @@ 

                  opts['tag_macros'][macro] = self.config['extra'][key]

          output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)

  

-         #write config

-         with open(configfile,'w') as fo:

+         # write config

+         with open(configfile, 'w') as fo:

              fo.write(output)

  

      def _repositoryEntries(self, pi, plugin=False):
@@ -396,9 +404,9 @@ 

  

      def mock(self, args):

          """Run mock"""

-         mockpath = getattr(self.options,"mockpath","/usr/bin/mock")

+         mockpath = getattr(self.options, "mockpath", "/usr/bin/mock")

          cmd = [mockpath, "-r", self.mockcfg]

-         #if self.options.debug_mock:

+         # if self.options.debug_mock:

          #    cmd.append('--debug')

          # TODO: should we pass something like --verbose --trace instead?

          if 'mock.new_chroot' in self.config['extra']:
@@ -469,16 +477,18 @@ 

                      try:

                          stat_info = os.stat(fpath)

                          if not fd or stat_info.st_ino != inode or stat_info.st_size < size:

-                             # either a file we haven't opened before, or mock replaced a file we had open with

-                             # a new file and is writing to it, or truncated the file we're reading,

-                             # but our fd is pointing to the previous location in the old file

+                             # either a file we haven't opened before, or mock replaced a file we

+                             # had open with a new file and is writing to it, or truncated the file

+                             # we're reading, but our fd is pointing to the previous location in the

+                             # old file

                              if fd:

                                  self.logger.info('Rereading %s, inode: %s -> %s, size: %s -> %s' %

-                                                  (fpath, inode, stat_info.st_ino, size, stat_info.st_size))

+                                                  (fpath, inode, stat_info.st_ino, size,

+                                                   stat_info.st_size))

                                  fd.close()

                              fd = open(fpath, 'rb')

                          logs[fname] = (fd, stat_info.st_ino, stat_info.st_size or size, fpath)

-                     except:

+                     except Exception:

                          self.logger.error("Error reading mock log: %s", fpath)

                          self.logger.error(''.join(traceback.format_exception(*sys.exc_info())))

                          continue
@@ -495,7 +505,7 @@ 

                              ts_offsets[fname] = position

                      incremental_upload(self.session, fname, fd, uploadpath, logger=self.logger)

  

-             #clean up and return exit status of command

+             # clean up and return exit status of command

              for (fname, (fd, inode, size, fpath)) in logs.items():

                  if not fd:

                      continue
@@ -507,7 +517,7 @@ 

              return status[1]

  

          else:

-             #in no case should exceptions propagate past here

+             # in no case should exceptions propagate past here

              try:

                  self.session._forget()

                  if workdir:
@@ -516,15 +526,15 @@ 

                      fd = os.open(outfile, flags, 0o666)

                      os.dup2(fd, 1)

                      os.dup2(fd, 2)

-                 if os.getuid() == 0 and hasattr(self.options,"mockuser"):

+                 if os.getuid() == 0 and hasattr(self.options, "mockuser"):

                      self.logger.info('Running mock as %s' % self.options.mockuser)

-                     uid,gid = pwd.getpwnam(self.options.mockuser)[2:4]

+                     uid, gid = pwd.getpwnam(self.options.mockuser)[2:4]

                      os.setgroups([grp.getgrnam('mock')[2]])

-                     os.setregid(gid,gid)

-                     os.setreuid(uid,uid)

-                 os.execvp(cmd[0],cmd)

-             except:

-                 #diediedie

+                     os.setregid(gid, gid)

+                     os.setreuid(uid, uid)

+                 os.execvp(cmd[0], cmd)

+             except BaseException:

+                 # diediedie

                  print("Failed to exec mock")

                  print(''.join(traceback.format_exception(*sys.exc_info())))

                  os._exit(1)
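
Note the asymmetry: this handler becomes `except BaseException:` rather than `except Exception:`. In the forked child, once `os.execvp` fails nothing at all may propagate back up the parent's copied stack, so even `SystemExit` is trapped and the child dies via `os._exit`. A minimal sketch of the pattern (POSIX only):

```python
import os
import traceback

pid = os.fork()
if pid == 0:
    # Child: exec a command that does not exist, then die hard.
    try:
        os.execvp("no-such-command", ["no-such-command"])
    except BaseException:
        traceback.print_exc()
        os._exit(1)   # bypass finally/atexit; never return to the caller
else:
    os.waitpid(pid, 0)
```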
@@ -542,7 +552,7 @@ 

              raise koji.BuildrootError("could not init mock buildroot, %s" % self._mockResult(rv))

          # log kernel version

          self.mock(['--chroot', 'uname -r'])

-         self.session.host.setBuildRootList(self.id,self.getPackageList())

+         self.session.host.setBuildRootList(self.id, self.getPackageList())

  

      def _mockResult(self, rv, logfile=None):

          if logfile:
@@ -555,7 +565,7 @@ 

          return parseStatus(rv, 'mock') + msg

  

      def rebuild_srpm(self, srpm):

-         self.session.host.setBuildRootState(self.id,'BUILDING')

+         self.session.host.setBuildRootState(self.id, 'BUILDING')

  

          # unpack SRPM to tempdir

          srpm_dir = os.path.join(self.tmpdir(), 'srpm_unpacked')
@@ -586,12 +596,11 @@ 

              self.expire()

              raise koji.BuildError("error building srpm, %s" % self._mockResult(rv))

  

- 

      def build_srpm(self, specfile, sourcedir, source_cmd):

-         self.session.host.setBuildRootState(self.id,'BUILDING')

+         self.session.host.setBuildRootState(self.id, 'BUILDING')

          if source_cmd:

-             # call the command defined by source_cmd in the chroot so any required files not stored in

-             # the SCM can be retrieved

+             # call the command defined by source_cmd in the chroot so any required files not stored

+             # in the SCM can be retrieved

              chroot_sourcedir = sourcedir[len(self.rootdir()):]

              args = ['--no-clean', '--unpriv', '--cwd', chroot_sourcedir, '--chroot']

              args.extend(source_cmd)
@@ -615,19 +624,20 @@ 

              self.expire()

              raise koji.BuildError("error building srpm, %s" % self._mockResult(rv))

  

-     def build(self,srpm,arch=None):

+     def build(self, srpm, arch=None):

          # run build

-         self.session.host.setBuildRootState(self.id,'BUILDING')

+         self.session.host.setBuildRootState(self.id, 'BUILDING')

          args = ['--no-clean']

          if arch:

              args.extend(['--target', arch])

          args.extend(['--rebuild', srpm])

          rv = self.mock(args)

  

-         self.session.host.updateBuildRootList(self.id,self.getPackageList())

+         self.session.host.updateBuildRootList(self.id, self.getPackageList())

          if rv:

              self.expire()

-             raise koji.BuildError("error building package (arch %s), %s" % (arch, self._mockResult(rv)))

+             raise koji.BuildError("error building package (arch %s), %s" %

+                                   (arch, self._mockResult(rv)))

  

      def getPackageList(self):

          """Return a list of packages from the buildroot
@@ -656,9 +666,9 @@ 

              ts = rpm.TransactionSet()

              for h in ts.dbMatch():

                  pkg = koji.get_header_fields(h, fields)

-                 #skip our fake packages

+                 # skip our fake packages

                  if pkg['name'] in ['buildsys-build', 'gpg-pubkey']:

-                     #XXX config

+                     # XXX config

                      continue

                  pkg['payloadhash'] = koji.hex_string(pkg['sigmd5'])

                  del pkg['sigmd5']
@@ -682,7 +692,8 @@ 

              maven_files = []

              for repofile in files:

                  if koji.util.multi_fnmatch(repofile, self.options.maven_repo_ignore) or \

-                         koji.util.multi_fnmatch(os.path.join(relpath, repofile), self.options.maven_repo_ignore):

+                         koji.util.multi_fnmatch(os.path.join(relpath, repofile),

+                                                 self.options.maven_repo_ignore):

                      continue

                  if relpath == '' and repofile in ['scm-sources.zip', 'patches.zip']:

                      # special-case the archives of the sources and patches, since we drop them in
@@ -693,8 +704,10 @@ 

              if maven_files:

                  path_comps = relpath.split('/')

                  if len(path_comps) < 3:

-                     raise koji.BuildrootError('files found in unexpected path in local Maven repo, directory: %s, files: %s' % \

-                         (relpath, ', '.join([f['filename'] for f in maven_files])))

+                     raise koji.BuildrootError('files found in unexpected path in local Maven repo,'

+                                               ' directory: %s, files: %s' %

+                                               (relpath,

+                                                ', '.join([f['filename'] for f in maven_files])))

                  # extract the Maven info from the path within the local repo

                  maven_info = {'version': path_comps[-1],

                                'artifact_id': path_comps[-2],
@@ -706,8 +719,8 @@ 

      def mavenBuild(self, sourcedir, outputdir, repodir,

                     props=None, profiles=None, options=None, goals=None):

          self.session.host.setBuildRootState(self.id, 'BUILDING')

-         cmd = ['--no-clean', '--chroot', '--unpriv', '--cwd', sourcedir[len(self.rootdir()):], '--',

-                '/usr/bin/mvn', '-C']

+         cmd = ['--no-clean', '--chroot', '--unpriv', '--cwd', sourcedir[len(self.rootdir()):],

+                '--', '/usr/bin/mvn', '-C']

          if options:

              cmd.extend(options)

          if profiles:
@@ -728,13 +741,15 @@ 

          ignore_unknown = False

          if rv:

              ignore_unknown = True

-         self.session.host.updateMavenBuildRootList(self.id, self.task_id, self.getMavenPackageList(repodir),

+         self.session.host.updateMavenBuildRootList(self.id, self.task_id,

+                                                    self.getMavenPackageList(repodir),

                                                     ignore=self.getMavenPackageList(outputdir),

                                                     project=True, ignore_unknown=ignore_unknown,

                                                     extra_deps=self.deps)

          if rv:

              self.expire()

-             raise koji.BuildrootError('error building Maven package, %s' % self._mockResult(rv, logfile='root.log'))

+             raise koji.BuildrootError('error building Maven package, %s' %

+                                       self._mockResult(rv, logfile='root.log'))

  

      def markExternalRPMs(self, rpmlist):

          """Check rpms against pkgorigins and add external repo data to the external ones
@@ -744,9 +759,9 @@ 

          external_repos = self.session.getExternalRepoList(self.repo_info['tag_id'],

                                                            event=self.repo_info['create_event'])

          if not external_repos:

-             #nothing to do

+             # nothing to do

              return

-         #index external repos by expanded url

+         # index external repos by expanded url

          erepo_idx = {}

          for erepo in external_repos:

              # substitute $arch in the url with the arch of the repo we're generating
@@ -755,7 +770,7 @@ 

          pathinfo = koji.PathInfo(topdir='')

  

          repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])

-         opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

+         opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])

          opts['tempdir'] = self.options.workdir

  

          # prefer librepo
@@ -781,34 +796,35 @@ 

              pkgorigins = r.getinfo(librepo.LRR_YUM_REPOMD)['origin']['location_href']

              koji.util.rmtree(tmpdir)

          elif yum_available:

-             #XXX - cheap hack to get relative paths

+             # XXX - cheap hack to get relative paths

              repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml')

              with koji.openRemoteFile(repomdpath, **opts) as fo:

                  try:

                      repodata = repoMDObject.RepoMD('ourrepo', fo)

-                 except:

-                     raise koji.BuildError("Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch))

-             data  = repodata.getData('origin')

-             pkgorigins  = data.location[1]

+                 except Exception:

+                     raise koji.BuildError("Unable to parse repomd.xml file for %s" %

+                                           os.path.join(repodir, self.br_arch))

+             data = repodata.getData('origin')

+             pkgorigins = data.location[1]

          else:

              # shouldn't occur

              raise koji.GenericError("install librepo or yum")

  

          relpath = os.path.join(repodir, self.br_arch, pkgorigins)

          with koji.openRemoteFile(relpath, **opts) as fo:

-             #at this point we know there were external repos at the create event,

-             #so there should be an origins file.

+             # at this point we know there were external repos at the create event,

+             # so there should be an origins file.

              origin_idx = {}

              # don't use 'with GzipFile' as it is not supported on py2.6

              fo2 = GzipFile(fileobj=fo, mode='r')

              if six.PY3:

                  fo2 = io.TextIOWrapper(fo2, encoding='utf-8')

              for line in fo2:

-                 parts=line.split(None, 2)

+                 parts = line.split(None, 2)

                  if len(parts) < 2:

                      continue

-                 #first field is formated by yum as [e:]n-v-r.a

-                 nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" %  koji.parse_NVRA(parts[0])

+                 # first field is formated by yum as [e:]n-v-r.a

+                 nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0])

                  origin_idx[nvra] = parts[1]

              fo2.close()

          # mergerepo starts from a local repo in the task workdir, so internal
@@ -868,13 +884,13 @@ 

              return "%s%s" % (self.rootdir(), base)

  

      def expire(self):

-         self.session.host.setBuildRootState(self.id,'EXPIRED')

+         self.session.host.setBuildRootState(self.id, 'EXPIRED')

  

  

  class ChainBuildTask(BaseTaskHandler):

  

      Methods = ['chainbuild']

-     #mostly just waiting on other tasks

+     # mostly just waiting on other tasks

      _taskWeight = 0.1

  

      def handler(self, srcs, target, opts=None):
@@ -896,15 +912,16 @@ 

              raise koji.GenericError('unknown build target: %s' % target)

          nvrs = []

          for n_level, build_level in enumerate(srcs):

-             #if there are any nvrs to wait on, do so

+             # if there are any nvrs to wait on, do so

              if nvrs:

                  task_id = self.session.host.subtask(method='waitrepo',

-                                                     arglist=[target_info['build_tag_name'], None, nvrs],

+                                                     arglist=[

+                                                         target_info['build_tag_name'], None, nvrs],

                                                      label="wait %i" % n_level,

                                                      parent=self.id)

                  self.wait(task_id, all=True, failany=True)

              nvrs = []

-             #kick off the builds for this level

+             # kick off the builds for this level

              build_tasks = []

              for n_src, src in enumerate(build_level):

                  if SCM.is_scm_url(src):
@@ -915,11 +932,11 @@ 

                      build_tasks.append(task_id)

                  else:

                      nvrs.append(src)

-                     #next pass will wait for these

+                     # next pass will wait for these

              if build_tasks:

-                 #the level could have been all nvrs

+                 # the level could have been all nvrs

                  self.wait(build_tasks, all=True, failany=True)

-             #see what builds we created in this batch so the next pass can wait for them also

+             # see what builds we created in this batch so the next pass can wait for them also

              for build_task in build_tasks:

                  builds = self.session.listBuilds(taskID=build_task)

                  if builds:
@@ -929,7 +946,7 @@ 

  class BuildTask(BaseTaskHandler):

  

      Methods = ['build']

-     #we mostly just wait on other tasks

+     # we mostly just wait on other tasks

      _taskWeight = 0.2

  

      def handler(self, src, target, opts=None):
@@ -949,7 +966,7 @@ 

              self.event_id = repo_info['create_event']

          else:

              repo_info = None

-             #we'll wait for a repo later (self.getRepo)

+             # we'll wait for a repo later (self.getRepo)

              self.event_id = None

          task_info = self.session.getTaskInfo(self.id)

          target_info = None
@@ -959,10 +976,10 @@ 

              dest_tag = target_info['dest_tag']

              build_tag = target_info['build_tag']

              if repo_info is not None:

-                 #make sure specified repo matches target

+                 # make sure specified repo matches target

                  if repo_info['tag_id'] != target_info['build_tag']:

-                     raise koji.BuildError('Repo/Target mismatch: %s/%s' \

-                             % (repo_info['tag_name'], target_info['build_tag_name']))

+                     raise koji.BuildError('Repo/Target mismatch: %s/%s'

+                                           % (repo_info['tag_name'], target_info['build_tag_name']))

          else:

              # if repo_id is specified, we can allow the 'target' arg to simply specify

              # the destination tag (since the repo specifies the build tag).
@@ -970,7 +987,7 @@ 

                  raise koji.GenericError('unknown build target: %s' % target)

              build_tag = repo_info['tag_id']

              if target is None:

-                 #ok, call it skip-tag for the buildroot tag

+                 # ok, call it skip-tag for the buildroot tag

                  self.opts['skip_tag'] = True

                  dest_tag = build_tag

              else:
@@ -978,31 +995,31 @@ 

                  if not taginfo:

                      raise koji.GenericError('neither tag nor target: %s' % target)

                  dest_tag = taginfo['id']

-         #policy checks...

+         # policy checks...

          policy_data = {

-             'user_id' : task_info['owner'],

-             'source' : src,

-             'task_id' : self.id,

-             'build_tag' : build_tag,  #id

-             'skip_tag' : bool(self.opts.get('skip_tag')),

+             'user_id': task_info['owner'],

+             'source': src,

+             'task_id': self.id,

+             'build_tag': build_tag,  # id

+             'skip_tag': bool(self.opts.get('skip_tag')),

          }

          if target_info:

              policy_data['target'] = target_info['id'],

          if not self.opts.get('skip_tag'):

-             policy_data['tag'] = dest_tag  #id

+             policy_data['tag'] = dest_tag  # id

          if not SCM.is_scm_url(src) and not opts.get('scratch'):

-             #let hub policy decide

+             # let hub policy decide

              self.session.host.assertPolicy('build_from_srpm', policy_data)

          if opts.get('repo_id') is not None:

              # use of this option is governed by policy

              self.session.host.assertPolicy('build_from_repo_id', policy_data)

          if not repo_info:

              repo_info = self.getRepo(build_tag, builds=opts.get('wait_builds'),

-                                      wait=opts.get('wait_repo'))  #(subtask)

+                                      wait=opts.get('wait_repo'))  # (subtask)

              self.event_id = self.session.getLastEvent()['id']

          srpm = self.getSRPM(src, build_tag, repo_info['id'])

          h = self.readSRPMHeader(srpm)

-         data = koji.get_header_fields(h, ['name','version','release','epoch'])

+         data = koji.get_header_fields(h, ['name', 'version', 'release', 'epoch'])

          data['task_id'] = self.id

          if getattr(self, 'source', False):

              data['source'] = self.source['source']
@@ -1010,52 +1027,52 @@ 

  

          extra_arches = None

          self.logger.info("Reading package config for %(name)s" % data)

-         pkg_cfg = self.session.getPackageConfig(dest_tag,data['name'],event=self.event_id)

+         pkg_cfg = self.session.getPackageConfig(dest_tag, data['name'], event=self.event_id)

          self.logger.debug("%r" % pkg_cfg)

          if pkg_cfg is not None:

              extra_arches = pkg_cfg.get('extra_arches')

          if not self.opts.get('skip_tag') and not self.opts.get('scratch'):

              # Make sure package is on the list for this tag

              if pkg_cfg is None:

-                 raise koji.BuildError("package %s not in list for tag %s" \

-                         % (data['name'], target_info['dest_tag_name']))

+                 raise koji.BuildError("package %s not in list for tag %s"

+                                       % (data['name'], target_info['dest_tag_name']))

              elif pkg_cfg['blocked']:

-                 raise koji.BuildError("package %s is blocked for tag %s" \

-                         % (data['name'], target_info['dest_tag_name']))

+                 raise koji.BuildError("package %s is blocked for tag %s"

+                                       % (data['name'], target_info['dest_tag_name']))

              # TODO - more pre tests

          archlist = self.getArchList(build_tag, h, extra=extra_arches)

-         #let the system know about the build we're attempting

+         # let the system know about the build we're attempting

          if not self.opts.get('scratch'):

-             #scratch builds do not get imported

+             # scratch builds do not get imported

              build_id = self.session.host.initBuild(data)

-         #(initBuild raises an exception if there is a conflict)

-         failany = (self.opts.get('fail_fast', False)

-                     or not getattr(self.options, 'build_arch_can_fail', False))

+         # (initBuild raises an exception if there is a conflict)

+         failany = (self.opts.get('fail_fast', False) or

+                    not getattr(self.options, 'build_arch_can_fail', False))

          try:

-             self.extra_information = { "src": src, "data": data, "target": target }

-             srpm,rpms,brmap,logs = self.runBuilds(srpm, build_tag, archlist,

-                     repo_info['id'], failany=failany)

+             self.extra_information = {"src": src, "data": data, "target": target}

+             srpm, rpms, brmap, logs = self.runBuilds(srpm, build_tag, archlist,

+                                                      repo_info['id'], failany=failany)

  

              if opts.get('scratch'):

-                 #scratch builds do not get imported

-                 self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)

+                 # scratch builds do not get imported

+                 self.session.host.moveBuildToScratch(self.id, srpm, rpms, logs=logs)

              else:

-                 self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs)

-         except (SystemExit,ServerExit,KeyboardInterrupt):

-             #we do not trap these

+                 self.session.host.completeBuild(self.id, build_id, srpm, rpms, brmap, logs=logs)

+         except (SystemExit, ServerExit, KeyboardInterrupt):

+             # we do not trap these

              raise

-         except:

+         except Exception:

              if not self.opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  self.session.host.failBuild(self.id, build_id)

              # reraise the exception

              raise

          if not self.opts.get('skip_tag') and not self.opts.get('scratch'):

-             self.tagBuild(build_id,dest_tag)

+             self.tagBuild(build_id, dest_tag)

  

      def getSRPM(self, src, build_tag, repo_id):

          """Get srpm from src"""

-         if isinstance(src,str):

+         if isinstance(src, str):

              if SCM.is_scm_url(src):

                  return self.getSRPMFromSCM(src, build_tag, repo_id)

              else:
@@ -1067,12 +1084,14 @@ 

                      return src

          else:

              raise koji.BuildError('Invalid source specification: %s' % src)

-             #XXX - other methods?

+             # XXX - other methods?

  

      def getSRPMFromSRPM(self, src, build_tag, repo_id):

          # rebuild srpm in mock, so it gets correct disttag, rpm version, etc.

          task_id = self.session.host.subtask(method='rebuildSRPM',

-                                             arglist=[src, build_tag, {'repo_id': repo_id, 'scratch': self.opts.get('scratch')}],

+                                             arglist=[src, build_tag, {

+                                                 'repo_id': repo_id,

+                                                 'scratch': self.opts.get('scratch')}],

                                              label='srpm',

                                              parent=self.id)

          # wait for subtask to finish
@@ -1085,9 +1104,11 @@ 

          return srpm

  

      def getSRPMFromSCM(self, url, build_tag, repo_id):

-         #TODO - allow different ways to get the srpm

+         # TODO - allow different ways to get the srpm

          task_id = self.session.host.subtask(method='buildSRPMFromSCM',

-                                             arglist=[url, build_tag, {'repo_id': repo_id, 'scratch': self.opts.get('scratch')}],

+                                             arglist=[url, build_tag, {

+                                                 'repo_id': repo_id,

+                                                 'scratch': self.opts.get('scratch')}],

                                              label='srpm',

                                              parent=self.id)

          # wait for subtask to finish
@@ -1100,10 +1121,10 @@ 

          return srpm

  

      def readSRPMHeader(self, srpm):

-         #srpm arg should be a path relative to <BASEDIR>/work

+         # srpm arg should be a path relative to <BASEDIR>/work

          self.logger.debug("Reading SRPM")

          relpath = "work/%s" % srpm

-         opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

+         opts = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])

          opts['tempdir'] = self.workdir

          with koji.openRemoteFile(relpath, **opts) as fo:

              koji.check_rpm_file(fo)
@@ -1117,13 +1138,13 @@ 

          buildconfig = self.session.getBuildConfig(build_tag, event=self.event_id)

          arches = buildconfig['arches']

          if not arches:

-             #XXX - need to handle this better

+             # XXX - need to handle this better

              raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)

          tag_archlist = [koji.canonArch(a) for a in arches.split()]

          self.logger.debug('arches: %s' % arches)

          if extra:

              self.logger.debug('Got extra arches: %s' % extra)

-             arches = "%s %s" % (arches,extra)

+             arches = "%s %s" % (arches, extra)

          archlist = arches.split()

          self.logger.debug('base archlist: %r' % archlist)

          # - adjust arch list based on srpm macros
@@ -1134,18 +1155,18 @@ 

              archlist = buildarchs

              self.logger.debug('archlist after buildarchs: %r' % archlist)

          if exclusivearch:

-             archlist = [ a for a in archlist if a in exclusivearch ]

+             archlist = [a for a in archlist if a in exclusivearch]

              self.logger.debug('archlist after exclusivearch: %r' % archlist)

          if excludearch:

-             archlist = [ a for a in archlist if a not in excludearch ]

+             archlist = [a for a in archlist if a not in excludearch]

              self.logger.debug('archlist after excludearch: %r' % archlist)

-         #noarch is funny

+         # noarch is funny

          if 'noarch' not in excludearch and \

-                 ( 'noarch' in buildarchs or 'noarch' in exclusivearch ):

+                 ('noarch' in buildarchs or 'noarch' in exclusivearch):

              archlist.append('noarch')

          override = self.opts.get('arch_override')

          if self.opts.get('scratch') and override:

-             #only honor override for scratch builds

+             # only honor override for scratch builds

              self.logger.debug('arch override: %s' % override)

              archlist = override.split()

          archdict = {}
@@ -1158,7 +1179,6 @@ 

              raise koji.BuildError("No matching arches were found")

          return to_list(archdict.keys())

  

- 

      def choose_taskarch(self, arch, srpm, build_tag):

          """Adjust the arch for buildArch subtask as needed"""

          if koji.util.multi_fnmatch(arch, self.options.literal_task_arches):
@@ -1183,13 +1203,13 @@ 

              excludearch = [koji.canonArch(a) for a in excludearch]

              archlist = list(tag_arches)

              if exclusivearch:

-                 archlist = [ a for a in archlist if a in exclusivearch ]

+                 archlist = [a for a in archlist if a in exclusivearch]

              if excludearch:

-                 archlist = [ a for a in archlist if a not in excludearch ]

+                 archlist = [a for a in archlist if a not in excludearch]

              if not archlist:

                  raise koji.BuildError("No valid arches were found. tag %r, "

-                         "exclusive %r, exclude %r" % (tag_arches,

-                         exclusivearch, excludearch))

+                                       "exclusive %r, exclude %r" % (tag_arches,

+                                                                     exclusivearch, excludearch))

              if set(archlist) != set(tag_arches):

                  return random.choice(archlist)

              else:
@@ -1199,7 +1219,6 @@ 

          # otherwise, noarch is ok

          return 'noarch'

  

- 

      def runBuilds(self, srpm, build_tag, archlist, repo_id, failany=True):

          self.logger.debug("Spawning jobs for arches: %r" % (archlist))

          subtasks = {}
@@ -1207,7 +1226,8 @@ 

          for arch in archlist:

              taskarch = self.choose_taskarch(arch, srpm, build_tag)

              subtasks[arch] = self.session.host.subtask(method='buildArch',

-                                                        arglist=[srpm, build_tag, arch, keep_srpm, {'repo_id': repo_id}],

+                                                        arglist=[srpm, build_tag, arch,

+                                                                 keep_srpm, {'repo_id': repo_id}],

                                                         label=arch,

                                                         parent=self.id,

                                                         arch=taskarch)
@@ -1227,13 +1247,13 @@ 

          built_srpm = None

          for (arch, task_id) in six.iteritems(subtasks):

              result = results[task_id]

-             self.logger.debug("DEBUG: %r : %r " % (arch,result,))

+             self.logger.debug("DEBUG: %r : %r " % (arch, result,))

              brootid = result['brootid']

              for fn in result['rpms']:

                  rpms.append(fn)

                  brmap[fn] = brootid

              for fn in result['logs']:

-                 logs.setdefault(arch,[]).append(fn)

+                 logs.setdefault(arch, []).append(fn)

              if result['srpms']:

                  if built_srpm:

                      raise koji.BuildError("multiple builds returned a srpm.  task %i" % self.id)
@@ -1245,17 +1265,17 @@ 

          else:

              raise koji.BuildError("could not find a built srpm")

  

-         return srpm,rpms,brmap,logs

+         return srpm, rpms, brmap, logs

  

-     def tagBuild(self,build_id,dest_tag):

-         #XXX - need options to skip tagging and to force tagging

-         #create the tagBuild subtask

-         #this will handle the "post tests"

+     def tagBuild(self, build_id, dest_tag):

+         # XXX - need options to skip tagging and to force tagging

+         # create the tagBuild subtask

+         # this will handle the "post tests"

          task_id = self.session.host.subtask(method='tagBuild',

-                                        arglist=[dest_tag,build_id,False,None,True],

-                                        label='tag',

-                                        parent=self.id,

-                                        arch='noarch')

+                                             arglist=[dest_tag, build_id, False, None, True],

+                                             label='tag',

+                                             parent=self.id,

+                                             arch='noarch')

          self.wait(task_id)

  

  
@@ -1274,12 +1294,12 @@ 

              tag_arches = [koji.canonArch(a) for a in tag['arches'].split()]

              host_arches = hostdata['arches'].split()

              if not set(tag_arches).intersection(host_arches):

-                 self.logger.info('Task %s (%s): tag arches (%s) and ' \

-                                  'host arches (%s) are disjoint' % \

+                 self.logger.info('Task %s (%s): tag arches (%s) and '

+                                  'host arches (%s) are disjoint' %

                                   (self.id, self.method,

                                    ', '.join(tag_arches), ', '.join(host_arches)))

                  return False

-         #otherwise...

+         # otherwise...

          # This is in principle an error condition, but this is not a good place

          # to fail. Instead we proceed and let the task fail normally.

          return True
@@ -1373,7 +1393,7 @@ 

  

          # run build

          self.logger.debug("Running build")

-         broot.build(fn,arch)

+         broot.build(fn, arch)

  

          # extract results

          resultdir = broot.resultdir()
@@ -1420,38 +1440,40 @@ 

          # upload files to storage server

          uploadpath = broot.getUploadPath()

          for f in rpm_files:

-             self.uploadFile("%s/%s" % (resultdir,f))

+             self.uploadFile("%s/%s" % (resultdir, f))

          self.logger.debug("keep srpm %i %s %s" % (self.id, keep_srpm, opts))

          if keep_srpm:

              if len(srpm_files) == 0:

                  raise koji.BuildError("no srpm files found for task %i" % self.id)

              if len(srpm_files) > 1:

-                 raise koji.BuildError("multiple srpm files found for task %i: %s" % (self.id, srpm_files))

+                 raise koji.BuildError("multiple srpm files found for task %i: %s" %

+                                       (self.id, srpm_files))

  

              # Run sanity checks.  Any failures will throw a BuildError

-             self.srpm_sanity_checks("%s/%s" % (resultdir,srpm_files[0]))

+             self.srpm_sanity_checks("%s/%s" % (resultdir, srpm_files[0]))

  

-             self.logger.debug("uploading %s/%s to %s" % (resultdir,srpm_files[0], uploadpath))

-             self.uploadFile("%s/%s" % (resultdir,srpm_files[0]))

+             self.logger.debug("uploading %s/%s to %s" % (resultdir, srpm_files[0], uploadpath))

+             self.uploadFile("%s/%s" % (resultdir, srpm_files[0]))

          if rpm_files:

-             ret['rpms'] = [ "%s/%s" % (uploadpath,f) for f in rpm_files ]

+             ret['rpms'] = ["%s/%s" % (uploadpath, f) for f in rpm_files]

          else:

              ret['rpms'] = []

          if keep_srpm:

-             ret['srpms'] = [ "%s/%s" % (uploadpath,f) for f in srpm_files ]

+             ret['srpms'] = ["%s/%s" % (uploadpath, f) for f in srpm_files]

          else:

              ret['srpms'] = []

-         ret['logs'] = [ "%s/%s" % (uploadpath,f) for f in log_files ]

+         ret['logs'] = ["%s/%s" % (uploadpath, f) for f in log_files]

          if rpmdiff_hash[self.id]:

              self.uploadFile(noarch_hash_path)

  

          ret['brootid'] = broot.id

  

          broot.expire()

-         #Let TaskManager clean up

+         # Let TaskManager clean up

  

          return ret

  

+ 

  class MavenTask(MultiPlatformTask):

  

      Methods = ['maven']
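
Several hunks in this file only add or delete blank lines; those are E301/E302/E303 fixes, which require exactly two blank lines around top-level definitions (like the one added before `class MavenTask` above) and flag runs of extra blank lines inside bodies. For example:

```python
def first():
    return 1


def second():   # exactly two blank lines above: E302 satisfied
    return 2
```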
@@ -1501,11 +1523,11 @@ 

                  dest_cfg = self.session.getPackageConfig(dest_tag['id'], build_info['name'])

                  # Make sure package is on the list for this tag

                  if dest_cfg is None:

-                     raise koji.BuildError("package %s not in list for tag %s" \

-                         % (build_info['name'], dest_tag['name']))

+                     raise koji.BuildError("package %s not in list for tag %s"

+                                           % (build_info['name'], dest_tag['name']))

                  elif dest_cfg['blocked']:

-                     raise koji.BuildError("package %s is blocked for tag %s" \

-                         % (build_info['name'], dest_tag['name']))

+                     raise koji.BuildError("package %s is blocked for tag %s"

+                                           % (build_info['name'], dest_tag['name']))

  

              build_info = self.session.host.initMavenBuild(self.id, build_info, maven_info)

              self.build_id = build_info['id']
@@ -1514,30 +1536,34 @@ 

              rpm_results = None

              spec_url = self.opts.get('specfile')

              if spec_url:

-                 rpm_results = self.buildWrapperRPM(spec_url, self.build_task_id, target_info, build_info, repo_id)

+                 rpm_results = self.buildWrapperRPM(

+                     spec_url, self.build_task_id, target_info, build_info, repo_id)

  

              if self.opts.get('scratch'):

                  self.session.host.moveMavenBuildToScratch(self.id, maven_results, rpm_results)

              else:

-                 self.session.host.completeMavenBuild(self.id, self.build_id, maven_results, rpm_results)

+                 self.session.host.completeMavenBuild(

+                     self.id, self.build_id, maven_results, rpm_results)

          except (SystemExit, ServerExit, KeyboardInterrupt):

              # we do not trap these

              raise

-         except:

+         except Exception:

              if not self.opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  self.session.host.failBuild(self.id, self.build_id)

              # reraise the exception

              raise

  

          if not self.opts.get('scratch') and not self.opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                                                     arglist=[dest_tag['id'], self.build_id, False, None, True],

+                                                     arglist=[dest_tag['id'],

+                                                              self.build_id, False, None, True],

                                                      label='tag',

                                                      parent=self.id,

                                                      arch='noarch')

              self.wait(tag_task_id)

  

+ 

  class BuildMavenTask(BaseBuildTask):

  

      Methods = ['buildMaven']
@@ -1559,7 +1585,7 @@ 

                      st = os.lstat(filepath)

                      mtime = time.localtime(st.st_mtime)

                      info = zipfile.ZipInfo(filepath[roottrim:])

-                     info.external_attr |= 0o120000 << 16 # symlink file type

+                     info.external_attr |= 0o120000 << 16  # symlink file type

                      info.compress_type = zipfile.ZIP_STORED

                      info.date_time = mtime[:6]

                      zfo.writestr(info, content)
@@ -1584,7 +1610,8 @@ 

          repo_info = self.session.repoInfo(repo_id, strict=True)

          event_id = repo_info['create_event']

  

-         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))

+         br_arch = self.find_arch('noarch', self.session.host.getHost(

+         ), self.session.getBuildConfig(build_tag['id'], event=event_id))

          maven_opts = opts.get('jvm_options')

          if not maven_opts:

              maven_opts = []
@@ -1592,7 +1619,8 @@ 

              if opt.startswith('-Xmx'):

                  break

          else:

-             # Give the JVM 2G to work with by default, if the build isn't specifying its own max. memory

+             # Give the JVM 2G to work with by default, if the build isn't specifying

+             # its own max. memory

              maven_opts.append('-Xmx2048m')

          buildroot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id,

                                install_group='maven-build', setup_dns=True, repo_id=repo_id,
@@ -1609,7 +1637,8 @@ 

              self.session.host.updateBuildRootList(buildroot.id, buildroot.getPackageList())

              if rv:

                  buildroot.expire()

-                 raise koji.BuildrootError('error installing packages, %s' % buildroot._mockResult(rv, logfile='mock_output.log'))

+                 raise koji.BuildrootError('error installing packages, %s' %

+                                           buildroot._mockResult(rv, logfile='mock_output.log'))

  

          # existence of symlink should be sufficient

          if not os.path.lexists('%s/usr/bin/mvn' % buildroot.rootdir()):
@@ -1629,8 +1658,8 @@ 

          logfile = self.workdir + '/checkout.log'

          uploadpath = self.getUploadDir()

  

- 

-         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=opts.get('scratch'))

+         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                            build_tag=build_tag, scratch=opts.get('scratch'))

          # Check out sources from the SCM

          sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

          self.run_callbacks("postSCMCheckout",
@@ -1647,7 +1676,8 @@ 

              patchlog = self.workdir + '/patches.log'

              patch_scm = SCM(self.opts.get('patches'))

              patch_scm.assert_allowed(self.options.allowed_scms)

-             self.run_callbacks('preSCMCheckout', scminfo=patch_scm.get_info(), build_tag=build_tag, scratch=opts.get('scratch'))

+             self.run_callbacks('preSCMCheckout', scminfo=patch_scm.get_info(),

+                                build_tag=build_tag, scratch=opts.get('scratch'))

              # never try to check out a common/ dir when checking out patches

              patch_scm.use_common = False

              patchcheckoutdir = patch_scm.checkout(patchdir, self.session, uploadpath, patchlog)
@@ -1661,17 +1691,21 @@ 

          # Apply patches, if present

          if self.opts.get('patches'):

              # filter out directories and files beginning with . (probably scm metadata)

-             patches = [patch for patch in os.listdir(patchcheckoutdir) if \

-                            os.path.isfile(os.path.join(patchcheckoutdir, patch)) and \

-                            patch.endswith('.patch')]

+             patches = [patch for patch in os.listdir(patchcheckoutdir)

+                        if os.path.isfile(os.path.join(patchcheckoutdir, patch)) and

+                        patch.endswith('.patch')]

              if not patches:

                  raise koji.BuildError('no patches found at %s' % self.opts.get('patches'))

              patches.sort()

              for patch in patches:

-                 cmd = ['/usr/bin/patch', '--verbose', '--no-backup-if-mismatch', '-d', sourcedir, '-p1', '-i', os.path.join(patchcheckoutdir, patch)]

-                 ret = log_output(self.session, cmd[0], cmd, patchlog, uploadpath, logerror=1, append=1)

+                 cmd = ['/usr/bin/patch', '--verbose', '--no-backup-if-mismatch', '-d',

+                        sourcedir, '-p1', '-i', os.path.join(patchcheckoutdir, patch)]

+                 ret = log_output(self.session, cmd[0], cmd,

+                                  patchlog, uploadpath, logerror=1, append=1)

                  if ret:

-                     raise koji.BuildError('error applying patches from %s, see patches.log for details' % self.opts.get('patches'))

+                     raise koji.BuildError(

+                         'error applying patches from %s, see patches.log for details' %

+                         self.opts.get('patches'))

  

          # Set ownership of the entire source tree to the mock user

          uid = pwd.getpwnam(self.options.mockuser)[2]
@@ -1763,6 +1797,7 @@ 

                  'logs': logs,

                  'files': output_files}

  

+ 

  class WrapperRPMTask(BaseBuildTask):

      """Build a wrapper rpm around archives output from a Maven or Windows build.

      May either be called as a subtask or as a separate
@@ -1783,7 +1818,7 @@ 

              if re.match("%s:" % tag, spec, re.M):

                  raise koji.BuildError("%s is not allowed to be set in spec file" % tag)

          for tag in ("packager", "distribution", "vendor"):

-             if re.match("%%define\s+%s\s+" % tag, spec, re.M):

+             if re.match(r"%%define\s+%s\s+" % tag, spec, re.M):

                  raise koji.BuildError("%s is not allowed to be defined in spec file" % tag)

  

      def checkHost(self, hostdata):
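
The raw-string change above fixes W605 ("invalid escape sequence"): in a plain string literal `\s` is not a recognized escape and has been deprecated since Python 3.6, so regex patterns belong in raw strings. A runnable sketch with a hypothetical tag value:

```python
import re

tag = "vendor"                          # hypothetical example value
pattern = r"%%define\s+%s\s+" % tag     # raw string; '%%' formats to '%'
print(bool(re.match(pattern, "%define vendor Example", re.M)))  # True
```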
@@ -1828,7 +1863,8 @@ 

                  artifact_name = os.path.basename(artifact_path)

                  base, ext = os.path.splitext(artifact_name)

                  if ext == '.log':

-                     # Exclude log files for consistency with the output of listArchives() used below

+                     # Exclude log files for consistency with the output of listArchives() used

+                     # below

                      continue

                  relpath = os.path.join(self.pathinfo.task(task['id']), artifact_path)[1:]

                  for volume in artifact_data[artifact_path]:
@@ -1841,10 +1877,11 @@ 

              # called as a top-level task to create wrapper rpms for an existing build

              # verify that the build is complete

              if not build['state'] == koji.BUILD_STATES['COMPLETE']:

-                 raise koji.BuildError('cannot call wrapperRPM on a build that did not complete successfully')

+                 raise koji.BuildError(

+                     'cannot call wrapperRPM on a build that did not complete successfully')

  

-             # get the list of files from the build instead of the task, because the task output directory may

-             # have already been cleaned up

+             # get the list of files from the build instead of the task,

+             # because the task output directory may have already been cleaned up

              if maven_info:

                  build_artifacts = self.session.listArchives(buildID=build['id'], type='maven')

              elif win_info:
@@ -1882,7 +1919,8 @@ 

                      assert False  # pragma: no cover

  

          if not artifacts:

-             raise koji.BuildError('no output found for %s' % (task and koji.taskLabel(task) or koji.buildLabel(build)))

+             raise koji.BuildError('no output found for %s' % (

+                 task and koji.taskLabel(task) or koji.buildLabel(build)))

  

          values['artifacts'] = artifacts

          values['all_artifacts'] = all_artifacts
@@ -1910,7 +1948,8 @@ 

              elif task['method'] == 'vmExec':

                  self.copy_fields(task_result, values, 'epoch', 'name', 'version', 'release')

                  values['win_info'] = {'platform': task_result['platform']}

-             elif task['method'] in ('createLiveCD', 'createAppliance', 'createImage', 'createLiveMedia'):

+             elif task['method'] in ('createLiveCD', 'createAppliance', 'createImage',

+                                     'createLiveMedia'):

                  self.copy_fields(task_result, values, 'epoch', 'name', 'version', 'release')

              else:

                  # can't happen
@@ -1926,9 +1965,11 @@ 

          repo_info = self.session.repoInfo(repo_id, strict=True)

          event_id = repo_info['create_event']

          build_tag = self.session.getTag(build_target['build_tag'], strict=True)

-         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))

+         br_arch = self.find_arch('noarch', self.session.host.getHost(),

+                                  self.session.getBuildConfig(build_tag['id'], event=event_id))

  

-         buildroot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, install_group='wrapper-rpm-build', repo_id=repo_id)

+         buildroot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id,

+                               install_group='wrapper-rpm-build', repo_id=repo_id)

          buildroot.workdir = self.workdir

          self.logger.debug("Initializing buildroot")

          buildroot.init()
@@ -1936,7 +1977,8 @@ 

          logfile = os.path.join(self.workdir, 'checkout.log')

          scmdir = buildroot.tmpdir() + '/scmroot'

          koji.ensuredir(scmdir)

-         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=opts.get('scratch'))

+         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                            build_tag=build_tag, scratch=opts.get('scratch'))

          specdir = scm.checkout(scmdir, self.session, self.getUploadDir(), logfile)

          self.run_callbacks("postSCMCheckout",

                             scminfo=scm.get_info(),
@@ -1988,7 +2030,7 @@ 

          gid = grp.getgrnam('mock')[2]

          self.chownTree(specdir, uid, gid)

  

-         #build srpm

+         # build srpm

          self.logger.debug("Running srpm build")

          buildroot.build_srpm(specfile, specdir, None)

  
@@ -1996,7 +2038,8 @@ 

          if len(srpms) == 0:

              raise koji.BuildError('no srpms found in %s' % buildroot.resultdir())

          elif len(srpms) > 1:

-             raise koji.BuildError('multiple srpms found in %s: %s' % (buildroot.resultdir(), ', '.join(srpms)))

+             raise koji.BuildError('multiple srpms found in %s: %s' %

+                                   (buildroot.resultdir(), ', '.join(srpms)))

          else:

              srpm = srpms[0]

  
@@ -2013,18 +2056,18 @@ 

              if not opts.get('skip_tag'):

                  # Make sure package is on the list for this tag

                  if pkg_cfg is None:

-                     raise koji.BuildError("package %s not in list for tag %s" \

-                           % (data['name'], build_target['dest_tag_name']))

+                     raise koji.BuildError("package %s not in list for tag %s"

+                                           % (data['name'], build_target['dest_tag_name']))

                  elif pkg_cfg['blocked']:

-                     raise koji.BuildError("package %s is blocked for tag %s" \

-                           % (data['name'], build_target['dest_tag_name']))

+                     raise koji.BuildError("package %s is blocked for tag %s"

+                                           % (data['name'], build_target['dest_tag_name']))

              self.new_build_id = self.session.host.initBuild(data)

  

          try:

              buildroot.build(srpm)

          except (SystemExit, ServerExit, KeyboardInterrupt):

              raise

-         except:

+         except Exception:

              if self.new_build_id:

                  self.session.host.failBuild(self.id, self.new_build_id)

                  raise
@@ -2041,8 +2084,8 @@ 

                  else:

                      if self.new_build_id:

                          self.session.host.failBuild(self.id, self.new_build_id)

-                     raise koji.BuildError('multiple srpms found in %s: %s, %s' % \

-                         (resultdir, srpm, filename))

+                     raise koji.BuildError('multiple srpms found in %s: %s, %s' %

+                                           (resultdir, srpm, filename))

              elif filename.endswith('.rpm'):

                  rpms.append(filename)

              elif filename.endswith('.log'):
@@ -2050,8 +2093,8 @@ 

              else:

                  if self.new_build_id:

                      self.session.host.failBuild(self.id, self.new_build_id)

-                 raise koji.BuildError('unexpected file found in %s: %s' % \

-                     (resultdir, filename))

+                 raise koji.BuildError('unexpected file found in %s: %s' %

+                                       (resultdir, filename))

  

          if not srpm:

              if self.new_build_id:
@@ -2068,7 +2111,7 @@ 

                  self.uploadFile(os.path.join(resultdir, rpm_fn))

          except (SystemExit, ServerExit, KeyboardInterrupt):

              raise

-         except:

+         except Exception:

              if self.new_build_id:

                  self.session.host.failBuild(self.id, self.new_build_id)

                  raise
@@ -2086,23 +2129,27 @@ 

              relrpms = [uploaddir + '/' + r for r in rpms]

              rellogs = [uploaddir + '/' + l for l in logs]

              if opts.get('scratch'):

-                 self.session.host.moveBuildToScratch(self.id, relsrpm, relrpms, {'noarch': rellogs})

+                 self.session.host.moveBuildToScratch(

+                     self.id, relsrpm, relrpms, {'noarch': rellogs})

              else:

                  if opts.get('create_build'):

                      brmap = dict.fromkeys([relsrpm] + relrpms, buildroot.id)

                      try:

                          self.session.host.completeBuild(self.id, self.new_build_id,

-                                                         relsrpm, relrpms, brmap, {'noarch': rellogs})

+                                                         relsrpm, relrpms, brmap,

+                                                         {'noarch': rellogs})

                      except (SystemExit, ServerExit, KeyboardInterrupt):

                          raise

-                     except:

+                     except Exception:

                          self.session.host.failBuild(self.id, self.new_build_id)

                          raise

                      if not opts.get('skip_tag'):

                          tag_task_id = self.session.host.subtask(method='tagBuild',

                                                                  arglist=[build_target['dest_tag'],

-                                                                          self.new_build_id, False, None, True],

-                                                                 label='tag', parent=self.id, arch='noarch')

+                                                                          self.new_build_id, False,

+                                                                          None, True],

+                                                                 label='tag', parent=self.id,

+                                                                 arch='noarch')

                          self.wait(tag_task_id)

                  else:

                      self.session.host.importWrapperRPMs(self.id, build['id'], results)
@@ -2114,6 +2161,7 @@ 

  

          return results

  

+ 

  class ChainMavenTask(MultiPlatformTask):

  

      Methods = ['chainmaven']
@@ -2134,11 +2182,11 @@ 

                  dest_cfg = self.session.getPackageConfig(dest_tag['id'], package)

                  # Make sure package is on the list for this tag

                  if dest_cfg is None:

-                     raise koji.BuildError("package %s not in list for tag %s" \

-                         % (package, dest_tag['name']))

+                     raise koji.BuildError("package %s not in list for tag %s"

+                                           % (package, dest_tag['name']))

                  elif dest_cfg['blocked']:

-                     raise koji.BuildError("package %s is blocked for tag %s" \

-                         % (package, dest_tag['name']))

+                     raise koji.BuildError("package %s is blocked for tag %s"

+                                           % (package, dest_tag['name']))

  

          self.depmap = {}

          for package, params in builds.items():
@@ -2166,14 +2214,16 @@ 

                  if not opts.get('force'):

                      # check for a duplicate build (a build performed with the

                      # same scmurl and options)

-                     dup_build = self.get_duplicate_build(dest_tag['name'], package, params, task_opts)

+                     dup_build = self.get_duplicate_build(

+                         dest_tag['name'], package, params, task_opts)

                      # if we find one, mark the package as built and remove it from todo

                      if dup_build:

                          self.done[package] = dup_build['nvr']

                          for deps in todo.values():

                              deps.discard(package)

                          del todo[package]

-                         self.results.append('%s previously built from %s' % (dup_build['nvr'], task_url))

+                         self.results.append('%s previously built from %s' %

+                                             (dup_build['nvr'], task_url))

                          continue

                  task_opts.update(dslice(opts, ['skip_tag', 'scratch'], strict=False))

  
@@ -2228,8 +2278,9 @@ 

                                  self.done[package] = child['id']

                                  break

                          else:

-                             raise koji.BuildError('could not find buildMaven subtask of %s' % task_id)

-                     self.results.append('%s built from %s by task %s' % \

+                             raise koji.BuildError(

+                                 'could not find buildMaven subtask of %s' % task_id)

+                     self.results.append('%s built from %s by task %s' %

                                          (package, task_url, task_id))

                  else:

                      task_builds = self.session.listBuilds(taskID=task_id)
@@ -2265,7 +2316,7 @@ 

          for key in akeys:

              aval = a.get(key)

              bval = b.get(key)
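
+             # isinstance() (vs. comparing type() objects) satisfies flake8 E721;

+             # note it also accepts subclasses of bval's type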

-             if type(aval) != type(bval):

+             if not isinstance(aval, type(bval)):

                  return False

              if isinstance(aval, dict):

                  if not self.dicts_equal(aval, bval):
@@ -2324,10 +2375,11 @@ 

          # everything matches

          return build

  

+ 

  class TagBuildTask(BaseTaskHandler):

  

      Methods = ['tagBuild']

-     #XXX - set weight?

+     # XXX - set weight?

  

      def handler(self, tag_id, build_id, force=False, fromtag=None, ignore_success=False):

          task = self.session.getTaskInfo(self.id)
@@ -2336,38 +2388,46 @@ 

              self.session.getBuild(build_id, strict=True)

              self.session.getTag(tag_id, strict=True)

  

-             #several basic sanity checks have already been run (and will be run

-             #again when we make the final call). Our job is to perform the more

-             #computationally expensive 'post' tests.

+             # several basic sanity checks have already been run (and will be run

+             # again when we make the final call). Our job is to perform the more

+             # computationally expensive 'post' tests.

  

-             #XXX - add more post tests

-             self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag)

-             self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success)

+             # XXX - add more post tests

+             self.session.host.tagBuild(self.id, tag_id, build_id, force=force, fromtag=fromtag)

+             self.session.host.tagNotification(

+                 True, tag_id, fromtag, build_id, user_id, ignore_success)

          except Exception as e:

              exctype, value = sys.exc_info()[:2]

-             self.session.host.tagNotification(False, tag_id, fromtag, build_id, user_id, ignore_success, "%s: %s" % (exctype, value))

+             self.session.host.tagNotification(

+                 False, tag_id, fromtag, build_id, user_id, ignore_success,

+                 "%s: %s" % (exctype, value))

              raise e

  

+ 

  class BuildImageTask(MultiPlatformTask):

  

      def initImageBuild(self, name, version, release, target_info, opts):

          """create a build object for this image build"""

          pkg_cfg = self.session.getPackageConfig(target_info['dest_tag_name'],

-              name)

+                                                 name)

          self.logger.debug("%r" % pkg_cfg)

          if not opts.get('skip_tag') and not opts.get('scratch'):

              # Make sure package is on the list for this tag

              if pkg_cfg is None:

-                 raise koji.BuildError("package (image) %s not in list for tag %s" % (name, target_info['dest_tag_name']))

+                 raise koji.BuildError("package (image) %s not in list for tag %s" %

+                                       (name, target_info['dest_tag_name']))

              elif pkg_cfg['blocked']:

-                 raise koji.BuildError("package (image)  %s is blocked for tag %s" % (name, target_info['dest_tag_name']))

+                 raise koji.BuildError("package (image) %s is blocked for tag %s" %

+                                       (name, target_info['dest_tag_name']))

          return self.session.host.initImageBuild(self.id,

-             dict(name=name, version=version, release=release, epoch=0))

+                                                 dict(name=name, version=version, release=release,

+                                                      epoch=0))

  

      def getRelease(self, name, ver):

          """return the next available release number for an N-V"""

          return self.session.getNextRelease(dict(name=name, version=ver))

  

+ 

  class BuildBaseImageTask(BuildImageTask):

      Methods = ['image']

  
@@ -2376,7 +2436,7 @@ 

          target_info = self.session.getBuildTarget(target, strict=True)

          build_tag = target_info['build_tag']

          repo_info = self.getRepo(build_tag)

-         #check requested arches against build tag

+         # check requested arches against build tag

          buildconfig = self.session.getBuildConfig(build_tag)

          if not buildconfig['arches']:

              raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2389,7 +2449,9 @@ 

              opts = {}

  

          if not ozif_enabled:

-             self.logger.error("ImageFactory features require the following dependencies: pykickstart, imagefactory, oz and possibly python-hashlib")

+             self.logger.error(

+                 "ImageFactory features require the following dependencies: pykickstart, "

+                 "imagefactory, oz and possibly python-hashlib")

              raise koji.ApplianceError('ImageFactory functions not available')

  

          # build image(s)
@@ -2404,7 +2466,7 @@ 

                  raise koji.ApplianceError('The Release may not have a hyphen')

              if not opts.get('scratch'):

                  bld_info = self.initImageBuild(name, version, release,

-                     target_info, opts)

+                                                target_info, opts)

  

              subtasks = {}

              self.logger.debug("Spawning jobs for image arches: %r" % (arches))
@@ -2414,13 +2476,14 @@ 

                  subtasks[arch] = self.session.host.subtask(

                      method='createImage',

                      arglist=[name, version, release, arch, target_info,

-                     build_tag, repo_info, inst_url, opts],

+                              build_tag, repo_info, inst_url, opts],

                      label=arch, parent=self.id, arch=arch)

                  if arch in opts.get('optional_arches', []):

                      canfail.append(subtasks[arch])
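
+                     # subtasks for optional arches may fail without failing the build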

              self.logger.debug("Got image subtasks: %r" % (subtasks))

              self.logger.debug("Waiting on image subtasks (%s can fail)..." % canfail)

-             results = self.wait(to_list(subtasks.values()), all=True, failany=True, canfail=canfail)

+             results = self.wait(to_list(subtasks.values()), all=True,

+                                 failany=True, canfail=canfail)

  

              # if everything failed, fail even if all subtasks are in canfail

              self.logger.debug('subtask results: %r', results)
@@ -2472,14 +2535,14 @@ 

                  self.session.host.moveImageBuildToScratch(self.id, results)

              else:

                  self.session.host.completeImageBuild(self.id, bld_info['id'],

-                     results)

+                                                      results)

  

-         except (SystemExit,ServerExit,KeyboardInterrupt):

-             #we do not trap these

+         except (SystemExit, ServerExit, KeyboardInterrupt):

+             # we do not trap these

              raise

-         except:

+         except Exception:

              if not opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  if bld_info:

                      self.session.host.failBuild(self.id, bld_info['id'])

              # reraise the exception
@@ -2488,15 +2551,17 @@ 

          # tag it

          if not opts.get('scratch') and not opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                 arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],

-                 label='tag', parent=self.id, arch='noarch')

+                                                     arglist=[target_info['dest_tag'],

+                                                              bld_info['id'], False, None, True],

+                                                     label='tag', parent=self.id, arch='noarch')

              self.wait(tag_task_id)

  

          # report results

          report = ''

          if opts.get('scratch'):

              respath = ', '.join(

-                 [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in subtasks.values()])

+                 [os.path.join(koji.pathinfo.work(),

+                               koji.pathinfo.taskrelpath(tid)) for tid in subtasks.values()])

              report += 'Scratch '

          else:

              respath = koji.pathinfo.imagebuild(bld_info)
@@ -2512,7 +2577,7 @@ 

          target_info = self.session.getBuildTarget(target, strict=True)

          build_tag = target_info['build_tag']

          repo_info = self.getRepo(build_tag)

-         #check requested arch against build tag

+         # check requested arch against build tag

          buildconfig = self.session.getBuildConfig(build_tag)

          if not buildconfig['arches']:

              raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2520,12 +2585,13 @@ 

          if koji.canonArch(arch) not in tag_archlist:

              raise koji.BuildError("Invalid arch for build tag: %s" % arch)

  

- 

          if not opts:

              opts = {}

  

          if not image_enabled:

-             self.logger.error("Appliance features require the following dependencies: pykickstart, and possibly python-hashlib")

+             self.logger.error(

+                 "Appliance features require the following dependencies: "

+                 "pykickstart, and possibly python-hashlib")

              raise koji.ApplianceError('Appliance functions not available')

  

          # build image
@@ -2536,11 +2602,13 @@ 

                  release = self.getRelease(name, version)

              if not opts.get('scratch'):

                  bld_info = self.initImageBuild(name, version, release,

-                     target_info, opts)

+                                                target_info, opts)

              create_task_id = self.session.host.subtask(method='createAppliance',

-                 arglist=[name, version, release, arch, target_info, build_tag,

-                     repo_info, ksfile, opts],

-                 label='appliance', parent=self.id, arch=arch)

+                                                        arglist=[name, version, release, arch,

+                                                                 target_info, build_tag,

+                                                                 repo_info, ksfile, opts],

+                                                        label='appliance', parent=self.id,

+                                                        arch=arch)

              results = self.wait(create_task_id)

              self.logger.info('image build task (%s) completed' % create_task_id)

              self.logger.info('results: %s' % results)
@@ -2560,12 +2628,12 @@ 

              else:

                  self.session.host.moveImageBuildToScratch(self.id, results)

  

-         except (SystemExit,ServerExit,KeyboardInterrupt):

-             #we do not trap these

+         except (SystemExit, ServerExit, KeyboardInterrupt):

+             # we do not trap these

              raise

-         except:

+         except Exception:

              if not opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  if bld_info:

                      self.session.host.failBuild(self.id, bld_info['id'])

              # reraise the exception
@@ -2574,14 +2642,15 @@ 

          # tag it

          if not opts.get('scratch') and not opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                 arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],

-                 label='tag', parent=self.id, arch='noarch')

+                                                     arglist=[target_info['dest_tag'],

+                                                              bld_info['id'], False, None, True],

+                                                     label='tag', parent=self.id, arch='noarch')

              self.wait(tag_task_id)

  

          # report results

          if opts.get('scratch'):

              respath = os.path.join(koji.pathinfo.work(),

-                 koji.pathinfo.taskrelpath(create_task_id))

+                                    koji.pathinfo.taskrelpath(create_task_id))

              report = 'Scratch '

          else:

              respath = koji.pathinfo.imagebuild(bld_info)
@@ -2589,6 +2658,7 @@ 

          report += 'appliance build results in: %s' % respath

          return report

  

+ 

  class BuildLiveCDTask(BuildImageTask):

      Methods = ['livecd']

  
@@ -2597,7 +2667,7 @@ 

          target_info = self.session.getBuildTarget(target, strict=True)

          build_tag = target_info['build_tag']

          repo_info = self.getRepo(build_tag)

-         #check requested arch against build tag

+         # check requested arch against build tag

          buildconfig = self.session.getBuildConfig(build_tag)

          if not buildconfig['arches']:

              raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2609,7 +2679,7 @@ 

              opts = {}

          if not image_enabled:

              self.logger.error("LiveCD features require the following dependencies: "

-                     "pykickstart, pycdio, and possibly python-hashlib")

+                               "pykickstart, pycdio, and possibly python-hashlib")

              raise koji.LiveCDError('LiveCD functions not available')

  

          # build the image
@@ -2620,11 +2690,12 @@ 

                  release = self.getRelease(name, version)

              if not opts.get('scratch'):

                  bld_info = self.initImageBuild(name, version, release,

-                     target_info, opts)

+                                                target_info, opts)

              create_task_id = self.session.host.subtask(method='createLiveCD',

-                 arglist=[name, version, release, arch, target_info, build_tag,

-                 repo_info, ksfile, opts],

-                 label='livecd', parent=self.id, arch=arch)

+                                                        arglist=[name, version, release, arch,

+                                                                 target_info, build_tag,

+                                                                 repo_info, ksfile, opts],

+                                                        label='livecd', parent=self.id, arch=arch)

              results = self.wait(create_task_id)

              self.logger.info('image build task (%s) completed' % create_task_id)

              self.logger.info('results: %s' % results)
@@ -2644,12 +2715,12 @@ 

              else:

                  self.session.host.moveImageBuildToScratch(self.id, results)

  

-         except (SystemExit,ServerExit,KeyboardInterrupt):

-             #we do not trap these

+         except (SystemExit, ServerExit, KeyboardInterrupt):

+             # we do not trap these

              raise

-         except:

+         except Exception:

              if not opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  if bld_info:

                      self.session.host.failBuild(self.id, bld_info['id'])

              # reraise the exception
@@ -2658,14 +2729,15 @@ 

          # tag it if necessary

          if not opts.get('scratch') and not opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                 arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],

-                 label='tag', parent=self.id, arch='noarch')

+                                                     arglist=[target_info['dest_tag'],

+                                                              bld_info['id'], False, None, True],

+                                                     label='tag', parent=self.id, arch='noarch')

              self.wait(tag_task_id)

  

          # report the results

          if opts.get('scratch'):

              respath = os.path.join(koji.pathinfo.work(),

-                 koji.pathinfo.taskrelpath(create_task_id))

+                                    koji.pathinfo.taskrelpath(create_task_id))

              report = 'Scratch '

          else:

              respath = koji.pathinfo.imagebuild(bld_info)
@@ -2683,7 +2755,7 @@ 

          target_info = self.session.getBuildTarget(target, strict=True)

          build_tag = target_info['build_tag']

          repo_info = self.getRepo(build_tag)

-         #check requested arch against build tag

+         # check requested arch against build tag

          buildconfig = self.session.getBuildConfig(build_tag)

          if not buildconfig['arches']:

              raise koji.BuildError("No arches for tag %(name)s [%(id)s]" % buildconfig)
@@ -2700,7 +2772,7 @@ 

          if not image_enabled:

              # XXX - are these still required here?

              self.logger.error("Missing the following dependencies: "

-                     "pykickstart, pycdio, and possibly python-hashlib")

+                               "pykickstart, pycdio, and possibly python-hashlib")

              raise koji.PreBuildError('Live Media functions not available')

  

          # build the image
@@ -2711,22 +2783,22 @@ 

                  release = self.getRelease(name, version)

              if not opts.get('scratch'):

                  bld_info = self.initImageBuild(name, version, release,

-                     target_info, opts)

+                                                target_info, opts)

              subtasks = {}

              canfail = []

              for arch in arches:

                  subtasks[arch] = self.subtask('createLiveMedia',

-                     [name, version, release, arch, target_info, build_tag,

-                         repo_info, ksfile, opts],

-                     label='livemedia %s' % arch, arch=arch)

+                                               [name, version, release, arch, target_info,

+                                                build_tag, repo_info, ksfile, opts],

+                                               label='livemedia %s' % arch, arch=arch)

                  if arch in opts.get('optional_arches', []):

                      canfail.append(subtasks[arch])

              self.logger.debug("Tasks that can fail: %r", canfail)

  

- 

              self.logger.debug("Got image subtasks: %r", subtasks)

              self.logger.debug("Waiting on livemedia subtasks...")

-             results = self.wait(to_list(subtasks.values()), all=True, failany=True, canfail=canfail)

+             results = self.wait(to_list(subtasks.values()), all=True,

+                                 failany=True, canfail=canfail)

  

              # if everything failed, fail even if all subtasks are in canfail

              self.logger.debug('subtask results: %r', results)
@@ -2758,9 +2830,9 @@ 

                      if arch in ignored_arches:

                          continue

                      arglist = [spec_url, target_info, bld_info, tinfo,

-                                 {'repo_id': repo_info['id']}]

+                                {'repo_id': repo_info['id']}]

                      wrapper_tasks[arch] = self.subtask('wrapperRPM', arglist,

-                                 label='wrapper %s' % arch, arch='noarch')

+                                                        label='wrapper %s' % arch, arch='noarch')

  

                  results2 = self.wait(to_list(wrapper_tasks.values()), all=True, failany=True)

                  self.logger.debug('wrapper results: %r', results2)
@@ -2783,11 +2855,11 @@ 

                  self.session.host.moveImageBuildToScratch(self.id, results)

  

          except (SystemExit, ServerExit, KeyboardInterrupt):

-             #we do not trap these

+             # we do not trap these

              raise

-         except:

+         except Exception:

              if not opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  if bld_info:

                      self.session.host.failBuild(self.id, bld_info['id'])

              # reraise the exception
@@ -2796,14 +2868,16 @@ 

          # tag it if necessary

          if not opts.get('scratch') and not opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                 arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],

-                 label='tag', parent=self.id, arch='noarch')

+                                                     arglist=[target_info['dest_tag'],

+                                                              bld_info['id'], False, None, True],

+                                                     label='tag', parent=self.id, arch='noarch')

              self.wait(tag_task_id)

  

          # report the results

          if opts.get('scratch'):

              respath = ', '.join(

-                 [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in subtasks.values()])

+                 [os.path.join(koji.pathinfo.work(),

+                               koji.pathinfo.taskrelpath(tid)) for tid in subtasks.values()])

              report = 'Scratch '

          else:

              respath = koji.pathinfo.imagebuild(bld_info)
@@ -2818,7 +2892,7 @@ 

      Methods = []

      # default to bind mounting /dev, but allow subclasses to change

      # this

-     bind_opts = {'dirs' : {'/dev' : '/dev',}}

+     bind_opts = {'dirs': {'/dev': '/dev'}}

  

      def makeImgBuildRoot(self, buildtag, repoinfo, arch, inst_group):

          """
@@ -2872,7 +2946,8 @@ 

              scm = SCM(self.opts['ksurl'])

              scm.assert_allowed(self.options.allowed_scms)

              logfile = os.path.join(self.workdir, 'checkout.log')

-             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=self.opts.get('scratch'))

+             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                                build_tag=build_tag, scratch=self.opts.get('scratch'))

              scmsrcdir = scm.checkout(scmdir, self.session, self.getUploadDir(), logfile)

              self.run_callbacks("postSCMCheckout",

                                 scminfo=scm.get_info(),
@@ -2883,8 +2958,8 @@ 

          else:

              kspath = self.localPath("work/%s" % ksfile)

  

-         self.uploadFile(kspath) # upload the original ks file

-         return kspath # full absolute path to the file in the chroot

+         self.uploadFile(kspath)  # upload the original ks file

+         return kspath  # full absolute path to the file in the chroot

  

      def readKickstart(self, kspath, opts):

          """
@@ -2915,7 +2990,7 @@ 

                                     "'%s' : %s" % (kspath, e))

          except kserrors.KickstartError as e:

              raise koji.LiveCDError("Failed to parse kickstart file "

-                                     "'%s' : %s" % (kspath, e))

+                                    "'%s' : %s" % (kspath, e))

  

      def prepareKickstart(self, repo_info, target_info, arch, broot, opts):

          """
@@ -2937,14 +3012,15 @@ 

          # in the kickstart file. If --repo wasn't specified, then we use the

          # repo associated with the target passed in initially.

          repo_class = kscontrol.dataMap[self.ks.version]['RepoData']

-         self.ks.handler.repo.repoList = [] # delete whatever the ks file told us

+         self.ks.handler.repo.repoList = []  # delete whatever the ks file told us

          if opts.get('repo'):

              user_repos = opts['repo']

              if isinstance(user_repos, six.string_types):

                  user_repos = user_repos.split(',')

              index = 0
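
+             # user-supplied repos become 'koji-override-0', 'koji-override-1', ...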

              for user_repo in user_repos:

-                 self.ks.handler.repo.repoList.append(repo_class(baseurl=user_repo, name='koji-override-%i' % index))

+                 self.ks.handler.repo.repoList.append(repo_class(

+                     baseurl=user_repo, name='koji-override-%i' % index))

                  index += 1

          else:

              path_info = koji.PathInfo(topdir=self.options.topurl)
@@ -2952,8 +3028,10 @@ 

                                        target_info['build_tag_name'])

              baseurl = '%s/%s' % (repopath, arch)

              self.logger.debug('BASEURL: %s' % baseurl)

-             self.ks.handler.repo.repoList.append(repo_class(baseurl=baseurl, name='koji-%s-%i' % (target_info['build_tag_name'], repo_info['id'])))

-         #inject url if provided

+             self.ks.handler.repo.repoList.append(repo_class(

+                 baseurl=baseurl, name='koji-%s-%i' % (target_info['build_tag_name'],

+                                                       repo_info['id'])))

+         # inject url if provided

          if opts.get('install_tree_url'):

              self.ks.handler.url(url=opts['install_tree_url'])

  
@@ -2969,7 +3047,7 @@ 

          if not os.path.exists(kskoji):

              raise koji.LiveCDError("KS file missing: %s" % kskoji)

          self.uploadFile(kskoji)

-         return broot.path_without_to_within(kskoji) # absolute path within chroot

+         return broot.path_without_to_within(kskoji)  # absolute path within chroot

  

      def getImagePackages(self, cachepath):

          """
@@ -2978,7 +3056,7 @@ 

          """

          found = False

          hdrlist = []

-         fields = ['name', 'version', 'release', 'epoch', 'arch', \

+         fields = ['name', 'version', 'release', 'epoch', 'arch',

                    'buildtime', 'sigmd5']

          for root, dirs, files in os.walk(cachepath):

              for f in files:
@@ -2998,27 +3076,27 @@ 

          # Duplicated with pungi-fedora fedora.conf

          # see https://pagure.io/koji/pull-request/817
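
+         # the short forms keep generated volume IDs within the 32-character

+         # ISO 9660 limit (e.g. 'Workstation' becomes 'WS')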

          substitutions = {

-                          'Beta': 'B',

-                       'Rawhide': 'rawh',

-                 'Astronomy_KDE': 'AstK',

-                        'Atomic': 'AH',

-                      'Cinnamon': 'Cinn',

-                         'Cloud': 'C',

-                  'Design_suite': 'Dsgn',

-                'Electronic_Lab': 'Elec',

-                    'Everything': 'E',

+             'Beta': 'B',

+             'Rawhide': 'rawh',

+             'Astronomy_KDE': 'AstK',

+             'Atomic': 'AH',

+             'Cinnamon': 'Cinn',

+             'Cloud': 'C',

+             'Design_suite': 'Dsgn',

+             'Electronic_Lab': 'Elec',

+             'Everything': 'E',

-                         'Games': 'Game',

+             'Games': 'Game',

-                        'Images': 'img',

+             'Images': 'img',

-                       'Jam_KDE': 'Jam',

+             'Jam_KDE': 'Jam',

-                   'MATE_Compiz': 'MATE',

-              # Note https://pagure.io/pungi-fedora/issue/533

-              'Python-Classroom': 'Clss',

-              'Python_Classroom': 'Clss',

+             'MATE_Compiz': 'MATE',

+             # Note https://pagure.io/pungi-fedora/issue/533

+             'Python-Classroom': 'Clss',

+             'Python_Classroom': 'Clss',

-                      'Robotics': 'Robo',

+             'Robotics': 'Robo',

-                'Scientific_KDE': 'SciK',

+             'Scientific_KDE': 'SciK',

-                      'Security': 'Sec',

+             'Security': 'Sec',

-                        'Server': 'S',

+             'Server': 'S',

-                   'Workstation': 'WS',

+             'Workstation': 'WS',

              'WorkstationOstree': 'WS',

          }

  
@@ -3056,9 +3134,10 @@ 

                  return part.disk

          raise koji.ApplianceError('kickstart lacks a "/" mountpoint')

  

-     def handler(self, name, version, release, arch, target_info, build_tag, repo_info, ksfile, opts=None):

+     def handler(self, name, version, release, arch, target_info,

+                 build_tag, repo_info, ksfile, opts=None):

  

-         if opts == None:

+         if opts is None:

              opts = {}

          self.opts = opts

          broot = self.makeImgBuildRoot(build_tag, repo_info, arch,
@@ -3079,7 +3158,7 @@ 

                 '--logfile', app_log, '--cache', cachedir, '-o', odir]

          for arg_name in ('vmem', 'vcpu', 'format'):

              arg = opts.get(arg_name)

-             if arg != None:

+             if arg is not None:

                  cmd.extend(['--%s' % arg_name, arg])

          appname = '%s-%s-%s' % (name, version, release)

          cmd.extend(['--name', appname])
@@ -3090,7 +3169,8 @@ 

          self.uploadFile(os.path.join(broot.rootdir(), app_log[1:]))

          if rv:

              raise koji.ApplianceError(

-                 "Could not create appliance: %s" % parseStatus(rv, 'appliance-creator') + "; see root.log or appliance.log for more information")

+                 "Could not create appliance: %s" % parseStatus(rv, 'appliance-creator') +

+                 "; see root.log or appliance.log for more information")

  

          # Find the results

          results = []
@@ -3121,7 +3201,7 @@ 

  

          if not opts.get('scratch'):

              hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),

-                 cachedir[1:]))

+                                                          cachedir[1:]))

              broot.markExternalRPMs(hdrlist)

              imgdata['rpmlist'] = hdrlist

  
@@ -3132,6 +3212,8 @@ 

  # via the livecd-build group. livecd-creator is then executed in the chroot

  # to create the LiveCD image.

  #

+ 

+ 

  class LiveCDTask(ImageTask):

  

      Methods = ['createLiveCD']
@@ -3153,17 +3235,23 @@ 

  

          # image metadata

          id = iso.get_application_id()

-         if id is not None: fd.write("Application ID: %s\n" % id)

+         if id is not None:

+             fd.write("Application ID: %s\n" % id)

          id = iso.get_preparer_id()

-         if id is not None: fd.write("Preparer ID: %s\n" % id)

+         if id is not None:

+             fd.write("Preparer ID: %s\n" % id)

          id = iso.get_publisher_id()

-         if id is not None: fd.write("Publisher ID: %s\n" % id)

+         if id is not None:

+             fd.write("Publisher ID: %s\n" % id)

          id = iso.get_system_id()

-         if id is not None: fd.write("System ID: %s\n" % id)

+         if id is not None:

+             fd.write("System ID: %s\n" % id)

          id = iso.get_volume_id()

-         if id is not None: fd.write("Volume ID: %s\n" % id)

+         if id is not None:

+             fd.write("Volume ID: %s\n" % id)

          id = iso.get_volumeset_id()

-         if id is not None: fd.write("Volumeset ID: %s\n" % id)

+         if id is not None:

+             fd.write("Volumeset ID: %s\n" % id)

  

          fd.write('\nSize(bytes)  File Name\n')

          manifest = self.listISODir(iso, '/')
@@ -3204,15 +3292,15 @@ 

  

          return manifest

  

+     def handler(self, name, version, release, arch, target_info,

+                 build_tag, repo_info, ksfile, opts=None):

  

-     def handler(self, name, version, release, arch, target_info, build_tag, repo_info, ksfile, opts=None):

- 

-         if opts == None:

+         if opts is None:

              opts = {}

          self.opts = opts

  

          broot = self.makeImgBuildRoot(build_tag, repo_info, arch,

-             'livecd-build')

+                                       'livecd-build')

          kspath = self.fetchKickstart(broot, ksfile, target_info['build_tag_name'])

          self.readKickstart(kspath, opts)

          kskoji = self.prepareKickstart(repo_info, target_info, arch, broot, opts)
@@ -3235,7 +3323,8 @@ 

          self.uploadFile(os.path.join(broot.rootdir(), livecd_log[1:]))

          if rv:

              raise koji.LiveCDError(

-                 'Could not create LiveCD: %s' % parseStatus(rv, 'livecd-creator') + '; see root.log or livecd.log for more information')

+                 'Could not create LiveCD: %s' % parseStatus(rv, 'livecd-creator') +

+                 '; see root.log or livecd.log for more information')

  

          # Find the resultant iso

          # The cwd of the livecd-creator process is tmpdir() in the chroot, so
@@ -3247,7 +3336,8 @@ 

                  if not isofile:

                      isofile = afile

                  else:

-                     raise koji.LiveCDError('multiple .iso files found: %s and %s' % (isofile, afile))

+                     raise koji.LiveCDError(

+                         'multiple .iso files found: %s and %s' % (isofile, afile))

          if not isofile:

              raise koji.LiveCDError('could not find iso file in chroot')

          isosrc = os.path.join(broot.tmpdir(), isofile)
@@ -3264,28 +3354,27 @@ 

          self.uploadFile(isosrc, remoteName=isoname)

  

          imgdata = {'arch': arch,

-             'files': [isoname],

-             'rootdev': None,

-             'task_id': self.id,

-             'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',

-                      'livecd.log', os.path.basename(ksfile),

-                      os.path.basename(kskoji)],

-             'name': name,

-             'version': version,

-             'release': release

-         }

+                    'files': [isoname],

+                    'rootdev': None,

+                    'task_id': self.id,

+                    'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',

+                             'livecd.log', os.path.basename(ksfile),

+                             os.path.basename(kskoji)],

+                    'name': name,

+                    'version': version,

+                    'release': release

+                    }

          if not opts.get('scratch'):

              hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),

-                                             cachedir[1:]))

-             imgdata ['rpmlist'] = hdrlist

+                                                          cachedir[1:]))

+             imgdata['rpmlist'] = hdrlist

              broot.markExternalRPMs(hdrlist)

  

          broot.expire()

          return imgdata

  

  

- 

- ##  livemedia-creator

+ # livemedia-creator

  class LiveMediaTask(ImageTask):

  

      Methods = ['createLiveMedia']
@@ -3335,17 +3424,23 @@ 

  

          # image metadata

          id = iso.get_application_id()

-         if id is not None: fd.write("Application ID: %s\n" % id)

+         if id is not None:

+             fd.write("Application ID: %s\n" % id)

          id = iso.get_preparer_id()

-         if id is not None: fd.write("Preparer ID: %s\n" % id)

+         if id is not None:

+             fd.write("Preparer ID: %s\n" % id)

          id = iso.get_publisher_id()

-         if id is not None: fd.write("Publisher ID: %s\n" % id)

+         if id is not None:

+             fd.write("Publisher ID: %s\n" % id)

          id = iso.get_system_id()

-         if id is not None: fd.write("System ID: %s\n" % id)

+         if id is not None:

+             fd.write("System ID: %s\n" % id)

          id = iso.get_volume_id()

-         if id is not None: fd.write("Volume ID: %s\n" % id)

+         if id is not None:

+             fd.write("Volume ID: %s\n" % id)

          id = iso.get_volumeset_id()

-         if id is not None: fd.write("Volumeset ID: %s\n" % id)

+         if id is not None:

+             fd.write("Volumeset ID: %s\n" % id)

  

          fd.write('\nSize(bytes)  File Name\n')

          manifest = self.listISODir(iso, '/')
@@ -3386,14 +3481,15 @@ 

  

          return manifest

  

-     def handler(self, name, version, release, arch, target_info, build_tag, repo_info, ksfile, opts=None):

+     def handler(self, name, version, release, arch, target_info,

+                 build_tag, repo_info, ksfile, opts=None):

  

-         if opts == None:

+         if opts is None:

              opts = {}

          self.opts = opts

  

          broot = self.makeImgBuildRoot(build_tag, repo_info, arch,

-             'livemedia-build')

+                                       'livemedia-build')

          kspath = self.fetchKickstart(broot, ksfile, target_info['build_tag_name'])

          self.readKickstart(kspath, opts)

          kskoji = self.prepareKickstart(repo_info, target_info, arch, broot, opts)
@@ -3402,7 +3498,6 @@ 

          livemedia_log = broot.tmpdir(within=True) + '/lmc-logs/livemedia-out.log'

          resultdir = broot.tmpdir(within=True) + '/lmc'

  

- 

          # Common LMC command setup, needs extending
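
+         # --no-virt runs anaconda directly in the chroot rather than in a VM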

          cmd = ['/sbin/livemedia-creator',

                 '--ks', kskoji,
@@ -3410,9 +3505,8 @@ 

                 '--no-virt',

                 '--resultdir', resultdir,

                 '--project', name,

-                #'--tmp', '/tmp'

-               ]

- 

+                # '--tmp', '/tmp'

+                ]

  

          volid = opts.get('volid')

          if not volid:
@@ -3426,13 +3520,12 @@ 

          cmd.extend(['--make-iso',

                      '--volid', volid,

                      '--iso-only',

-                    ])

+                     ])

  

-         isoname='%s-%s-%s-%s.iso' % (name, arch, version, release)

+         isoname = '%s-%s-%s-%s.iso' % (name, arch, version, release)
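
+         # e.g. 'Fedora-x86_64-32-1.iso' (name-arch-version-release; illustrative)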

          cmd.extend(['--iso-name', isoname,

                      '--releasever', version,

-                    ])

- 

+                     ])

  

          if arch == 'x86_64':

              cmd.append('--macboot')
@@ -3441,7 +3534,6 @@ 

              templates_dir = self.fetch_lorax_templates_from_scm(broot)

              cmd.extend(['--lorax-templates', templates_dir])

  

- 

          # Run livemedia-creator

          rv = broot.mock(['--cwd', broot.tmpdir(within=True), '--chroot', '--'] + cmd)

  
@@ -3449,7 +3541,7 @@ 

          logdirs = [

              os.path.join(broot.tmpdir(), 'lmc-logs'),

              os.path.join(broot.tmpdir(), 'lmc-logs/anaconda'),

-             ]

+         ]

          for logdir in logdirs:

              if not os.path.isdir(logdir):

                  continue
@@ -3468,7 +3560,8 @@ 

  

          if rv:

              raise koji.LiveMediaError(

-                 'Could not create LiveMedia: %s' % parseStatus(rv, 'livemedia-creator') + '; see root.log or livemedia-out.log for more information')

+                 'Could not create LiveMedia: %s' % parseStatus(rv, 'livemedia-creator') +

+                 '; see root.log or livemedia-out.log for more information')

  

          # Find the resultant iso

          # The cwd of the livemedia-creator process is broot.tmpdir() in the chroot, so
@@ -3481,12 +3574,12 @@ 

                  if not isofile:

                      isofile = afile

                  else:

-                     raise koji.LiveMediaError('multiple .iso files found: %s and %s' % (isofile, afile))

+                     raise koji.LiveMediaError(

+                         'multiple .iso files found: %s and %s' % (isofile, afile))

          if not isofile:

              raise koji.LiveMediaError('could not find iso file in chroot')

          isosrc = os.path.join(rootresultsdir, isofile)

  

- 

          # Generate the file manifest of the image, upload the results

          manifest = os.path.join(broot.resultdir(), 'manifest.log')

          self.genISOManifest(isosrc, manifest)
@@ -3495,29 +3588,31 @@ 

          self.uploadFile(isosrc, remoteName=isoname)

  

          imgdata = {'arch': arch,

-             'files': [isoname],

-             'rootdev': None,

-             'task_id': self.id,

-             'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',

-                      'livemedia-out.log', os.path.basename(ksfile),

-                      os.path.basename(kskoji)],

-             'name': name,

-             'version': version,

-             'release': release

-         }

+                    'files': [isoname],

+                    'rootdev': None,

+                    'task_id': self.id,

+                    'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',

+                             'livemedia-out.log', os.path.basename(ksfile),

+                             os.path.basename(kskoji)],

+                    'name': name,

+                    'version': version,

+                    'release': release

+                    }

          if not opts.get('scratch'):

              # TODO - generate list of rpms in image

              # (getImagePackages doesn't work here)

-             #hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),

+             # hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),

              #                                cachedir[1:]))

-             imgdata ['rpmlist'] = []

-             #broot.markExternalRPMs(hdrlist)

+             imgdata['rpmlist'] = []

+             # broot.markExternalRPMs(hdrlist)

  

          broot.expire()

          return imgdata

  

  # A generic task for building disk images using Oz

  # Other Oz-based image handlers should inherit this.

+ 

+ 

  class OzImageTask(BaseTaskHandler):

      Methods = []

  
@@ -3543,9 +3638,10 @@ 

              scm = SCM(self.opts['ksurl'])

              scm.assert_allowed(self.options.allowed_scms)

              logfile = os.path.join(self.workdir, 'checkout-%s.log' % self.arch)

-             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=self.opts.get('scratch'))

+             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                                build_tag=build_tag, scratch=self.opts.get('scratch'))

              scmsrcdir = scm.checkout(self.workdir, self.session,

-                 self.getUploadDir(), logfile)

+                                      self.getUploadDir(), logfile)

              self.run_callbacks("postSCMCheckout",

                                 scminfo=scm.get_info(),

                                 build_tag=build_tag,
@@ -3553,15 +3649,15 @@ 

                                 srcdir=scmsrcdir)

              kspath = os.path.join(scmsrcdir, os.path.basename(ksfile))

          else:

-             tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

+             tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])

              tops['tempdir'] = self.workdir

              with koji.openRemoteFile(ksfile, **tops) as ks_src:

                  kspath = os.path.join(self.workdir, os.path.basename(ksfile))

                  with open(kspath, 'wb') as ks_dest:

                      ks_dest.write(ks_src.read())

          self.logger.debug('uploading kickstart from here: %s' % kspath)

-         self.uploadFile(kspath) # upload the original ks file

-         return kspath # absolute path to the ks file

+         self.uploadFile(kspath)  # upload the original ks file

+         return kspath  # absolute path to the ks file

  

      def readKickstart(self, kspath):

          """
@@ -3585,10 +3681,10 @@ 

              ks.readKickstart(kspath)

          except IOError as e:

              raise koji.BuildError("Failed to read kickstart file "

-                                    "'%s' : %s" % (kspath, e))

+                                   "'%s' : %s" % (kspath, e))

          except kserrors.KickstartError as e:

              raise koji.BuildError("Failed to parse kickstart file "

-                                     "'%s' : %s" % (kspath, e))

+                                   "'%s' : %s" % (kspath, e))

          return ks

  

      def prepareKickstart(self, kspath, install_tree):
@@ -3607,7 +3703,7 @@ 

          # url with --repo, then we substitute that in for the repo(s) specified

          # in the kickstart file. If --repo wasn't specified, then we use the

          # repo associated with the target passed in initially.

-         ks.handler.repo.repoList = [] # delete whatever the ks file told us

+         ks.handler.repo.repoList = []  # delete whatever the ks file told us

          repo_class = kscontrol.dataMap[ks.version]['RepoData']

          # TODO: sensibly use "url" and "repo" commands in kickstart

          if self.opts.get('repo'):
@@ -3623,7 +3719,7 @@ 

              # --repo was not given, so we use the target's build repo

              path_info = koji.PathInfo(topdir=self.options.topurl)

              repopath = path_info.repo(self.repo_info['id'],

-                 self.target_info['build_tag_name'])

+                                       self.target_info['build_tag_name'])

              baseurl = '%s/%s' % (repopath, self.arch)

              self.logger.debug('BASEURL: %s' % baseurl)

              ks.handler.repo.repoList.append(repo_class(
@@ -3650,7 +3746,7 @@ 

          # put the new ksfile in the output directory

          if not os.path.exists(kspath):

              raise koji.BuildError("KS file missing: %s" % kspath)

-         self.uploadFile(kspath) # upload the modified ks file

+         self.uploadFile(kspath)  # upload the modified ks file

          return kspath

  

      def makeConfig(self):
@@ -3666,10 +3762,10 @@ 

              the way we want

          """

          return {

-             #Oz specific

+             # Oz specific

              'oz_data_dir': os.path.join(self.workdir, 'oz_data'),

              'oz_screenshot_dir': os.path.join(self.workdir, 'oz_screenshots'),

-             #IF specific

+             # IF specific

              'imgdir': os.path.join(self.workdir, 'scratch_images'),

              'tmpdir': os.path.join(self.workdir, 'oz-tmp'),

              'verbose': True,
@@ -3682,7 +3778,7 @@ 

              'rhevm_image_format': 'qcow2',

              'tdl_require_root_pw': False,

              'image_manager_args': {

-               'storage_path': os.path.join(self.workdir, 'output_image')},

+                 'storage_path': os.path.join(self.workdir, 'output_image')},

          }

  

      def makeTemplate(self, name, inst_tree):
@@ -3700,10 +3796,10 @@ 

          #      image and attempt to ssh in. This breaks docker image creation.

          # TODO: intelligently guess the distro based on the install tree URL

          distname, distver = self.parseDistro(self.opts.get('distro'))

-         if self.arch in ['armhfp','armv7hnl','armv7hl']:

-            arch = 'armv7l'

+         if self.arch in ['armhfp', 'armv7hnl', 'armv7hl']:

+             arch = 'armv7l'

          else:

-            arch = self.arch

+             arch = self.arch

          template = """<template>

      <name>%s</name>

          <os>
@@ -3714,10 +3810,12 @@ 

                  <url>%s</url>

              </install>

          """ % (name, distname, distver, arch, inst_tree)

-         template += """<icicle>

-                 <extra_command>rpm -qa --qf '%{NAME},%{VERSION},%{RELEASE},%{ARCH},%{EPOCH},%{SIZE},%{SIGMD5},%{BUILDTIME}\n'</extra_command>

-             </icicle>

-         """

+         template += ("<icicle>\n"

+                      "              <extra_command>rpm -qa --qf"

+                      " '%{NAME},%{VERSION},%{RELEASE},%{ARCH},%{EPOCH},%{SIZE},%{SIGMD5},"

+                      "%{BUILDTIME}\\n'</extra_command>\n"

+                      "          </icicle>\n"

+                      "      ")

          # TODO: intelligently guess the size based on the kickstart file

          template += """</os>

      <description>%s OS</description>
@@ -3725,7 +3823,7 @@ 

          <size>%sG</size>

      </disk>

  </template>

- """ % (name, self.opts.get('disk_size'))

+ """ % (name, self.opts.get('disk_size'))  # noqa: E501

          return template
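
The wrapped template above relies on Python's implicit string concatenation: adjacent literals inside parentheses are joined at compile time, which keeps long strings under the new 99-character limit without backslash continuations. A minimal sketch (values are illustrative):

    # Adjacent string literals inside parentheses concatenate at compile time,
    # so long messages can wrap without '\' or '+'.
    query = ("rpm -qa --qf "
             "'%{NAME},%{VERSION},%{RELEASE}\n'")
    assert query == "rpm -qa --qf '%{NAME},%{VERSION},%{RELEASE}\n'"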

  

      def parseDistro(self, distro):
@@ -3753,7 +3851,6 @@ 

              raise koji.BuildError('Unknown or unsupported distro given: %s' % distro)

  

      def fixImageXML(self, format, filename, xmltext):

- 

          """

          The XML generated by Oz/ImageFactory knows nothing about the name

          or image format conversions Koji does. We fix those values in the
@@ -3799,6 +3896,7 @@ 

              screenshot = found[0]

          return screenshot

  

+ 

  class BaseImageTask(OzImageTask):

  

      Methods = ['createImage']
@@ -3813,7 +3911,9 @@ 

          Some image formats require others to be processed first, which is why

          we have to do this. raw files in particular may not be kept.

          """

-         supported = ('raw', 'raw-xz', 'liveimg-squashfs', 'vmdk', 'qcow', 'qcow2', 'vdi', 'rhevm-ova', 'vsphere-ova', 'docker', 'vagrant-virtualbox', 'vagrant-libvirt', 'vagrant-vmware-fusion', 'vagrant-hyperv', 'vpc', "tar-gz")

+         supported = ('raw', 'raw-xz', 'liveimg-squashfs', 'vmdk', 'qcow', 'qcow2', 'vdi',

+                      'rhevm-ova', 'vsphere-ova', 'docker', 'vagrant-virtualbox', 'vagrant-libvirt',

+                      'vagrant-vmware-fusion', 'vagrant-hyperv', 'vpc', "tar-gz")

          for f in formats:

              if f not in supported:

                  raise koji.ApplianceError('Invalid format: %s' % f)
@@ -3837,23 +3937,23 @@ 

          Call out to ImageFactory to build the image(s) we want. Returns a dict

          of details for each image type we had to ask ImageFactory to build

          """

-         fcalls = {'raw':   self._buildBase,

+         fcalls = {'raw': self._buildBase,

                    'raw-xz': self._buildXZ,

                    'tar-gz': self._buildTarGZ,

                    'liveimg-squashfs': self._buildSquashfs,

-                   'vmdk':  self._buildConvert,

-                   'vdi':   self._buildConvert,

-                   'qcow':  self._buildConvert,

+                   'vmdk': self._buildConvert,

+                   'vdi': self._buildConvert,

+                   'qcow': self._buildConvert,

                    'qcow2': self._buildConvert,

-                   'vpc':   self._buildConvert,

-                   'rhevm-ova':   self._buildOVA,

+                   'vpc': self._buildConvert,

+                   'rhevm-ova': self._buildOVA,

                    'vsphere-ova': self._buildOVA,

                    'vagrant-virtualbox': self._buildOVA,

-                   'vagrant-libvirt':    self._buildOVA,

+                   'vagrant-libvirt': self._buildOVA,

                    'vagrant-vmware-fusion': self._buildOVA,

                    'vagrant-hyperv': self._buildOVA,

-                   'docker':      self._buildDocker

-         }

+                   'docker': self._buildDocker

+                   }

          # add a handler to the logger so that we capture ImageFactory's logging

          self.fhandler = logging.FileHandler(self.ozlog)

          self.bd = BuildDispatcher()
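
For context on the handler setup above: capturing a third-party library's log output just means attaching a logging.FileHandler to its logger for the duration of the build and detaching it afterwards. A minimal sketch, assuming an "imagefactory" logger name purely for illustration:

    import logging

    ozlog = "/tmp/oz.log"                     # illustrative path
    tlog = logging.getLogger("imagefactory")  # assumed logger name
    tlog.setLevel(logging.DEBUG)
    fhandler = logging.FileHandler(ozlog)
    tlog.addHandler(fhandler)
    try:
        tlog.debug("messages now also land in %s", ozlog)
    finally:
        tlog.removeHandler(fhandler)          # detach before uploading the log
        fhandler.close()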
@@ -3861,7 +3961,7 @@ 

          self.tlog.setLevel(logging.DEBUG)

          self.tlog.addHandler(self.fhandler)

          images = {}

-         random.seed() # necessary to ensure a unique mac address

+         random.seed()  # necessary to ensure a unique mac address

          params = {'install_script': str(ks.handler),

                    'offline_icicle': True}

          # build the base (raw) image
@@ -3896,7 +3996,7 @@ 

          for fmt in images:

              imginfo[fmt] = images[fmt]

              lxml = self.fixImageXML(fmt, 'libvirt-%s-%s.xml' % (fmt, self.arch),

-                 self.base_img.base_image.parameters['libvirt_xml'])

+                                     self.base_img.base_image.parameters['libvirt_xml'])

              imginfo[fmt]['libvirt'] = lxml

          return imginfo

  
@@ -3921,15 +4021,16 @@ 

              if scrnshot:

                  ext = scrnshot[-3:]

                  self.uploadFile(scrnshot, remoteName='screenshot.%s' % ext)

-             image.os_plugin.abort() # forcibly tear down the VM

+             image.os_plugin.abort()  # forcibly tear down the VM

              # TODO abort when a task is CANCELLED

              if not self.session.checkUpload('', os.path.basename(self.ozlog)):

                  self.tlog.removeHandler(self.fhandler)

                  self.uploadFile(self.ozlog)

              if 'No disk activity' in details:

-                 details = 'Automated install failed or prompted for input. See the screenshot in the task results for more information.'

+                 details = 'Automated install failed or prompted for input. ' \

+                           'See the screenshot in the task results for more information.'

              raise koji.ApplianceError('Image status is %s: %s' %

-                 (status, details))

+                                       (status, details))

  

      def _mergeFactoryParams(self, img_opts, fixed_params):

          """
@@ -3968,7 +4069,7 @@ 

          self.logger.debug('templates: %s' % template)

          self.logger.debug('pre-merge params: %s' % params)

          # We enforce various things related to the ks file - do not allow override

-         self._mergeFactoryParams(params, [ 'install_script' ])

+         self._mergeFactoryParams(params, ['install_script'])

          self.logger.debug('post-merge params: %s' % params)

          base = self.bd.builder_for_base_image(template, parameters=params)

          if wait:
@@ -3989,17 +4090,16 @@ 

          rawimg = os.path.join(self.workdir, self.imgname + '.raw')

          cmd = ['/bin/cp', self.base_img.base_image.data, rawimg]

          conlog = os.path.join(self.workdir,

-             'xz-cp-%s-%s.log' % (format, self.arch))

+                               'xz-cp-%s-%s.log' % (format, self.arch))

          log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),

-             logerror=1)

+                    logerror=1)

          cmd = ['/usr/bin/xz', '-z9T2', rawimg]

          conlog = os.path.join(self.workdir,

-             'xz-%s-%s.log' % (format, self.arch))

+                               'xz-%s-%s.log' % (format, self.arch))

          log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),

-             logerror=1)

+                    logerror=1)

          return {'image': newimg}
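
In _buildXZ above, `xz -z9T2` compresses (-z) at the maximum preset (-9) using two threads (-T2), removing the input file and leaving `<name>.xz` beside it. A rough stand-alone sketch of the compress step, without koji's log_output wrapper (the input file here is a stand-in):

    import subprocess

    rawimg = "/tmp/work-image.raw"
    with open(rawimg, "wb") as f:
        f.write(b"\0" * 4096)    # stand-in for the copied base image
    # xz removes the input and leaves work-image.raw.xz beside it
    subprocess.check_call(["/usr/bin/xz", "-z9T2", rawimg])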

  

- 

      def _buildTarGZ(self, format):

          """

          Use tar and gzip to compress a raw disk image.
@@ -4023,14 +4123,13 @@ 

          cmd = ['/bin/tar', '-Sczvf', newimg, 'disk.raw']

          conlog = os.path.join(self.workdir, 'tar-gz-%s.log' % self.arch)

          log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),

-             logerror=1, cwd=imgdir)

+                    logerror=1, cwd=imgdir)

  

          # now that we've made the tarball, we don't need this hardlink

          os.unlink(rawimg)

  

          return {'image': newimg}

  

- 

      def _buildSquashfs(self, format):

          """

          Use squashfs to wrap a raw disk image into liveimg compatible image.
@@ -4051,15 +4150,15 @@ 

                 'if=%s' % self.base_img.base_image.data,

                 'of=%s' % fsimg]

          conlog = os.path.join(self.workdir,

-             'squashfs-dd-%s-%s.log' % (format, self.arch))

+                               'squashfs-dd-%s-%s.log' % (format, self.arch))

          log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),

-             logerror=1)

+                    logerror=1)

          cmd = ['/usr/sbin/mksquashfs', os.path.join(self.workdir, 'squashfs-root'),

                 newimg, '-comp', 'xz', '-noappend']

          conlog = os.path.join(self.workdir,

-             'squashfs-mksquashfs-%s-%s.log' % (format, self.arch))

+                               'squashfs-mksquashfs-%s-%s.log' % (format, self.arch))

          log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),

-             logerror=1)

+                    logerror=1)

          return {'image': newimg}

  

      def _buildOVA(self, format):
@@ -4084,26 +4183,27 @@ 

          if format == 'vagrant-virtualbox':

              format = 'vsphere-ova'

              img_opts['vsphere_ova_format'] = 'vagrant-virtualbox'

-             fixed_params = [ 'vsphere_ova_format' ]

+             fixed_params = ['vsphere_ova_format']

          if format == 'vagrant-libvirt':

              format = 'rhevm-ova'

              img_opts['rhevm_ova_format'] = 'vagrant-libvirt'

-             fixed_params = [ 'rhevm_ova_format' ]

+             fixed_params = ['rhevm_ova_format']

          if format == 'vagrant-vmware-fusion':

              format = 'vsphere-ova'

              img_opts['vsphere_ova_format'] = 'vagrant-vmware-fusion'

-             # The initial disk image transform for VMWare Fusion/Workstation requires a "standard" VMDK

-             # not the stream oriented format used for VirtualBox or regular VMWare OVAs

+             # The initial disk image transform for VMware Fusion/Workstation requires a "standard"

+             # VMDK, not the stream-oriented format used for VirtualBox or regular VMware OVAs

              img_opts['vsphere_vmdk_format'] = 'standard'

-             fixed_params = [ 'vsphere_ova_format', 'vsphere_vmdk_format' ]

+             fixed_params = ['vsphere_ova_format', 'vsphere_vmdk_format']

          if format == 'vagrant-hyperv':

              format = 'hyperv-ova'

              img_opts['hyperv_ova_format'] = 'hyperv-vagrant'

-             fixed_params = [ 'hyperv_ova_format' ]

+             fixed_params = ['hyperv_ova_format']

          targ = self._do_target_image(self.base_img.base_image.identifier,

-             format.replace('-ova', ''), img_opts=img_opts, fixed_params=fixed_params)

+                                      format.replace('-ova', ''), img_opts=img_opts,

+                                      fixed_params=fixed_params)

          targ2 = self._do_target_image(targ.target_image.identifier, 'OVA',

-             img_opts=img_opts, fixed_params=fixed_params)

+                                       img_opts=img_opts, fixed_params=fixed_params)

          return {'image': targ2.target_image.data}

  

      def _buildDocker(self, format):
@@ -4118,7 +4218,7 @@ 

          """

          img_opts = {'compress': 'xz'}

          targ = self._do_target_image(self.base_img.base_image.identifier,

-             'docker', img_opts=img_opts)

+                                      'docker', img_opts=img_opts)

          return {'image': targ.target_image.data}

  

      def _do_target_image(self, base_id, image_type, img_opts=None, fixed_params=None):
@@ -4150,7 +4250,9 @@ 

          self._mergeFactoryParams(img_opts, fixed_params)

          self.logger.debug('img_opts_post_merge: %s' % img_opts)

          target = self.bd.builder_for_target_image(image_type,

-             image_id=base_id, template=None, parameters=img_opts)

+                                                   image_id=base_id,

+                                                   template=None,

+                                                   parameters=img_opts)

          target.target_thread.join()

          self._checkImageState(target)

          return target
@@ -4171,9 +4273,9 @@ 

              ofmt = 'vhd'

          newimg = os.path.join(self.workdir, self.imgname + '.%s' % ofmt)

          cmd = ['/usr/bin/qemu-img', 'convert', '-f', 'raw', '-O',

-             format, self.base_img.base_image.data, newimg]

+                format, self.base_img.base_image.data, newimg]

          if format == 'qcow':

-             cmd.insert(2, '-c') # enable compression for qcow images

+             cmd.insert(2, '-c')  # enable compression for qcow images

          if format == 'qcow2':

              # qemu-img changed its default behavior at some point to generate a

              # v3 image when the requested output format is qcow2.  We don't
@@ -4185,17 +4287,20 @@ 

              # Factory does not use a full path - for consistency, force that here

              cmd[0] = '/usr/bin/qemu-img'

          conlog = os.path.join(self.workdir,

-             'qemu-img-%s-%s.log' % (format, self.arch))

+                               'qemu-img-%s-%s.log' % (format, self.arch))

          log_output(self.session, cmd[0], cmd, conlog,

-             self.getUploadDir(), logerror=1)

+                    self.getUploadDir(), logerror=1)

          return {'image': newimg}
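
For reference, the conversion above reduces to a single qemu-img call: `-f raw` names the input format and `-O` the output format; the task additionally inserts `-c` after `convert` to compress qcow output. A hedged sketch with a sparse stand-in image:

    import subprocess

    src = "/tmp/example.raw"
    dst = "/tmp/example.qcow2"
    with open(src, "wb") as f:
        f.truncate(1024 * 1024)  # sparse 1 MiB stand-in for the base image
    cmd = ['/usr/bin/qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', src, dst]
    subprocess.check_call(cmd)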

  

-     def handler(self, name, version, release, arch, target_info, build_tag, repo_info, inst_tree, opts=None):

+     def handler(self, name, version, release, arch, target_info,

+                 build_tag, repo_info, inst_tree, opts=None):

          if not ozif_enabled:

-             self.logger.error("ImageFactory features require the following dependencies: pykickstart, imagefactory, oz and possibly python-hashlib")

+             self.logger.error(

+                 "ImageFactory features require the following dependencies: "

+                 "pykickstart, imagefactory, oz and possibly python-hashlib")

              raise koji.ApplianceError('ImageFactory functions not available')

  

-         if opts == None:

+         if opts is None:

              opts = {}

          self.arch = arch

          self.target_info = target_info
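
The `opts is None` change (flake8 E711) is more than style: `==` can be intercepted by a custom __eq__, while `is` always tests identity against the None singleton. A small demonstration:

    class Always(object):
        def __eq__(self, other):
            return True   # pathological, but legal

    o = Always()
    print(o == None)      # True  -- __eq__ hijacks the comparison
    print(o is None)      # False -- identity cannot be fooled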
@@ -4207,8 +4312,8 @@ 

          kspath = self.fetchKickstart(build_tag=target_info['build_tag_name'])

          ks = self.prepareKickstart(kspath, inst_tree)

          kskoji = self.writeKickstart(ks,

-             os.path.join(self.workdir, 'koji-%s-%i-base.ks' %

-             (self.target_info['build_tag_name'], self.id)))

+                                      os.path.join(self.workdir, 'koji-%s-%i-base.ks' %

+                                                   (self.target_info['build_tag_name'], self.id)))

  

          # auto-generate a TDL file and config dict for ImageFactory

          self.imgname = '%s-%s-%s.%s' % (name, version, release, self.arch)
@@ -4251,21 +4356,21 @@ 

          }

          # record the RPMs that were installed

          if not opts.get('scratch'):

-             #fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',

+             # fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',

              #    'payloadhash', 'buildtime')

              icicle = xml.dom.minidom.parseString(images['raw']['icicle'])

              self.logger.debug('ICICLE: %s' % images['raw']['icicle'])

              for p in icicle.getElementsByTagName('extra'):

                  bits = p.firstChild.nodeValue.split(',')

                  rpm = {

-                     'name':         bits[0],

-                     'version':      bits[1],

-                     'release':      bits[2],

-                     'arch':         bits[3],

+                     'name': bits[0],

+                     'version': bits[1],

+                     'release': bits[2],

+                     'arch': bits[3],

                      # epoch is a special case, as usual

-                     'size':         int(bits[5]),

-                     'payloadhash':  bits[6],

-                     'buildtime':    int(bits[7])

+                     'size': int(bits[5]),

+                     'payloadhash': bits[6],

+                     'buildtime': int(bits[7])

                  }

                  if rpm['name'] in ['buildsys-build', 'gpg-pubkey']:

                      continue
@@ -4276,7 +4381,7 @@ 

                  imgdata['rpmlist'].append(rpm)

              # TODO: hack to make this work for now, need to refactor

              br = BuildRoot(self.session, self.options, build_tag, self.arch,

-                 self.id, repo_id=self.repo_info['id'])

+                            self.id, repo_id=self.repo_info['id'])

              br.markExternalRPMs(imgdata['rpmlist'])

  

          # upload the results
@@ -4305,6 +4410,7 @@ 

          # no need to delete anything since self.workdir will get scrubbed

          return imgdata

  

+ 

  class BuildIndirectionImageTask(OzImageTask):

      Methods = ['indirectionimage']

  
@@ -4317,16 +4423,19 @@ 

      def initImageBuild(self, name, version, release, target_info, opts):

          """create a build object for this image build"""

          pkg_cfg = self.session.getPackageConfig(target_info['dest_tag_name'],

-              name)

+                                                 name)

          self.logger.debug("%r" % pkg_cfg)

          if not opts.get('skip_tag') and not opts.get('scratch'):

              # Make sure package is on the list for this tag

              if pkg_cfg is None:

-                 raise koji.BuildError("package (image) %s not in list for tag %s" % (name, target_info['dest_tag_name']))

+                 raise koji.BuildError("package (image) %s not in list for tag %s" %

+                                       (name, target_info['dest_tag_name']))

              elif pkg_cfg['blocked']:

-                 raise koji.BuildError("package (image)  %s is blocked for tag %s" % (name, target_info['dest_tag_name']))

+                 raise koji.BuildError("package (image) %s is blocked for tag %s" %

+                                       (name, target_info['dest_tag_name']))

          return self.session.host.initImageBuild(self.id,

-             dict(name=name, version=version, release=release, epoch=0))

+                                                 dict(name=name, version=version, release=release,

+                                                      epoch=0))

  

      def getRelease(self, name, ver):

          """return the next available release number for an N-V"""
@@ -4354,10 +4463,11 @@ 

          if fileurl:

              scm = SCM(fileurl)

              scm.assert_allowed(self.options.allowed_scms)

-             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=self.opts.get('scratch'))

+             self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                                build_tag=build_tag, scratch=self.opts.get('scratch'))

              logfile = os.path.join(self.workdir, 'checkout.log')

              scmsrcdir = scm.checkout(self.workdir, self.session,

-                 self.getUploadDir(), logfile)

+                                      self.getUploadDir(), logfile)

              self.run_callbacks("postSCMCheckout",

                                 scminfo=scm.get_info(),

                                 build_tag=build_tag,
@@ -4365,15 +4475,15 @@ 

                                 srcdir=scmsrcdir)

              final_path = os.path.join(scmsrcdir, os.path.basename(filepath))

          else:

-             tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

+             tops = dict([(k, getattr(self.options, k)) for k in ('topurl', 'topdir')])

              tops['tempdir'] = self.workdir

              final_path = os.path.join(self.workdir, os.path.basename(filepath))

              with koji.openRemoteFile(filepath, **tops) as remote_fileobj:

                  with open(final_path, 'w') as final_fileobj:

                      shutil.copyfileobj(remote_fileobj, final_fileobj)

          self.logger.debug('uploading retrieved file from here: %s' % final_path)

-         self.uploadFile(final_path) # upload the original ks file

-         return final_path # absolute path to the ks file

+         self.uploadFile(final_path)  # upload the original ks file

+         return final_path  # absolute path to the ks file

  

      def handler(self, opts):

          """Governing task for building an image with two other images using Factory Indirection"""
@@ -4385,11 +4495,13 @@ 

              taskinfo = self.session.getTaskInfo(task_id)

              taskstate = koji.TASK_STATES[taskinfo['state']].lower()

              if taskstate != 'closed':

-                 raise koji.BuildError("Input task (%d) must be in closed state - current state is (%s)" %

+                 raise koji.BuildError("Input task (%d) must be in closed state"

+                                       " - current state is (%s)" %

                                        (task_id, taskstate))

              taskmethod = taskinfo['method']

              if taskmethod != "createImage":

-                 raise koji.BuildError("Input task method must be 'createImage' - actual method (%s)" %

+                 raise koji.BuildError("Input task method must be 'createImage'"

+                                       " - actual method (%s)" %

                                        (taskmethod))

              result = self.session.getTaskResult(task_id)

  
@@ -4402,12 +4514,14 @@ 

              task_diskimage = _match_name(result['files'], ".*qcow2$")

              task_tdl = _match_name(result['files'], "tdl.*xml")

  

-             task_dir = os.path.join(koji.pathinfo.work(),koji.pathinfo.taskrelpath(task_id))

+             task_dir = os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(task_id))

              diskimage_full = os.path.join(task_dir, task_diskimage)

              tdl_full = os.path.join(task_dir, task_tdl)

  

              if not (os.path.isfile(diskimage_full) and os.path.isfile(tdl_full)):

-                 raise koji.BuildError("Missing TDL or qcow2 image for task (%d) - possible expired scratch build" % (task_id))

+                 raise koji.BuildError(

+                     "Missing TDL or qcow2 image for task (%d) - possible expired scratch build" %

+                     (task_id))

  

              # The sequence to recreate a valid persistent image is as follows

              # Create a new BaseImage object
@@ -4428,7 +4542,10 @@ 

              return factory_base_image

  

          def _nvr_to_image(nvr, arch):

-             """ Take a build ID or NVR plus arch and turn it into an Image Factory Base Image object """

+             """

+             Take a build ID or NVR plus arch and turn it into

+             an Image Factory Base Image object

+             """

              pim = PersistentImageManager.default_manager()

              build = self.session.getBuild(nvr)

              if not build:
@@ -4439,7 +4556,7 @@ 

                  raise koji.BuildError("Could not retrieve archives for build (%s) from NVR (%s)" %

                                        (build['id'], nvr))

  

-             buildfiles = [ x['filename'] for x in buildarchives ]

+             buildfiles = [x['filename'] for x in buildarchives]

              builddir = koji.pathinfo.imagebuild(build)

  

              def _match_name(inlist, namere):
@@ -4447,14 +4564,15 @@ 

                      if re.search(namere, filename):

                          return filename

  

-             build_diskimage = _match_name(buildfiles, ".*%s\.qcow2$" % (arch))

-             build_tdl = _match_name(buildfiles, "tdl.%s\.xml" % (arch))

+             build_diskimage = _match_name(buildfiles, r".*%s\.qcow2$" % (arch))

+             build_tdl = _match_name(buildfiles, r"tdl.%s\.xml" % (arch))

  

              diskimage_full = os.path.join(builddir, build_diskimage)

              tdl_full = os.path.join(builddir, build_tdl)

  

              if not (os.path.isfile(diskimage_full) and os.path.isfile(tdl_full)):

-                 raise koji.BuildError("Missing TDL (%s) or qcow2 (%s) image for image (%s) - this should never happen" %

+                 raise koji.BuildError("Missing TDL (%s) or qcow2 (%s) image for image (%s)"

+                                       " - this should never happen" %

                                        (build_tdl, build_diskimage, nvr))

  

              # The sequence to recreate a valid persistent image is as follows
@@ -4475,7 +4593,7 @@ 

              # We can now reference this object directly or via its UUID in persistent storage

              return factory_base_image

  

-         if opts == None:

+         if opts is None:

              opts = {}

          self.opts = opts

  
@@ -4505,7 +4623,7 @@ 

              release = self.getRelease(name, version)

          if '-' in version:

              raise koji.ApplianceError('The Version may not have a hyphen')

-         if '-' in  release:

+         if '-' in release:

              raise koji.ApplianceError('The Release may not have a hyphen')

  

          indirection_template = self.fetchHubOrSCM(opts.get('indirection_template'),
@@ -4532,21 +4650,20 @@ 

          bld_info = None

          if not opts['scratch']:

              bld_info = self.initImageBuild(name, version, release,

-                 target_info, opts)

+                                            target_info, opts)

  

          try:

              return self._do_indirection(opts, base_factory_image, utility_factory_image,

-                                  indirection_template, tlog, ozlog, fhandler,

-                                  bld_info, target_info, bd)

-         except:

+                                         indirection_template, tlog, ozlog, fhandler,

+                                         bld_info, target_info, bd)

+         except Exception:

              if not opts.get('scratch'):

-                 #scratch builds do not get imported

+                 # scratch builds do not get imported

                  if bld_info:

                      self.session.host.failBuild(self.id, bld_info['id'])

              # reraise the exception

              raise

  

- 

      def _do_indirection(self, opts, base_factory_image, utility_factory_image,

                          indirection_template, tlog, ozlog, fhandler, bld_info,

                          target_info, bd):
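
The bare `except:` replaced above (flake8 E722) would also have caught SystemExit and KeyboardInterrupt, which a long-running daemon normally wants to let propagate; `except Exception:` keeps the failBuild cleanup while letting those through. A small demonstration:

    def risky():
        raise KeyboardInterrupt

    try:
        try:
            risky()
        except Exception:
            print("not reached: KeyboardInterrupt is not an Exception")
    except KeyboardInterrupt:
        print("propagates past 'except Exception', as a daemon would want")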
@@ -4566,8 +4683,8 @@ 

              results_loc = "/" + results_loc

          params = {'utility_image': str(utility_factory_image.identifier),

                    'utility_customizations': utility_customizations,

-                   'results_location': results_loc }

-         random.seed() # necessary to ensure a unique mac address

+                   'results_location': results_loc}

+         random.seed()  # necessary to ensure a unique mac address

          try:

              try:

                  # Embedded deep debug option - if template is just the string MOCK
@@ -4580,11 +4697,11 @@ 

                      target.target_image = target_image

                      with open(target_image.data, "w") as f:

                          f.write("Mock build from task ID: %s" % self.id)

-                     target_image.status='COMPLETE'

+                     target_image.status = 'COMPLETE'

                  else:

                      target = bd.builder_for_target_image('indirection',

-                         image_id=base_factory_image.identifier,

-                         parameters=params)

+                                                          image_id=base_factory_image.identifier,

+                                                          parameters=params)

                      target.target_thread.join()

              except Exception as e:

                  self.logger.debug("Exception encountered during target build")
@@ -4601,42 +4718,45 @@ 

                  tlog.removeHandler(fhandler)

                  self.uploadFile(ozlog)

              raise koji.ApplianceError('Image status is %s: %s' %

-                 (target.target_image.status, target.target_image.status_detail))

+                                       (target.target_image.status,

+                                        target.target_image.status_detail))

  

          self.uploadFile(target.target_image.data, remoteName=os.path.basename(results_loc))

  

-         myresults = { }

+         myresults = {}

          myresults['task_id'] = self.id

-         myresults['files'] = [ os.path.basename(results_loc) ]

-         myresults['logs'] = [ os.path.basename(ozlog) ]

+         myresults['files'] = [os.path.basename(results_loc)]

+         myresults['logs'] = [os.path.basename(ozlog)]

          myresults['arch'] = opts['arch']

          # TODO: This should instead track the two input images: base and utility

-         myresults['rpmlist'] = [ ]

+         myresults['rpmlist'] = []

  

          # This is compatible with some helper methods originally implemented for the base

          # image build.  In the original usage, the dict contains an entry per build arch

          # TODO: If adding multiarch support, keep this in mind

-         results = { str(self.id): myresults }

+         results = {str(self.id): myresults}

          self.logger.debug('Image Results for hub: %s' % results)

  

          if opts['scratch']:

              self.session.host.moveImageBuildToScratch(self.id, results)

          else:

              self.session.host.completeImageBuild(self.id, bld_info['id'],

-                 results)

+                                                  results)

  

          # tag it

          if not opts.get('scratch') and not opts.get('skip_tag'):

              tag_task_id = self.session.host.subtask(method='tagBuild',

-                 arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],

-                 label='tag', parent=self.id, arch='noarch')

+                                                     arglist=[target_info['dest_tag'],

+                                                              bld_info['id'], False, None, True],

+                                                     label='tag', parent=self.id, arch='noarch')

              self.wait(tag_task_id)

  

          # report results

          report = ''

          if opts.get('scratch'):

              respath = ', '.join(

-                 [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id] ])

+                 [os.path.join(koji.pathinfo.work(),

+                               koji.pathinfo.taskrelpath(tid)) for tid in [self.id]])

              report += 'Scratch '

          else:

              respath = koji.pathinfo.imagebuild(bld_info)
@@ -4665,8 +4785,10 @@ 

          build_tag = self.session.getTag(build_tag, strict=True, event=event_id)

  

          rootopts = {'install_group': 'srpm-build', 'repo_id': repo_id}

-         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))

-         broot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, **rootopts)

+         br_arch = self.find_arch('noarch', self.session.host.getHost(),

+                                  self.session.getBuildConfig(build_tag['id'], event=event_id))

+         broot = BuildRoot(self.session, self.options,

+                           build_tag['id'], br_arch, self.id, **rootopts)

          broot.workdir = self.workdir

  

          self.logger.debug("Initializing buildroot")
@@ -4704,7 +4826,8 @@ 

          release = koji.get_header_field(h, 'release')

          srpm_name = "%(name)s-%(version)s-%(release)s.src.rpm" % locals()

          if srpm_name != os.path.basename(srpm):

-             raise koji.BuildError('srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm)))

+             raise koji.BuildError('srpm name mismatch: %s != %s' %

+                                   (srpm_name, os.path.basename(srpm)))

  

          # upload srpm and return

          self.uploadFile(srpm)
@@ -4737,7 +4860,7 @@ 

              if re.match("%s:" % tag, spec, re.M):

                  raise koji.BuildError("%s is not allowed to be set in spec file" % tag)

          for tag in ("packager", "distribution", "vendor"):

-             if re.match("%%define\s+%s\s+" % tag, spec, re.M):

+             if re.match(r"%%define\s+%s\s+" % tag, spec, re.M):

                  raise koji.BuildError("%s is not allowed to be defined in spec file" % tag)
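
The raw-string prefix added above (flake8 W605) keeps sequences like \s away from Python's string-escape processing; in a plain literal an unrecognized escape is a DeprecationWarning on Python 3.6+ and is slated to become an error. For example:

    import re

    tag = "packager"
    # r'' hands \s straight to the regex engine
    pattern = r"%%define\s+%s\s+" % tag
    print(re.match(pattern, "%define packager Example", re.M) is not None)  # True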

  

      def patch_scm_source(self, sourcedir, logfile, opts):
@@ -4768,12 +4891,15 @@ 

          rootopts = {'install_group': 'srpm-build',

                      'setup_dns': True,

                      'repo_id': repo_id}

-         if self.options.scm_credentials_dir is not None and os.path.isdir(self.options.scm_credentials_dir):

-             rootopts['bind_opts'] = {'dirs' : {self.options.scm_credentials_dir : '/credentials',}}

-             ## Force internal_dev_setup back to true because bind_opts is used to turn it off

+         if self.options.scm_credentials_dir is not None and os.path.isdir(

+                 self.options.scm_credentials_dir):

+             rootopts['bind_opts'] = {'dirs': {self.options.scm_credentials_dir: '/credentials', }}

+             # Force internal_dev_setup back to true because bind_opts is used to turn it off

              rootopts['internal_dev_setup'] = True

-         br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))

-         broot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, **rootopts)

+         br_arch = self.find_arch('noarch', self.session.host.getHost(),

+                                  self.session.getBuildConfig(build_tag['id'], event=event_id))

+         broot = BuildRoot(self.session, self.options,

+                           build_tag['id'], br_arch, self.id, **rootopts)

          broot.workdir = self.workdir

  

          self.logger.debug("Initializing buildroot")
@@ -4787,7 +4913,8 @@ 

          logfile = self.workdir + '/checkout.log'

          uploadpath = self.getUploadDir()

  

-         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(), build_tag=build_tag, scratch=opts.get('scratch'))

+         self.run_callbacks('preSCMCheckout', scminfo=scm.get_info(),

+                            build_tag=build_tag, scratch=opts.get('scratch'))

          # Check out spec file, etc. from SCM

          sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)

          self.run_callbacks("postSCMCheckout",
@@ -4820,7 +4947,7 @@ 

          # Run spec file sanity checks.  Any failures will throw a BuildError

          self.spec_sanity_checks(spec_file)

  

-         #build srpm

+         # build srpm

          self.logger.debug("Running srpm build")

          broot.build_srpm(spec_file, sourcedir, scm.source_cmd)

  
@@ -4839,9 +4966,10 @@ 

          release = koji.get_header_field(h, 'release')

          srpm_name = "%(name)s-%(version)s-%(release)s.src.rpm" % locals()

          if srpm_name != os.path.basename(srpm):

-             raise koji.BuildError('srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm)))

+             raise koji.BuildError('srpm name mismatch: %s != %s' %

+                                   (srpm_name, os.path.basename(srpm)))

  

-         #upload srpm and return

+         # upload srpm and return

          self.uploadFile(srpm)

  

          brootid = broot.id
@@ -4852,10 +4980,11 @@ 

  

          return {'srpm': "%s/%s" % (uploadpath, srpm_name),

                  'logs': ["%s/%s" % (uploadpath, os.path.basename(f))

-                                         for f in log_files],

+                          for f in log_files],

                  'brootid': brootid,

                  'source': source,

-                }

+                 }

+ 

  

  class TagNotificationTask(BaseTaskHandler):

      Methods = ['tagNotification']
@@ -4863,7 +4992,7 @@ 

      _taskWeight = 0.1

  

      message_templ = \

- """From: %(from_addr)s\r

+         """From: %(from_addr)s\r

  Subject: %(nvr)s %(result)s %(operation)s by %(user_name)s\r

  To: %(to_addrs)s\r

  X-Koji-Package: %(pkg_name)s\r
@@ -4881,13 +5010,16 @@ 

  %(failure_info)s\r

  """

  

-     def handler(self, recipients, is_successful, tag_info, from_info, build_info, user_info, ignore_success=None, failure_msg=''):

+     def handler(self, recipients, is_successful, tag_info, from_info,

+                 build_info, user_info, ignore_success=None, failure_msg=''):

          if len(recipients) == 0:

              self.logger.debug('task %i: no recipients, not sending notifications', self.id)

              return

  

          if ignore_success and is_successful:

-             self.logger.debug('task %i: tag operation successful and ignore success is true, not sending notifications', self.id)

+             self.logger.debug(

+                 'task %i: tag operation successful and ignore success is true, '

+                 'not sending notifications', self.id)

              return

  

          build = self.session.getBuild(build_info)
@@ -4941,22 +5073,24 @@ 

          server = smtplib.SMTP(self.options.smtphost)

          if self.options.smtp_user is not None and self.options.smtp_pass is not None:

              server.login(self.options.smtp_user, self.options.smtp_pass)

-         #server.set_debuglevel(True)

+         # server.set_debuglevel(True)

  

          server.sendmail(from_addr, recipients, message)

          server.quit()

  

          return 'sent notification of tag operation %i to: %s' % (self.id, to_addrs)
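
The delivery above is the standard smtplib flow: connect, optionally authenticate, hand over a preassembled RFC 822 message, quit. A minimal sketch with illustrative host and addresses:

    import smtplib

    message = ("From: koji@example.com\r\n"
               "Subject: tag notification\r\n"
               "To: owner@example.com\r\n"
               "\r\n"
               "body text\r\n")
    server = smtplib.SMTP("smtp.example.com")   # illustrative relay
    # server.login(user, password)              # only if the relay requires auth
    server.sendmail("koji@example.com", ["owner@example.com"], message)
    server.quit()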

  

+ 

  class BuildNotificationTask(BaseTaskHandler):

      Methods = ['buildNotification']

  

      _taskWeight = 0.1

  

      # XXX externalize these templates somewhere

-     subject_templ = """Package: %(build_nvr)s Tag: %(dest_tag)s Status: %(status)s Built by: %(build_owner)s"""

+     subject_templ = "Package: %(build_nvr)s Tag: %(dest_tag)s Status: %(status)s " \

+                     "Built by: %(build_owner)s"

      message_templ = \

- """From: %(from_addr)s\r

+         """From: %(from_addr)s\r

  Subject: %(subject)s\r

  To: %(to_addrs)s\r

  X-Koji-Tag: %(dest_tag)s\r
@@ -4995,7 +5129,7 @@ 

          result = None

          try:

              result = self.session.getTaskResult(task_id)

-         except:

+         except Exception:

              excClass, result = sys.exc_info()[:2]

              if hasattr(result, 'faultString'):

                  result = result.faultString
@@ -5055,7 +5189,10 @@ 

              return

  

          build_pkg_name = build['package_name']

-         build_pkg_evr = '%s%s-%s' % ((build['epoch'] and str(build['epoch']) + ':' or ''), build['version'], build['release'])

+         build_pkg_evr = '%s%s-%s' % \

+                         ((build['epoch'] and str(build['epoch']) + ':' or ''),

+                          build['version'],

+                          build['release'])

          build_nvr = koji.buildLabel(build)

          build_id = build['id']

          build_owner = build['owner_name']
@@ -5081,23 +5218,24 @@ 

                  cancel_info = "\r\nCanceled by: %s" % canceler['name']

          elif build['state'] == koji.BUILD_STATES['FAILED']:

              failure_data = task_data[task_id]['result']

-             failed_hosts = ['%s (%s)' % (task['host'], task['arch']) for task in task_data.values() if task['host'] and task['state'] == 'failed']

+             failed_hosts = ['%s (%s)' % (task['host'], task['arch'])

+                             for task in task_data.values()

+                             if task['host'] and task['state'] == 'failed']

              failure_info = "\r\n%s (%d) failed on %s:\r\n  %s" % (build_nvr, build_id,

                                                                    ', '.join(failed_hosts),

                                                                    failure_data)

  

          failure = failure_info or cancel_info or ''

  

-         tasks = {'failed' : [task for task in task_data.values() if task['state'] == 'failed'],

-                  'canceled' : [task for task in task_data.values() if task['state'] == 'canceled'],

-                  'closed' : [task for task in task_data.values() if task['state'] == 'closed']}

+         tasks = {'failed': [task for task in task_data.values() if task['state'] == 'failed'],

+                  'canceled': [task for task in task_data.values() if task['state'] == 'canceled'],

+                  'closed': [task for task in task_data.values() if task['state'] == 'closed']}

  

          srpms = []

          for taskinfo in task_data.values():

              for srpmfile in taskinfo['srpms']:

                  srpms.append(srpmfile)

-         srpms = self.uniq(srpms)

-         srpms.sort()

+         srpms = sorted(self.uniq(srpms))

  

          if srpms:

              output = "SRPMS:\r\n"
@@ -5125,9 +5263,11 @@ 

                          output += "logs:\r\n"

                          for (file_, volume) in task['logs']:

                              if tasks[task_state] != 'closed':

-                                 output += " %s/getfile?taskID=%s&name=%s&volume=%s\r\n" % (weburl, task['id'], file_, volume)

+                                 output += " %s/getfile?taskID=%s&name=%s&volume=%s\r\n" % (

+                                     weburl, task['id'], file_, volume)

                              else:

-                                 output += " %s\r\n" % '/'.join([buildurl, 'data', 'logs', task['build_arch'], file_])

+                                 output += " %s\r\n" % '/'.join([buildurl, 'data', 'logs',

+                                                                 task['build_arch'], file_])

                      if task['rpms']:

                          output += "rpms:\r\n"

                          for file_ in task['rpms']:
@@ -5135,11 +5275,13 @@ 

                      if task['misc']:

                          output += "misc:\r\n"

                          for (file_, volume) in task['misc']:

-                             output += " %s/getfile?taskID=%s&name=%s&volume=%s\r\n" % (weburl, task['id'], file_, volume)

+                             output += " %s/getfile?taskID=%s&name=%s&volume=%s\r\n" % (

+                                 weburl, task['id'], file_, volume)

                      output += "\r\n"

                  output += "\r\n"

  

-         changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n","\r\n")

+         changelog = koji.util.formatChangelog(self.session.getChangelogEntries(

+             build_id, queryOpts={'limit': 3})).replace("\n", "\r\n")

          if changelog:

              changelog = "Changelog:\r\n%s" % changelog

  
@@ -5164,9 +5306,8 @@ 

      def uniq(self, items):

          """Remove duplicates from the list of items, and sort the list."""

          m = dict(zip(items, [1] * len(items)))

-         l = to_list(m.keys())

-         l.sort()

-         return l

+         s = sorted(to_list(m.keys()))

+         return s
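
The dict-based uniq predates reliance on sets in this code; on any modern Python the same dedupe-and-sort is simply sorted(set(items)), shown here for comparison:

    items = ["b.src.rpm", "a.src.rpm", "b.src.rpm"]
    print(sorted(set(items)))   # ['a.src.rpm', 'b.src.rpm']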

  

  

  class NewRepoTask(BaseTaskHandler):
@@ -5192,9 +5333,9 @@ 

          for fn in os.listdir(path):

              if fn != 'groups' and os.path.isfile("%s/%s/pkglist" % (path, fn)):

                  arches.append(fn)

-         #see if we can find a previous repo to update from

-         #only shadowbuild tags should start with SHADOWBUILD, their repos are auto

-         #expired.  so lets get the most recent expired tag for newRepo shadowbuild tasks.

+         # see if we can find a previous repo to update from

+         # only shadowbuild tags should start with SHADOWBUILD, their repos are auto

+         # expired.  so let's get the most recent expired tag for newRepo shadowbuild tasks.

          if tinfo['name'].startswith('SHADOWBUILD'):

              oldrepo_state = koji.REPO_EXPIRED

          else:
@@ -5226,7 +5367,7 @@ 

              results = self.wait(to_list(subtasks.values()), all=True, failany=True)

              for (arch, task_id) in six.iteritems(subtasks):

                  data[arch] = results[task_id]

-                 self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))

+                 self.logger.debug("DEBUG: %r : %r " % (arch, data[arch],))

  

          # finalize

          kwargs = {}
@@ -5242,7 +5383,7 @@ 

      _taskWeight = 1.5

  

      def handler(self, repo_id, arch, oldrepo):

-         #arch is the arch of the repo, not the task

+         # arch is the arch of the repo, not the task

          rinfo = self.session.repoInfo(repo_id, strict=True)

          if rinfo['state'] != koji.REPO_INIT:

              raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % rinfo)
@@ -5253,7 +5394,7 @@ 

          if not os.path.isdir(self.repodir):

              raise koji.GenericError("Repo directory missing: %s" % self.repodir)

          groupdata = os.path.join(toprepodir, 'groups', 'comps.xml')

-         #set up our output dir

+         # set up our output dir

          self.outdir = '%s/repo' % self.workdir

          self.datadir = '%s/repodata' % self.outdir

          pkglist = os.path.join(self.repodir, 'pkglist')
@@ -5286,7 +5427,7 @@ 

              cmd.extend(['-i', pkglist])

          if os.path.isfile(groupdata):

              cmd.extend(['-g', groupdata])

-         #attempt to recycle repodata from last repo

+         # attempt to recycle repodata from last repo

          if pkglist and oldrepo and self.options.createrepo_update:

              # old repo could be from inherited tag, so path needs to be

              # composed from that tag, not rinfo['tag_name']
@@ -5316,21 +5457,21 @@ 

          logfile = '%s/createrepo.log' % self.workdir

          status = log_output(self.session, cmd[0], cmd, logfile, self.getUploadDir(), logerror=True)

          if not isSuccess(status):

-             raise koji.GenericError('failed to create repo: %s' \

-                     % parseStatus(status, ' '.join(cmd)))

+             raise koji.GenericError('failed to create repo: %s'

+                                     % parseStatus(status, ' '.join(cmd)))
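
For orientation, the command whose status is checked above is an ordinary createrepo run; with the flags visible earlier in this file (-i for an explicit package list, -g for comps group data) a manual equivalent looks roughly like the following, assuming createrepo_c and illustrative paths:

    import subprocess

    repodir = "/mnt/koji/repos/f30-build/123/x86_64"   # illustrative
    subprocess.check_call([
        '/usr/bin/createrepo_c', '--update',
        '-i', repodir + '/pkglist',
        '-g', '/mnt/koji/repos/f30-build/123/groups/comps.xml',
        repodir,
    ])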

  

      def merge_repos(self, external_repos, arch, groupdata):

          # group repos by merge type

          repos_by_mode = {}

          for repo in external_repos:

              repos_by_mode.setdefault(

-                     repo.get('merge_mode', 'koji'), []).append(repo)

+                 repo.get('merge_mode', 'koji'), []).append(repo)

  

          # figure out merge mode

          if len(repos_by_mode) > 1:

              # TODO: eventually support mixing merge modes

              raise koji.GenericError('Found multiple merge modes for external '

-                     'repos: %s\n' % repos_by_mode.keys())

+                                     'repos: %s\n' % repos_by_mode.keys())

          merge_mode = to_list(repos_by_mode.keys())[0]

  

          # move current repo to the premerge location
@@ -5375,8 +5516,8 @@ 

          logfile = '%s/mergerepos.log' % self.workdir

          status = log_output(self.session, cmd[0], cmd, logfile, self.getUploadDir(), logerror=True)

          if not isSuccess(status):

-             raise koji.GenericError('failed to merge repos: %s' \

-                 % parseStatus(status, ' '.join(cmd)))

+             raise koji.GenericError('failed to merge repos: %s'

+                                     % parseStatus(status, ' '.join(cmd)))

  

  

  class NewDistRepoTask(BaseTaskHandler):
@@ -5438,28 +5579,29 @@ 

  

      archmap = {'s390x': 's390', 'ppc64': 'ppc', 'x86_64': 'i686'}

      compat = {"i386": ("athlon", "i686", "i586", "i486", "i386", "noarch"),

-           "x86_64": ("amd64", "ia32e", "x86_64", "noarch"),

-           "ia64": ("ia64", "noarch"),

-           "ppc": ("ppc", "noarch"),

-           "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),

-           "ppc64le": ("ppc64le", "noarch"),

-           "s390": ("s390", "noarch"),

-           "s390x": ("s390x",  "noarch"),

-           "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),

-           "sparc64": ("sparc64v", "sparc64", "noarch"),

-           "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),

-           "arm": ("arm", "armv4l", "armv4tl", "armv5tel", "armv5tejl", "armv6l", "armv7l", "noarch"),

-           "armhfp": ("armv7hl", "armv7hnl", "noarch"),

-           "aarch64": ("aarch64", "noarch"),

-           "riscv64": ("riscv64", "noarch"),

-           "src": ("src",)

-           }

+               "x86_64": ("amd64", "ia32e", "x86_64", "noarch"),

+               "ia64": ("ia64", "noarch"),

+               "ppc": ("ppc", "noarch"),

+               "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),

+               "ppc64le": ("ppc64le", "noarch"),

+               "s390": ("s390", "noarch"),

+               "s390x": ("s390x", "noarch"),

+               "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),

+               "sparc64": ("sparc64v", "sparc64", "noarch"),

+               "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),

+               "arm": ("arm", "armv4l", "armv4tl", "armv5tel", "armv5tejl", "armv6l", "armv7l",

+                       "noarch"),

+               "armhfp": ("armv7hl", "armv7hnl", "noarch"),

+               "aarch64": ("aarch64", "noarch"),

+               "riscv64": ("riscv64", "noarch"),

+               "src": ("src",)

+               }

  

      biarch = {"ppc": "ppc64", "x86_64": "i386", "sparc":

-           "sparc64", "s390x": "s390", "ppc64": "ppc"}

+               "sparc64", "s390x": "s390", "ppc64": "ppc"}

  

      def handler(self, tag, repo_id, arch, keys, opts):

-         #arch is the arch of the repo, not the task

+         # arch is the arch of the repo, not the task

          self.rinfo = self.session.repoInfo(repo_id, strict=True)

          if self.rinfo['state'] != koji.REPO_INIT:

              raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo)
@@ -5481,7 +5623,7 @@ 

                  oldrepo = self.session.repoInfo(repo_id, strict=True)

                  if not oldrepo['dist']:

                      raise koji.GenericError("Base repo for deltas must also "

-                             "be a dist repo")

+                                             "be a dist repo")

                      # regular repos don't actually have rpms, just pkglist

                  path = koji.pathinfo.distrepo(repo_id, oldrepo['tag_name'])

                  if not os.path.exists(path):
@@ -5512,18 +5654,18 @@ 

          if oldrepo:

              oldrepodata = os.path.join(oldrepodir, arch, 'repodata')

          self.do_createrepo(self.repodir, '%s/pkglist' % self.repodir,

-                     groupdata, oldpkgs=oldpkgs, oldrepodata=oldrepodata,

-                     zck=opts.get('zck'), zck_dict_dir=opts.get('zck_dict_dir'))

+                            groupdata, oldpkgs=oldpkgs, oldrepodata=oldrepodata,

+                            zck=opts.get('zck'), zck_dict_dir=opts.get('zck_dict_dir'))

          for subrepo in self.subrepos:

              if oldrepo:

                  oldrepodata = os.path.join(oldrepodir, arch, subrepo, 'repodata')

              self.do_createrepo(

-                     '%s/%s' % (self.repodir, subrepo),

-                     '%s/%s/pkglist' % (self.repodir, subrepo),

-                     groupdata, oldpkgs=oldpkgs, oldrepodata=oldrepodata,

-                     logname='createrepo_%s' % subrepo,

-                     zck=opts.get('zck'),

-                     zck_dict_dir=opts.get('zck_dict_dir'))

+                 '%s/%s' % (self.repodir, subrepo),

+                 '%s/%s/pkglist' % (self.repodir, subrepo),

+                 groupdata, oldpkgs=oldpkgs, oldrepodata=oldrepodata,

+                 logname='createrepo_%s' % subrepo,

+                 zck=opts.get('zck'),

+                 zck_dict_dir=opts.get('zck_dict_dir'))

          if len(self.kojipkgs) == 0:

              fn = os.path.join(self.repodir, "repodata", "EMPTY_REPO")

              with open(fn, 'w') as fp:
@@ -5573,7 +5715,7 @@ 

          self.session.uploadWrapper(fn, self.uploadpath)

  

      def do_createrepo(self, repodir, pkglist, groupdata, oldpkgs=None,

-             logname=None, oldrepodata=None, zck=False, zck_dict_dir=None):

+                       logname=None, oldrepodata=None, zck=False, zck_dict_dir=None):

          """Run createrepo

  

          This is derived from CreaterepoTask.create_local_repo, but adapted to
@@ -5626,9 +5768,8 @@ 

          logfile = '%s/%s.log' % (self.workdir, logname)

          status = log_output(self.session, cmd[0], cmd, logfile, self.getUploadDir(), logerror=True)

          if not isSuccess(status):

-             raise koji.GenericError('failed to create repo: %s' \

-                     % parseStatus(status, ' '.join(cmd)))

- 

+             raise koji.GenericError('failed to create repo: %s'

+                                     % parseStatus(status, ' '.join(cmd)))

  

      def do_multilib_dnf(self, arch, ml_arch, conf):

          repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])
@@ -5645,22 +5786,22 @@ 

          mlm = multilib.DevelMultilibMethod(ml_conf)

          fs_missing = set()

          for bnp in self.kojipkgs:

-                 rpminfo = self.kojipkgs[bnp]

-                 ppath = rpminfo['_pkgpath']

-                 dnfbase.fill_sack(load_system_repo=False, load_available_repos=False)

-                 po = dnfbase.sack.add_cmdline_package(ppath)

-                 if mlm.select(po):

-                     # we need a multilib package to be included

-                     ml_bnp = bnp.replace(arch, self.archmap[arch])

-                     ml_path = os.path.join(mldir, ml_bnp[0].lower(), ml_bnp)

-                     # ^ XXX - should actually generate this

-                     if ml_bnp not in ml_pkgs:

-                         # not in our multilib repo

-                         self.logger.error('%s (multilib) is not on the filesystem' % ml_path)

-                         fs_missing.add(ml_path)

-                         # we defer failure so can report all the missing deps

-                         continue

-                     ml_true.add(ml_path)

+             rpminfo = self.kojipkgs[bnp]

+             ppath = rpminfo['_pkgpath']

+             dnfbase.fill_sack(load_system_repo=False, load_available_repos=False)

+             po = dnfbase.sack.add_cmdline_package(ppath)

+             if mlm.select(po):

+                 # we need a multilib package to be included

+                 ml_bnp = bnp.replace(arch, self.archmap[arch])

+                 ml_path = os.path.join(mldir, ml_bnp[0].lower(), ml_bnp)

+                 # ^ XXX - should actually generate this

+                 if ml_bnp not in ml_pkgs:

+                     # not in our multilib repo

+                     self.logger.error('%s (multilib) is not on the filesystem' % ml_path)

+                     fs_missing.add(ml_path)

+                     # we defer failure so can report all the missing deps

+                     continue

+                 ml_true.add(ml_path)

  

          # step 2: set up architectures for dnf configuration

          self.logger.info("Resolving multilib for %s using method devel" % arch)
@@ -5697,9 +5838,9 @@ 

          with open(yconfig_path, 'w') as f:

              f.write(dnfconfig)

          self.session.uploadWrapper(yconfig_path, self.uploadpath,

-             os.path.basename(yconfig_path))

+                                    os.path.basename(yconfig_path))

          conf = dnf.conf.Conf()

-         conf.reposdir = [] # don't use system repos at all

+         conf.reposdir = []  # don't use system repos at all

          conf.read(yconfig_path)

          dnfbase = dnf.Base(conf)

          if hasattr(koji.arch, 'ArchStorage'):
@@ -5737,7 +5878,7 @@ 

                      outfile.write(ml_path + '\n')

              self.session.uploadWrapper(missing_log, self.uploadpath)

              raise koji.GenericError('multilib packages missing. '

-                     'See missing_multilib.log')

+                                     'See missing_multilib.log')

  

          # step 5: update kojipkgs

          for dep_path in ml_needed:
@@ -5756,7 +5897,6 @@ 

              rpminfo['_multilib'] = True

              self.kojipkgs[bnp] = rpminfo

  

- 

      def do_multilib_yum(self, arch, ml_arch, conf):

          repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])

          mldir = os.path.join(repodir, koji.canonArch(ml_arch))
@@ -5771,21 +5911,21 @@ 

          mlm = multilib.DevelMultilibMethod(ml_conf)

          fs_missing = set()

          for bnp in self.kojipkgs:

-                 rpminfo = self.kojipkgs[bnp]

-                 ppath = rpminfo['_pkgpath']

-                 po = yum.packages.YumLocalPackage(filename=ppath)

-                 if mlm.select(po):

-                     # we need a multilib package to be included

-                     ml_bnp = bnp.replace(arch, self.archmap[arch])

-                     ml_path = os.path.join(mldir, ml_bnp[0].lower(), ml_bnp)

-                     # ^ XXX - should actually generate this

-                     if ml_bnp not in ml_pkgs:

-                         # not in our multilib repo

-                         self.logger.error('%s (multilib) is not on the filesystem' % ml_path)

-                         fs_missing.add(ml_path)

-                         # we defer failure so can report all the missing deps

-                         continue

-                     ml_true.add(ml_path)

+             rpminfo = self.kojipkgs[bnp]

+             ppath = rpminfo['_pkgpath']

+             po = yum.packages.YumLocalPackage(filename=ppath)

+             if mlm.select(po):

+                 # we need a multilib package to be included

+                 ml_bnp = bnp.replace(arch, self.archmap[arch])

+                 ml_path = os.path.join(mldir, ml_bnp[0].lower(), ml_bnp)

+                 # ^ XXX - should actually generate this

+                 if ml_bnp not in ml_pkgs:

+                     # not in our multilib repo

+                     self.logger.error('%s (multilib) is not on the filesystem' % ml_path)

+                     fs_missing.add(ml_path)

+                     # we defer failure so can report all the missing deps

+                     continue

+                 ml_true.add(ml_path)

  

          # step 2: set up architectures for yum configuration

          self.logger.info("Resolving multilib for %s using method devel" % arch)
@@ -5829,7 +5969,7 @@ 

          with open(yconfig_path, 'w') as f:

              f.write(yconfig)

          self.session.uploadWrapper(yconfig_path, self.uploadpath,

-             os.path.basename(yconfig_path))

+                                    os.path.basename(yconfig_path))

          yumbase.doConfigSetup(fn=yconfig_path)

          yumbase.conf.cache = 0

          yumbase.doRepoSetup()
@@ -5871,7 +6011,7 @@ 

                      outfile.write('\n')

              self.session.uploadWrapper(missing_log, self.uploadpath)

              raise koji.GenericError('multilib packages missing. '

-                     'See missing_multilib.log')

+                                     'See missing_multilib.log')

  

          # step 5: update kojipkgs

          for dep_path in ml_needed:
@@ -5912,8 +6052,9 @@ 

          for a in self.compat[arch]:

              # note: self.compat includes noarch for non-src already

              rpm_iter, builds = self.session.listTaggedRPMS(tag_id,

-                 event=opts['event'], arch=a, latest=opts['latest'],

-                 inherit=opts['inherit'], rpmsigs=True)

+                                                            event=opts['event'], arch=a,

+                                                            latest=opts['latest'],

+                                                            inherit=opts['inherit'], rpmsigs=True)

              for build in builds:

                  builddirs[build['id']] = koji.pathinfo.build(build)

              rpms += list(rpm_iter)
@@ -5950,11 +6091,11 @@ 

                      continue

                  # use the primary copy, if allowed (checked below)

                  pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

-                     koji.pathinfo.rpm(rpminfo))

+                                      koji.pathinfo.rpm(rpminfo))

              else:

                  # use the signed copy

                  pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

-                     koji.pathinfo.signed(rpminfo, rpminfo['sigkey']))

+                                      koji.pathinfo.signed(rpminfo, rpminfo['sigkey']))

              if not os.path.exists(pkgpath):

                  fs_missing.append(pkgpath)

                  # we'll raise an error below
@@ -5969,14 +6110,14 @@ 

              missing_log = os.path.join(self.workdir, 'missing_files.log')

              with open(missing_log, 'w') as outfile:

                  outfile.write('Some rpm files were missing.\n'

-                     'Most likely, you want to create these signed copies.\n\n'

-                     'Missing files:\n')

+                               'Most likely, you want to create these signed copies.\n\n'

+                               'Missing files:\n')

                  for pkgpath in sorted(fs_missing):

                      outfile.write(pkgpath)

                      outfile.write('\n')

              self.session.uploadWrapper(missing_log, self.uploadpath)

              raise koji.GenericError('Packages missing from the filesystem. '

-                     'See missing_files.log.')

+                                     'See missing_files.log.')

          if sig_missing:

              # log missing signatures and possibly error

              missing_log = os.path.join(self.workdir, 'missing_signatures.log')
@@ -5984,7 +6125,7 @@ 

                  outfile.write('Some rpms were missing requested signatures.\n')

                  if opts['skip_missing_signatures']:

                      outfile.write('The skip_missing_signatures option was specified, so '

-                             'these files were excluded.\n')

+                                   'these files were excluded.\n')

                  outfile.write('Acceptable keys: %r\n\n' % keys)

                  outfile.write('# RPM name: available keys\n')

                  fmt = '%(name)s-%(version)s-%(release)s.%(arch)s'
@@ -5993,10 +6134,10 @@ 

                      avail = to_list(rpm_idx.get(rpm_id, {}).keys())

                      outfile.write('%s: %r\n' % (fname, avail))

              self.session.uploadWrapper(missing_log, self.uploadpath)

-             if (not opts['skip_missing_signatures']

-                         and not opts['allow_missing_signatures']):

+             if (not opts['skip_missing_signatures'] and

+                     not opts['allow_missing_signatures']):

                  raise koji.GenericError('Unsigned packages found. See '

-                         'missing_signatures.log')

+                                         'missing_signatures.log')

  

      def link_pkgs(self):

          for bnp in self.kojipkgs:
@@ -6025,7 +6166,7 @@ 

              if subrepo:

                  # note the ../

                  subrepo_pkgs.setdefault(subrepo, []).append(

-                         '../Packages/%s/%s\n' % (bnplet, bnp))

+                     '../Packages/%s/%s\n' % (bnplet, bnp))

              else:

                  pkgs.append('Packages/%s/%s\n' % (bnplet, bnp))

  
@@ -6047,7 +6188,7 @@ 

  class WaitrepoTask(BaseTaskHandler):

  

      Methods = ['waitrepo']

-     #mostly just waiting

+     # mostly just waiting

      _taskWeight = 0.2

  

      PAUSE = 60
@@ -6091,26 +6232,31 @@ 

              repo = self.session.getRepo(taginfo['id'])

              if repo and repo != last_repo:

                  if builds:

-                     if koji.util.checkForBuilds(self.session, taginfo['id'], builds, repo['create_event']):

-                         self.logger.debug("Successfully waited %s for %s to appear in the %s repo" % \

-                             (koji.util.duration(start), koji.util.printList(nvrs), taginfo['name']))

+                     if koji.util.checkForBuilds(

+                             self.session, taginfo['id'], builds, repo['create_event']):

+                         self.logger.debug("Successfully waited %s for %s to appear "

+                                           "in the %s repo" %

+                                           (koji.util.duration(start), koji.util.printList(nvrs),

+                                            taginfo['name']))

                          return repo

                  elif newer_than:

                      if repo['create_ts'] > newer_than:

-                         self.logger.debug("Successfully waited %s for a new %s repo" % \

-                             (koji.util.duration(start), taginfo['name']))

+                         self.logger.debug("Successfully waited %s for a new %s repo" %

+                                           (koji.util.duration(start), taginfo['name']))

                          return repo

                  else:

-                     #no check requested -- return first ready repo

+                     # no check requested -- return first ready repo

                      return repo

  

              if (time.time() - start) > (self.TIMEOUT * 60.0):

                  if builds:

-                     raise koji.GenericError("Unsuccessfully waited %s for %s to appear in the %s repo" % \

-                         (koji.util.duration(start), koji.util.printList(nvrs), taginfo['name']))

+                     raise koji.GenericError("Unsuccessfully waited %s for %s to appear "

+                                             "in the %s repo" %

+                                             (koji.util.duration(start), koji.util.printList(nvrs),

+                                              taginfo['name']))

                  else:

-                     raise koji.GenericError("Unsuccessfully waited %s for a new %s repo" % \

-                         (koji.util.duration(start), taginfo['name']))

+                     raise koji.GenericError("Unsuccessfully waited %s for a new %s repo" %

+                                             (koji.util.duration(start), taginfo['name']))

  

              time.sleep(self.PAUSE)

              last_repo = repo
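
The waitrepo handler above is a plain poll-with-timeout loop: check for a satisfying repo, sleep PAUSE seconds between polls, and give up after TIMEOUT minutes. Reduced to its skeleton (names and the timeout default are illustrative):

    import time

    def poll_until(check, pause=60, timeout_minutes=120):
        # check() returns a truthy result once the condition is satisfied
        start = time.time()
        while True:
            result = check()
            if result:
                return result
            if (time.time() - start) > (timeout_minutes * 60.0):
                raise RuntimeError('timed out after %s minutes' % timeout_minutes)
            time.sleep(pause)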
@@ -6140,7 +6286,7 @@ 

      parser.add_option("--debug-xmlrpc", action="store_true", default=False,

                        help="show xmlrpc debug output")

      parser.add_option("--debug-mock", action="store_true", default=False,

-                      #obsolete option

+                       # obsolete option

                        help=SUPPRESS_HELP)

      parser.add_option("--skip-main", action="store_true", default=False,

                        help="don't actually run main")
@@ -6148,7 +6294,7 @@ 

      parser.add_option("--minspace", type='int', help="Specify minspace")

      parser.add_option("--sleeptime", type='int', help="Specify the polling interval")

      parser.add_option("--admin-emails", type='str', action="store", metavar="EMAILS",

-                        help="Comma-separated addresses to send error notices to.")

+                       help="Comma-separated addresses to send error notices to.")

      parser.add_option("--topdir", help="Specify topdir")

      parser.add_option("--topurl", help="Specify topurl")

      parser.add_option("--workdir", help="Specify workdir")
@@ -6163,7 +6309,7 @@ 

  

      if args:

          parser.error("incorrect number of arguments")

-         #not reached

+         # not reached

          assert False  # pragma: no cover

  

      # load local config
@@ -6239,7 +6385,7 @@ 

                      defaults[name] = int(value)

                  except ValueError:

                      quit("value for %s option must be a valid integer" % name)

-             elif name in ['offline_retry', 'use_createrepo_c',  'createrepo_skip_stat',

+             elif name in ['offline_retry', 'use_createrepo_c', 'createrepo_skip_stat',

                            'createrepo_update', 'use_fast_upload', 'support_rpm_source_layout',

                            'krb_rdns', 'krb_canon_host', 'build_arch_can_fail', 'no_ssl_verify',

                            'log_timestamps']:
@@ -6256,12 +6402,12 @@ 

          if getattr(options, name, None) is None:

              setattr(options, name, value)

  

-     #honor topdir

+     # honor topdir

      if options.topdir:

          koji.BASEDIR = options.topdir

          koji.pathinfo.topdir = options.topdir

  

-     #make sure workdir exists

+     # make sure workdir exists

      if not os.path.exists(options.workdir):

          koji.ensuredir(options.workdir)

  
@@ -6299,6 +6445,7 @@ 

  

      return options

  

+ 

  def quit(msg=None, code=1):

      if msg:

          logging.getLogger("koji.build").error(msg)
@@ -6306,9 +6453,10 @@ 

          sys.stderr.flush()

      sys.exit(code)

  

+ 

  if __name__ == "__main__":

      koji.add_file_logger("koji", "/var/log/kojid.log")

-     #note we're setting logging params for all of koji*

+     # note we're setting logging params for all of koji*

      options = get_options()

      if options.log_level:

          lvl = getattr(logging, options.log_level, None)
@@ -6326,7 +6474,7 @@ 

      if options.admin_emails:

          koji.add_mail_logger("koji", options.admin_emails)

  

-     #start a session and login

+     # start a session and login

      session_opts = koji.grab_session_options(options)

      session = koji.ClientSession(options.server, session_opts)

      if options.cert and os.path.isfile(options.cert):
@@ -6360,14 +6508,14 @@ 

              quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])

      else:

          quit("No username/password supplied and Kerberos missing or not configured")

-     #make session exclusive

+     # make session exclusive

      try:

          session.exclusiveSession(force=options.force_lock)

      except koji.AuthLockError:

          quit("Error: Unable to get lock. Trying using --force-lock")

      if not session.logged_in:

          quit("Error: Unknown login error")

-     #make sure it works

+     # make sure it works

      try:

          ret = session.echo("OK")

      except requests.exceptions.ConnectionError:
@@ -6377,7 +6525,7 @@ 

  

      # run main

      if options.daemon:

-         #detach

+         # detach

          koji.daemonize()

          main(options, session)

          # not reached

file modified
+36 -29
@@ -47,24 +47,26 @@ 

      'alpha': ['alphaev4', 'alphaev45', 'alphaev5', 'alphaev56',

                'alphapca56', 'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7'],

      'armhfp': ['armv7hl', 'armv7hnl', 'armv6hl', 'armv6hnl'],

-     'arm': ['armv5tel', 'armv5tejl', 'armv6l','armv7l'],

+     'arm': ['armv5tel', 'armv5tejl', 'armv6l', 'armv7l'],

      'sh4': ['sh4a']

-     }

+ }

  

  MULTILIB_ARCHES = {

      'x86_64': 'i386',

      'ppc64': 'ppc',

      's390x': 's390'

-     }

+ }

+ 

  

  def parse_args(args):

      """Parse our opts/args"""

      usage = """

-     mergerepos: take 2 or more repositories and merge their metadata into a new repo using Koji semantics

+     mergerepos: take 2 or more repositories and merge their metadata into a new

+                 repo using Koji semantics

  

      mergerepos --repo=url --repo=url --outputdir=/some/path"""

  

-     parser = OptionParser(version = "mergerepos 0.1", usage=usage)

+     parser = OptionParser(version="mergerepos 0.1", usage=usage)

      # query options

      parser.add_option("-r", "--repo", dest="repos", default=[], action="append",

                        help="repo url")
@@ -73,7 +75,8 @@ 

      parser.add_option("-a", "--arch", dest="arches", default=[], action="append",

                        help="List of arches to include in the repo")

      parser.add_option("-b", "--blocked", default=None,

-                       help="A file containing a list of srpm names to exclude from the merged repo")

+                       help="A file containing a list of srpm names to exclude "

+                            "from the merged repo")

      parser.add_option("--mode", default='koji', help="Select the merge mode")

      parser.add_option("-o", "--outputdir", default=None,

                        help="Location to create the repository")
@@ -99,7 +102,7 @@ 

                  opts.arches.extend(EXPAND_ARCHES[multilib_arch])

  

      # always include noarch

-     if not 'noarch' in opts.arches:

+     if 'noarch' not in opts.arches:

          opts.arches.append('noarch')

  

      if not opts.outputdir:
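
With the tables at the top of this file, a requested arch pulls in its compatible variants plus its multilib counterpart, and noarch is always included. A rough equivalent of the expansion (the surrounding option handling is elided here):

    arches = ['x86_64']                      # e.g. from repeated -a options
    expanded = list(arches)
    for arch in arches:
        expanded.extend(EXPAND_ARCHES.get(arch, []))
        multilib_arch = MULTILIB_ARCHES.get(arch)
        if multilib_arch:
            expanded.append(multilib_arch)
            expanded.extend(EXPAND_ARCHES.get(multilib_arch, []))
    if 'noarch' not in expanded:
        expanded.append('noarch')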
@@ -158,13 +161,13 @@ 

          # in the repolist

          count = 0

          for r in self.repolist:

-             count +=1

+             count += 1

              rid = 'repo%s' % count

              sys.stderr.write('Adding repo: %s\n' % r)

              n = self.yumbase.add_enable_repo(rid, baseurls=[r])

              n._merge_rank = count

  

-         #setup our sacks

+         # setup our sacks

          self.yumbase._getSacks(archlist=self.archlist)

  

          self.sort_and_filter()
@@ -174,18 +177,18 @@ 

          For each package object, check if the srpm name has ever been seen before.

          If it has not, keep the package.  If it has, check if the srpm name was first seen

          in the same repo as the current package.  If so, keep the package from the srpm with the

-         highest NVR.  If not, keep the packages from the first srpm we found, and delete packages from

-         all other srpms.

+         highest NVR.  If not, keep the packages from the first srpm we found, and delete packages

+         from all other srpms.

  

          Packages with matching NVRs in multiple repos will be taken from the first repo.

  

          If the srpm name appears in the blocked package list, any packages generated from the srpm

          will be deleted from the package sack as well.

  

-         This method will also generate a file called "pkgorigins" and add it to the repo metadata.  This

-         is a tab-separated map of package E:N-V-R.A to repo URL (as specified on the command-line).  This

-         allows a package to be tracked back to its origin, even if the location field in the repodata does

-         not match the original repo location.

+         This method will also generate a file called "pkgorigins" and add it to the repo metadata.

+         This is a tab-separated map of package E:N-V-R.A to repo URL (as specified on the

+         command-line). This allows a package to be tracked back to its origin, even if the location

+         field in the repodata does not match the original repo location.

          """

  

          if self.mode == 'simple':
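
The precedence rules in the docstring condense to: the first repo to provide an srpm name wins, and within that repo the highest EVR wins. A self-contained sketch (compare_evr is passed in; a cmp-style function such as rpmUtils.miscutils.compareEVR fits):

    def pick_srpms(srpms, compare_evr):
        # srpms: (repo_rank, srpm_name, evr) tuples in repo order
        chosen = {}
        for rank, name, evr in srpms:
            if name not in chosen:
                chosen[name] = (evr, rank)        # first repo to provide it wins
            else:
                old_evr, old_rank = chosen[name]
                if rank == old_rank and compare_evr(evr, old_evr) > 0:
                    chosen[name] = (evr, rank)    # same repo: keep the highest EVR
                # a later repo never displaces an earlier one
        return chosen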
@@ -205,9 +208,10 @@ 

                  if reponum == 0 and not pkg.basepath:

                      # this is the first repo (i.e. the koji repo) and appears

                      # to be using relative urls

-                     #XXX - kind of a hack, but yum leaves us little choice

-                     #force the pkg object to report a relative location

-                     loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True)

+                     # XXX - kind of a hack, but yum leaves us little choice

+                     # force the pkg object to report a relative location

+                     loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path,

+                                                                           attrib=True)

                      pkg._return_remote_location = make_const_func(loc)

                  if pkg.sourcerpm in seen_srpms:

                      # we're just looking at sourcerpms this pass and we've
@@ -221,13 +225,13 @@ 

                          # We found a rpm built from an srpm with the same name in a previous repo.

                          # The previous repo takes precedence, so ignore the srpm found here.

                          sys.stderr.write('Package %s already provided by repo %s'

-                                 ' (at %s in repo %s)\n'

-                                 % (srpm_name, other_repoid, str(pkg), pkg.repoid))

+                                          ' (at %s in repo %s)\n'

+                                          % (srpm_name, other_repoid, str(pkg), pkg.repoid))

                          continue

                      else:

                          # We're in the same repo, so compare srpm NVRs

                          other_srpm_name, other_ver, other_rel, other_epoch, other_arch = \

-                                          rpmUtils.miscutils.splitFilename(other_srpm)

+                             rpmUtils.miscutils.splitFilename(other_srpm)

                          cmp = rpmUtils.miscutils.compareEVR((epoch, ver, rel),

                                                              (other_epoch, other_ver, other_rel))

                          if cmp > 0:
@@ -235,13 +239,13 @@ 

                              # existing srpm in the dict, so update the dict

                              include_srpms[srpm_name] = (pkg.sourcerpm, pkg.repoid)

                              sys.stderr.write('Replacing older source nvr: '

-                                     '%s > %s\n' % (pkg.sourcerpm, other_srpm))

+                                              '%s > %s\n' % (pkg.sourcerpm, other_srpm))

                          elif cmp < 0:

                              sys.stderr.write('Ignoring older source nvr: '

-                                     '%s < %s\n' % (pkg.sourcerpm, other_srpm))

+                                              '%s < %s\n' % (pkg.sourcerpm, other_srpm))

                          # otherwise same, so we already have it

                  elif srpm_name in self.blocked:

-                     sys.stderr.write('Ignoring blocked package: %s\n\n' % \

+                     sys.stderr.write('Ignoring blocked package: %s\n\n' %

                                       pkg.sourcerpm)

                      continue

                  else:
@@ -260,14 +264,14 @@ 

                      sys.stderr.write('Duplicate rpm: %s\n' % pkg_nvra)

                  elif incl_srpm is None:

                      sys.stderr.write('Excluding %s (%s is blocked)\n'

-                             % (pkg_nvra, srpm_name))

+                                      % (pkg_nvra, srpm_name))

                      repo.sack.delPackage(pkg)

                  elif incl_srpm == pkg.sourcerpm:

                      origins.write('%s\t%s\n' % (pkg_nvra, repo.urls[0]))

                      seen_rpms[pkg_nvra] = 1

                  else:

                      sys.stderr.write('Excluding %s (wrong srpm version '

-                             '%s != %s)\n' % (pkg_nvra, pkg.sourcerpm, incl_srpm))

+                                      '%s != %s)\n' % (pkg_nvra, pkg.sourcerpm, incl_srpm))

                      repo.sack.delPackage(pkg)

  

          origins.close()
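
The pkgorigins file written above is just "E:N-V-R.A<tab>repo-url" lines, one per kept package, so consuming it is a line-split per row. An illustrative reader (not part of this patch):

    def read_pkgorigins(path):
        origins = {}
        with open(path) as f:
            for line in f:
                nvra, url = line.rstrip('\n').split('\t', 1)
                origins[nvra] = url
        return origins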
@@ -296,9 +300,10 @@ 

                  if reponum == 0 and not pkg.basepath:

                      # this is the first repo (i.e. the koji repo) and appears

                      # to be using relative urls

-                     #XXX - kind of a hack, but yum leaves us little choice

-                     #force the pkg object to report a relative location

-                     loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True)

+                     # XXX - kind of a hack, but yum leaves us little choice

+                     # force the pkg object to report a relative location

+                     loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path,

+                                                                           attrib=True)

                      pkg._return_remote_location = make_const_func(loc)

  

          pkgorigins = os.path.join(self.yumbase.conf.cachedir, 'pkgorigins')
@@ -337,6 +342,7 @@ 

          mdgen.doRepoMetadata()

          mdgen.doFinalMove()

  

+ 

  def main(args):

      """main"""

      opts = parse_args(args)
@@ -358,5 +364,6 @@ 

      finally:

          merge.close()

  

+ 

  if __name__ == "__main__":

      main(sys.argv[1:])

file modified
+23 -21
@@ -50,7 +50,7 @@ 

      """

      for v in six.itervalues(vars(plugin)):

          if isinstance(v, six.class_types):

-             #skip classes

+             # skip classes

              continue

          if callable(v):

              if getattr(v, 'exported_cli', False):
@@ -104,7 +104,7 @@ 

      common_commands = ['build', 'help', 'download-build',

                         'latest-build', 'search', 'list-targets']

      usage = _("%%prog [global-options] command [command-options-and-arguments]"

-                 "\n\nCommon commands: %s" % ', '.join(sorted(common_commands)))

+               "\n\nCommon commands: %s" % ', '.join(sorted(common_commands)))

      parser = OptionParser(usage=usage)

      parser.disable_interspersed_args()

      progname = os.path.basename(sys.argv[0]) or 'koji'
@@ -129,7 +129,8 @@ 

                        help=_("do not authenticate"))

      parser.add_option("--force-auth", action="store_true", default=False,

                        help=_("authenticate even for read-only operations"))

-     parser.add_option("--authtype", help=_("force use of a type of authentication, options: noauth, ssl, password, or kerberos"))

+     parser.add_option("--authtype", help=_("force use of a type of authentication, options: "

+                                            "noauth, ssl, password, or kerberos"))

      parser.add_option("-d", "--debug", action="store_true",

                        help=_("show debug output"))

      parser.add_option("--debug-xmlrpc", action="store_true",
@@ -144,8 +145,9 @@ 

      parser.add_option("--topurl", help=_("url for Koji file access"))

      parser.add_option("--pkgurl", help=SUPPRESS_HELP)

      parser.add_option("--plugin-paths", metavar='PATHS',

-             help=_("specify additional plugin paths (colon separated)"))

-     parser.add_option("--help-commands", action="store_true", default=False, help=_("list commands"))

+                       help=_("specify additional plugin paths (colon separated)"))

+     parser.add_option("--help-commands", action="store_true", default=False,

+                       help=_("list commands"))

      (options, args) = parser.parse_args()

  

      # load local config
@@ -166,12 +168,12 @@ 

          value = os.path.expanduser(getattr(options, name))

          setattr(options, name, value)

  

-     #honor topdir

+     # honor topdir

      if options.topdir:

          koji.BASEDIR = options.topdir

          koji.pathinfo.topdir = options.topdir

  

-     #pkgurl is obsolete

+     # pkgurl is obsolete

      if options.pkgurl:

          if options.topurl:

              warn("Warning: the pkgurl option is obsolete")
@@ -193,9 +195,9 @@ 

          return options, '_list_commands', [0, '']

  

      aliases = {

-         'cancel-task' : 'cancel',

-         'cxl' : 'cancel',

-         'list-commands' : 'help',

+         'cancel-task': 'cancel',

+         'cxl': 'cancel',

+         'list-commands': 'help',

          'move-pkg': 'move-build',

          'move': 'move-build',

          'latest-pkg': 'latest-build',
@@ -252,7 +254,7 @@ 

      pyver = getattr(options, 'pyver', None)

      if not pyver:

          return

-     if pyver not in [2,3]:

+     if pyver not in [2, 3]:

          logger.warning('Invalid python version requested: %s', pyver)

      if sys.version_info[0] == pyver:

          return
@@ -278,20 +280,20 @@ 

          categories_chosen = list(categories_chosen)

      categories_chosen.sort()

      handlers = []

-     for name,value in globals().items():

+     for name, value in globals().items():

          if name.startswith('handle_'):

-             alias = name.replace('handle_','')

-             alias = alias.replace('_','-')

-             handlers.append((alias,value))

+             alias = name.replace('handle_', '')

+             alias = alias.replace('_', '-')

+             handlers.append((alias, value))

          elif name.startswith('anon_handle_'):

-             alias = name.replace('anon_handle_','')

-             alias = alias.replace('_','-')

-             handlers.append((alias,value))

+             alias = name.replace('anon_handle_', '')

+             alias = alias.replace('_', '-')

+             handlers.append((alias, value))

      handlers.sort()

      print(_("Available commands:"))

      for category in categories_chosen:

          print(_("\n%s:" % categories[category]))

-         for alias,handler in handlers:

+         for alias, handler in handlers:

              desc = handler.__doc__ or ''

              if desc.startswith('[%s] ' % category):

                  desc = desc[len('[%s] ' % category):]
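
The loop above doubles as the CLI's command registry: any function named handle_* (or anon_handle_*, which by convention needs no authentication) becomes a subcommand, with underscores mapped to dashes. Condensed into one helper (illustrative):

    def discover_commands(namespace):
        commands = {}
        for name, value in namespace.items():
            for prefix in ('anon_handle_', 'handle_'):
                if name.startswith(prefix):
                    commands[name[len(prefix):].replace('_', '-')] = value
                    break
        return commands

    # discover_commands(globals()) maps e.g. 'list-commands' to its handler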
@@ -335,7 +337,7 @@ 

              rv = 0

      except (KeyboardInterrupt, SystemExit):

          rv = 1

-     except:

+     except Exception:

          if options.debug:

              raise

          else:
@@ -344,6 +346,6 @@ 

              logger.error("%s: %s" % (exctype.__name__, value))

      try:

          session.logout()

-     except:

+     except Exception:

          pass

      sys.exit(rv)

file modified
+775 -609
@@ -23,12 +23,27 @@ 

  

  import koji

  from koji.util import base64encode, to_list

- from koji_cli.lib import (_, _list_tasks, _progress_callback, _running_in_bg,

-                           activate_session, arg_filter, download_file, error,

-                           format_inheritance_flags, get_usage_str, greetings,

-                           linked_upload, list_task_output_all_volumes,

-                           print_task_headers, print_task_recurse, unique_path,

-                           warn, watch_logs, watch_tasks)

+ from koji_cli.lib import (

+     _,

+     _list_tasks,

+     _progress_callback,

+     _running_in_bg,

+     activate_session,

+     arg_filter,

+     download_file,

+     error,

+     format_inheritance_flags,

+     get_usage_str,

+     greetings,

+     linked_upload,

+     list_task_output_all_volumes,

+     print_task_headers,

+     print_task_recurse,

+     unique_path,

+     warn,

+     watch_logs,

+     watch_tasks

+ )

  

  try:

      import libcomps
@@ -43,7 +58,7 @@ 

  def _printable_unicode(s):

      if six.PY2:

          return s.encode('utf-8')

-     else: # no cover: 2.x

+     else:  # no cover: 2.x

          return s

  

  
@@ -134,7 +149,7 @@ 

      usage = _('usage: %prog assign-task <task_id> <hostname>')

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option('-f', '--force', action='store_true', default=False,

-                           help=_('force to assign a non-free task'))

+                       help=_('force to assign a non-free task'))

      (options, args) = parser.parse_args(args)

  

      if len(args) != 2:
@@ -170,7 +185,8 @@ 

      "[admin] Add a host"

      usage = _("usage: %prog add-host [options] <hostname> <arch> [<arch> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--krb-principal", help=_("set a non-default kerberos principal for the host"))

+     parser.add_option("--krb-principal",

+                       help=_("set a non-default kerberos principal for the host"))

      (options, args) = parser.parse_args(args)

      if len(args) < 2:

          parser.error(_("Please specify a hostname and at least one arch"))
@@ -193,7 +209,8 @@ 

      "[admin] Edit a host"

      usage = _("usage: %prog edit-host <hostname> [<hostname> ...] [options]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--arches", help=_("Space or comma-separated list of supported architectures"))

+     parser.add_option("--arches",

+                       help=_("Space or comma-separated list of supported architectures"))

      parser.add_option("--capacity", type="float", help=_("Capacity of this host"))

      parser.add_option("--description", metavar="DESC", help=_("Description of this host"))

      parser.add_option("--comment", help=_("A brief comment about this host"))
@@ -339,15 +356,16 @@ 

      opts['force'] = options.force

      opts['block'] = False

      # check if list of packages exists for that tag already

-     dsttag=session.getTag(tag)

+     dsttag = session.getTag(tag)

      if dsttag is None:

          print("No such tag: %s" % tag)

          sys.exit(1)

-     pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'])])

+     pkglist = dict([(p['package_name'], p['package_id'])

+                     for p in session.listPackages(tagID=dsttag['id'])])

      to_add = []

      for package in args[1:]:

          package_id = pkglist.get(package, None)

-         if not package_id is None:

+         if package_id is not None:

              print("Package %s already exists in tag %s" % (package, tag))

              continue

          to_add.append(package)
@@ -366,18 +384,20 @@ 

      "[admin] Block a package in the listing for tag"

      usage = _("usage: %prog block-pkg [options] <tag> <package> [<package> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--force", action='store_true', default=False, help=_("Override blocks and owner if necessary"))

+     parser.add_option("--force", action='store_true', default=False,

+                       help=_("Override blocks and owner if necessary"))

      (options, args) = parser.parse_args(args)

      if len(args) < 2:

          parser.error(_("Please specify a tag and at least one package"))

      activate_session(session, goptions)

      tag = args[0]

      # check if list of packages exists for that tag already

-     dsttag=session.getTag(tag)

+     dsttag = session.getTag(tag)

      if dsttag is None:

          print("No such tag: %s" % tag)

          return 1

-     pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'], inherited=True)])

+     pkglist = dict([(p['package_name'], p['package_id'])

+                     for p in session.listPackages(tagID=dsttag['id'], inherited=True)])

      ret = 0

      for package in args[1:]:

          package_id = pkglist.get(package, None)
@@ -410,11 +430,12 @@ 

      opts = {}

      opts['force'] = options.force

      # check if list of packages exists for that tag already

-     dsttag=session.getTag(tag)

+     dsttag = session.getTag(tag)

      if dsttag is None:

          print("No such tag: %s" % tag)

          return 1

-     pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'])])

+     pkglist = dict([(p['package_name'], p['package_id'])

+                     for p in session.listPackages(tagID=dsttag['id'])])

      ret = 0

      for package in args[1:]:

          package_id = pkglist.get(package, None)
@@ -442,9 +463,9 @@ 

      parser.add_option("--nowait", action="store_false", dest="wait",

                        help=_("Don't wait on build"))

      parser.add_option("--wait-repo", action="store_true",

-                        help=_("Wait for the actual buildroot repo of given target"))

+                       help=_("Wait for the actual buildroot repo of given target"))

      parser.add_option("--wait-build", metavar="NVR", action="append", dest="wait_builds",

-                        default=[], help=_("Wait for the given nvr to appear in buildroot repo"))

+                       default=[], help=_("Wait for the given nvr to appear in buildroot repo"))

      parser.add_option("--quiet", action="store_true",

                        help=_("Do not print the task information"), default=options.quiet)

      parser.add_option("--arch-override", help=_("Override build arches"))
@@ -457,7 +478,8 @@ 

                        help=_("Run the build at a lower priority"))

      (build_opts, args) = parser.parse_args(args)

      if len(args) != 2:

-         parser.error(_("Exactly two arguments (a build target and a SCM URL or srpm file) are required"))

+         parser.error(_("Exactly two arguments (a build target and a SCM URL or srpm file) are "

+                        "required"))

      if build_opts.arch_override and not build_opts.scratch:

          parser.error(_("--arch_override is only allowed for --scratch builds"))

      activate_session(session, options)
@@ -484,11 +506,11 @@ 

              opts[key] = val

      priority = None

      if build_opts.background:

-         #relative to koji.PRIO_DEFAULT

+         # relative to koji.PRIO_DEFAULT

          priority = 5

      # try to check that source is an SRPM

      if '://' not in source:

-         #treat source as an srpm and upload it

+         # treat source as an srpm and upload it

          if not build_opts.quiet:

              print("Uploading srpm: %s" % source)

          serverdir = unique_path('cli-build')
@@ -506,7 +528,7 @@ 

      if build_opts.wait or (build_opts.wait is None and not _running_in_bg()):

          session.logout()

          return watch_tasks(session, [task_id], quiet=build_opts.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

      else:

          return

  
@@ -537,8 +559,10 @@ 

      # check that the destination tag is in the inheritance tree of the build tag

      # otherwise there is no way that a chain-build can work

      ancestors = session.getFullInheritance(build_target['build_tag'])

-     if dest_tag['id'] not in [build_target['build_tag']] + [ancestor['parent_id'] for ancestor in ancestors]:

-         print(_("Packages in destination tag %(dest_tag_name)s are not inherited by build tag %(build_tag_name)s" % build_target))

+     if dest_tag['id'] not in [build_target['build_tag']] + \

+             [ancestor['parent_id'] for ancestor in ancestors]:

+         print(_("Packages in destination tag %(dest_tag_name)s are not inherited by build tag "

+                 "%(build_tag_name)s" % build_target))

          print(_("Target %s is not usable for a chain-build" % build_target['name']))

          return 1

  
@@ -546,7 +570,7 @@ 

  

      src_list = []

      build_level = []

-     #src_lists is a list of lists of sources to build.

+     # src_lists is a list of lists of sources to build.

      #  each list is block of builds ("build level") which must all be completed

      #  before the next block begins. Blocks are separated on the command line with ':'

      for src in sources:
@@ -567,11 +591,12 @@ 

          src_list.append(build_level)

  

      if len(src_list) < 2:

-         parser.error(_('You must specify at least one dependency between builds with : (colon)\nIf there are no dependencies, use the build command instead'))

+         parser.error(_('You must specify at least one dependency between builds with : (colon)\n'

+                        'If there are no dependencies, use the build command instead'))

  

      priority = None

      if build_opts.background:

-         #relative to koji.PRIO_DEFAULT

+         # relative to koji.PRIO_DEFAULT

          priority = 5

  

      task_id = session.chainBuild(src_list, target, priority=priority)
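
The ':' separator turns the flat argument list into ordered build levels, which chainBuild() then runs block by block. The grouping, condensed (the per-source URL validation is omitted):

    def group_build_levels(sources):
        levels, level = [], []
        for src in sources:
            if src == ':':
                if level:
                    levels.append(level)
                    level = []
            else:
                level.append(src)
        if level:
            levels.append(level)
        return levels

    # group_build_levels(['pkg1.src.rpm', 'url2', ':', 'url3'])
    # -> [['pkg1.src.rpm', 'url2'], ['url3']]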
@@ -583,7 +608,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=build_opts.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  def handle_maven_build(options, session, args):
@@ -592,7 +617,8 @@ 

      usage += _("\n       %prog maven-build --ini=CONFIG... [options] <target>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--patches", action="store", metavar="URL",

-                       help=_("SCM URL of a directory containing patches to apply to the sources before building"))

+                       help=_("SCM URL of a directory containing patches to apply to the sources "

+                              "before building"))

      parser.add_option("-G", "--goal", action="append",

                        dest="goals", metavar="GOAL", default=[],

                        help=_("Additional goal to run before \"deploy\""))
@@ -658,7 +684,8 @@ 

              parser.error(e.args[0])

          opts = to_list(params.values())[0]

          if opts.pop('type', 'maven') != 'maven':

-             parser.error(_("Section %s does not contain a maven-build config") % to_list(params.keys())[0])

+             parser.error(_("Section %s does not contain a maven-build config") %

+                          to_list(params.keys())[0])

          source = opts.pop('scmurl')

      else:

          source = args[1]
@@ -671,7 +698,7 @@ 

          opts['skip_tag'] = True

      priority = None

      if build_opts.background:

-         #relative to koji.PRIO_DEFAULT

+         # relative to koji.PRIO_DEFAULT

          priority = 5

      task_id = session.mavenBuild(source, target, opts, priority=priority)

      if not build_opts.quiet:
@@ -682,31 +709,35 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=build_opts.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  def handle_wrapper_rpm(options, session, args):

      """[build] Build wrapper rpms for any archives associated with a build."""

      usage = _("usage: %prog wrapper-rpm [options] <target> <build-id|n-v-r> <URL>")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--create-build", action="store_true", help=_("Create a new build to contain wrapper rpms"))

+     parser.add_option("--create-build", action="store_true",

+                       help=_("Create a new build to contain wrapper rpms"))

      parser.add_option("--ini", action="append",

                        dest="inis", metavar="CONFIG", default=[],

                        help=_("Pass build parameters via a .ini file"))

      parser.add_option("-s", "--section",

                        help=_("Get build parameters from this section of the .ini"))

-     parser.add_option("--skip-tag", action="store_true", help=_("If creating a new build, don't tag it"))

+     parser.add_option("--skip-tag", action="store_true",

+                       help=_("If creating a new build, don't tag it"))

      parser.add_option("--scratch", action="store_true", help=_("Perform a scratch build"))

      parser.add_option("--nowait", action="store_true", help=_("Don't wait on build"))

-     parser.add_option("--background", action="store_true", help=_("Run the build at a lower priority"))

+     parser.add_option("--background", action="store_true",

+                       help=_("Run the build at a lower priority"))

  

      (build_opts, args) = parser.parse_args(args)

      if build_opts.inis:

-         if len(args)!= 1:

+         if len(args) != 1:

              parser.error(_("Exactly one argument (a build target) is required"))

      else:

          if len(args) < 3:

-             parser.error(_("You must provide a build target, a build ID or NVR, and a SCM URL to a specfile fragment"))

+             parser.error(_("You must provide a build target, a build ID or NVR, "

+                            "and a SCM URL to a specfile fragment"))

      activate_session(session, options)

  

      target = args[0]
@@ -718,7 +749,8 @@ 

              parser.error(e.args[0])

          opts = to_list(params.values())[0]

          if opts.get('type') != 'wrapper':

-             parser.error(_("Section %s does not contain a wrapper-rpm config") % to_list(params.keys())[0])

+             parser.error(_("Section %s does not contain a wrapper-rpm config") %

+                          to_list(params.keys())[0])

          url = opts['scmurl']

          package = opts['buildrequires'][0]

          target_info = session.getBuildTarget(target, strict=True)
@@ -749,7 +781,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  def handle_maven_chain(options, session, args):
@@ -801,7 +833,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  def handle_resubmit(goptions, session, args):
@@ -810,9 +842,9 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--nowait", action="store_true", help=_("Don't wait on task"))

      parser.add_option("--nowatch", action="store_true", dest="nowait",

-             help=_("An alias for --nowait"))

+                       help=_("An alias for --nowait"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-             help=_("Do not print the task information"))

+                       help=_("Do not print the task information"))

      (options, args) = parser.parse_args(args)

      if len(args) != 1:

          parser.error(_("Please specify a single task ID"))
@@ -829,7 +861,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [newID], quiet=options.quiet,

-                 poll_interval=goptions.poll_interval)

+                            poll_interval=goptions.poll_interval)

  

  

  def handle_call(goptions, session, args):
@@ -837,7 +869,8 @@ 

      usage = _("usage: %prog call [options] <name> [<arg> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--python", action="store_true", help=_("Use python syntax for values"))

-     parser.add_option("--kwargs", help=_("Specify keyword arguments as a dictionary (implies --python)"))

+     parser.add_option("--kwargs",

+                       help=_("Specify keyword arguments as a dictionary (implies --python)"))

      parser.add_option("--json-output", action="store_true", help=_("Use JSON syntax for output"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:
@@ -880,7 +913,8 @@ 

      parser.add_option("--target", help=_("Create a mock config for a build target"))

      parser.add_option("--task", help=_("Duplicate the mock config of a previous task"))

      parser.add_option("--latest", action="store_true", help=_("use the latest redirect url"))

-     parser.add_option("--buildroot", help=_("Duplicate the mock config for the specified buildroot id"))

+     parser.add_option("--buildroot",

+                       help=_("Duplicate the mock config for the specified buildroot id"))

      parser.add_option("--mockdir", default="/var/lib/mock", metavar="DIR",

                        help=_("Specify mockdir"))

      parser.add_option("--topdir", metavar="DIR",
@@ -894,7 +928,7 @@ 

      (options, args) = parser.parse_args(args)

      activate_session(session, goptions)

      if args:

-         #for historical reasons, we also accept buildroot name as first arg

+         # for historical reasons, we also accept buildroot name as first arg

          if not options.name:

              options.name = args[0]

          else:
@@ -1089,7 +1123,7 @@ 

              'method': 'restartHosts',

              'state':

                  [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],

-             }

+         }

          others = session.listTasks(query)

          if others:

              print('Found other restartHosts tasks running.')
@@ -1112,7 +1146,7 @@ 

      if my_opts.wait or (my_opts.wait is None and not _running_in_bg()):

          session.logout()

          return watch_tasks(session, [task_id], quiet=my_opts.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

      else:

          return

  
@@ -1121,9 +1155,11 @@ 

      "[admin] Import externally built RPMs into the database"

      usage = _("usage: %prog import [options] <package> [<package> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--link", action="store_true", help=_("Attempt to hardlink instead of uploading"))

+     parser.add_option("--link", action="store_true",

+                       help=_("Attempt to hardlink instead of uploading"))

      parser.add_option("--test", action="store_true", help=_("Don't actually import"))

-     parser.add_option("--create-build", action="store_true", help=_("Auto-create builds as needed"))

+     parser.add_option("--create-build", action="store_true",

+                       help=_("Auto-create builds as needed"))

      parser.add_option("--src-epoch", help=_("When auto-creating builds, use this epoch"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:
@@ -1138,24 +1174,23 @@ 

      activate_session(session, goptions)

      to_import = {}

      for path in args:

-         data = koji.get_header_fields(path, ('name','version','release','epoch',

-                                     'arch','sigmd5','sourcepackage','sourcerpm'))

+         data = koji.get_header_fields(path, ('name', 'version', 'release', 'epoch',

+                                              'arch', 'sigmd5', 'sourcepackage', 'sourcerpm'))

          if data['sourcepackage']:

              data['arch'] = 'src'

              nvr = "%(name)s-%(version)s-%(release)s" % data

          else:

              nvr = "%(name)s-%(version)s-%(release)s" % koji.parse_NVRA(data['sourcerpm'])

-         to_import.setdefault(nvr,[]).append((path,data))

+         to_import.setdefault(nvr, []).append((path, data))

      builds_missing = False

-     nvrs = to_list(to_import.keys())

-     nvrs.sort()

+     nvrs = sorted(to_list(to_import.keys()))

      for nvr in nvrs:

          to_import[nvr].sort()

          for path, data in to_import[nvr]:

              if data['sourcepackage']:

                  break

          else:

-             #no srpm included, check for build

+             # no srpm included, check for build

              binfo = session.getBuild(nvr)

              if not binfo:

                  print(_("Missing build or srpm: %s") % nvr)
@@ -1164,9 +1199,9 @@ 

          print(_("Aborting import"))

          return

  

-     #local function to help us out below

+     # local function to help us out below

      def do_import(path, data):

-         rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')])

+         rinfo = dict([(k, data[k]) for k in ('name', 'version', 'release', 'arch')])

          prev = session.getRPM(rinfo)

          if prev and not prev.get('external_repo_id', 0):

              if prev['payloadhash'] == koji.hex_string(data['sigmd5']):
@@ -1175,7 +1210,7 @@ 

                  print(_("WARNING: md5sum mismatch for %s") % path)

                  print(_("  A different rpm with the same name has already been imported"))

                  print(_("  Existing sigmd5 is %r, your import has %r") % (

-                         prev['payloadhash'], koji.hex_string(data['sigmd5'])))

+                     prev['payloadhash'], koji.hex_string(data['sigmd5'])))

              print(_("Skipping import"))

              return

          if options.test:
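
The duplicate check above keys on content, not name: an rpm counts as already imported only when the stored payloadhash matches the candidate file's sigmd5 header. The rule in isolation (illustrative helper):

    def already_imported(prev, data):
        # prev: existing rpm record from getRPM(); data: header fields of the file
        return (prev is not None
                and not prev.get('external_repo_id', 0)
                and prev['payloadhash'] == koji.hex_string(data['sigmd5']))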
@@ -1232,7 +1267,7 @@ 

  

          if need_build:

              # if we're doing this here, we weren't given the matching srpm

-             if not options.create_build: # pragma: no cover

+             if not options.create_build:  # pragma: no cover

                  if binfo:

                      # should have caught this earlier, but just in case...

                      b_state = koji.BUILD_STATES[binfo['state']]
@@ -1270,7 +1305,8 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--noprogress", action="store_true",

                        help=_("Do not display progress of the upload"))

-     parser.add_option("--link", action="store_true", help=_("Attempt to hardlink instead of uploading"))

+     parser.add_option("--link", action="store_true",

+                       help=_("Attempt to hardlink instead of uploading"))

      parser.add_option("--test", action="store_true", help=_("Don't actually import"))

      parser.add_option("--token", action="store", default=None, help=_("Build reservation token"))

      (options, args) = parser.parse_args(args)
@@ -1347,29 +1383,29 @@ 

      comps.fromxml_f(filename)

      force = options.force

      ptypes = {

-         libcomps.PACKAGE_TYPE_DEFAULT : 'default',

-         libcomps.PACKAGE_TYPE_OPTIONAL : 'optional',

-         libcomps.PACKAGE_TYPE_CONDITIONAL : 'conditional',

-         libcomps.PACKAGE_TYPE_MANDATORY : 'mandatory',

-         libcomps.PACKAGE_TYPE_UNKNOWN : 'unknown',

+         libcomps.PACKAGE_TYPE_DEFAULT: 'default',

+         libcomps.PACKAGE_TYPE_OPTIONAL: 'optional',

+         libcomps.PACKAGE_TYPE_CONDITIONAL: 'conditional',

+         libcomps.PACKAGE_TYPE_MANDATORY: 'mandatory',

+         libcomps.PACKAGE_TYPE_UNKNOWN: 'unknown',

      }

      for group in comps.groups:

          print("Group: %s (%s)" % (group.id, group.name))

          session.groupListAdd(

-                     tag, group.id, force=force, display_name=group.name,

-                     is_default=bool(group.default),

-                     uservisible=bool(group.uservisible),

-                     description=group.desc,

-                     langonly=group.lang_only,

-                     biarchonly=bool(group.biarchonly))

+             tag, group.id, force=force, display_name=group.name,

+             is_default=bool(group.default),

+             uservisible=bool(group.uservisible),

+             description=group.desc,

+             langonly=group.lang_only,

+             biarchonly=bool(group.biarchonly))

          for pkg in group.packages:

-             pkgopts = {'type' : ptypes[pkg.type],

-                         'basearchonly' : bool(pkg.basearchonly),

-                         }

+             pkgopts = {'type': ptypes[pkg.type],

+                        'basearchonly': bool(pkg.basearchonly),

+                        }

              if pkg.type == libcomps.PACKAGE_TYPE_CONDITIONAL:

                  pkgopts['requires'] = pkg.requires

              for k in pkgopts.keys():

-                 if six.PY2 and isinstance(pkgopts[k], unicode):

+                 if six.PY2 and isinstance(pkgopts[k], unicode):  # noqa: F821

                      pkgopts[k] = str(pkgopts[k])

              s_opts = ', '.join(["'%s': %r" % (k, pkgopts[k]) for k in sorted(pkgopts.keys())])

              print("  Package: %s: {%s}" % (pkg.name, s_opts))
@@ -1378,7 +1414,7 @@ 

          # libcomps does not support metapkgs

  

  

- def _import_comps_alt(session, filename, tag, options): # no cover 3.x

+ def _import_comps_alt(session, filename, tag, options):  # no cover 3.x

      """Import comps data using yum.comps module"""

      print('WARN: yum.comps does not support the biarchonly of group and basearchonly of package')

      comps = yumcomps.Comps()
@@ -1387,28 +1423,28 @@ 

      for group in comps.groups:

          print("Group: %(groupid)s (%(name)s)" % vars(group))

          session.groupListAdd(tag, group.groupid, force=force, display_name=group.name,

-                         is_default=bool(group.default),

-                         uservisible=bool(group.user_visible),

-                         description=group.description,

-                         langonly=group.langonly)

-         #yum.comps does not support the biarchonly field

+                              is_default=bool(group.default),

+                              uservisible=bool(group.user_visible),

+                              description=group.description,

+                              langonly=group.langonly)

+         # yum.comps does not support the biarchonly field

          for ptype, pdata in [('mandatory', group.mandatory_packages),

                               ('default', group.default_packages),

                               ('optional', group.optional_packages),

                               ('conditional', group.conditional_packages)]:

              for pkg in pdata:

-                 #yum.comps does not support basearchonly

-                 pkgopts = {'type' : ptype}

+                 # yum.comps does not support basearchonly

+                 pkgopts = {'type': ptype}

                  if ptype == 'conditional':

                      pkgopts['requires'] = pdata[pkg]

                  for k in pkgopts.keys():

-                     if six.PY2 and isinstance(pkgopts[k], unicode):

+                     if six.PY2 and isinstance(pkgopts[k], unicode):  # noqa: F821

                          pkgopts[k] = str(pkgopts[k])

                  s_opts = ', '.join(["'%s': %r" % (k, pkgopts[k]) for k in sorted(pkgopts.keys())])

                  print("  Package: %s: {%s}" % (pkg, s_opts))

                  session.groupPackageListAdd(tag, group.groupid, pkg, force=force, **pkgopts)

-         #yum.comps does not support group dependencies

-         #yum.comps does not support metapkgs

+         # yum.comps does not support group dependencies

+         # yum.comps does not support metapkgs

  

  

  def handle_import_sig(goptions, session, args):
@@ -1429,7 +1465,8 @@ 

              parser.error(_("No such file: %s") % path)

      activate_session(session, goptions)

      for path in args:

-         data = koji.get_header_fields(path, ('name','version','release','arch','siggpg','sigpgp','sourcepackage'))

+         data = koji.get_header_fields(path, ('name', 'version', 'release', 'arch', 'siggpg',

+                                              'sigpgp', 'sourcepackage'))

          if data['sourcepackage']:

              data['arch'] = 'src'

          sigkey = data['siggpg']
@@ -1449,7 +1486,8 @@ 

              print("No such rpm in system: %(name)s-%(version)s-%(release)s.%(arch)s" % data)

              continue

          if rinfo.get('external_repo_id'):

-             print("Skipping external rpm: %(name)s-%(version)s-%(release)s.%(arch)s@%(external_repo_name)s" % rinfo)

+             print("Skipping external rpm: %(name)s-%(version)s-%(release)s.%(arch)s@"

+                   "%(external_repo_name)s" % rinfo)

              continue

          sighdr = koji.rip_rpm_sighdr(path)

          previous = session.queryRPMSigs(rpm_id=rinfo['id'], sigkey=sigkey)
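
The string rewraps in this hunk rely on implicit concatenation: adjacent string
literals are joined at compile time, so a long %-format string can be split across
lines without changing its rendered output. A small sketch (the dict values are
made up for illustration):

    msg = ("Skipping external rpm: %(name)s-%(version)s-%(release)s.%(arch)s@"
           "%(external_repo_name)s")
    rinfo = {'name': 'bash', 'version': '5.0', 'release': '1', 'arch': 'x86_64',
             'external_repo_name': 'external-epel'}
    print(msg % rinfo)  # one line, identical to the unsplit literal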
@@ -1476,7 +1514,8 @@ 

      "[admin] Write signed RPMs to disk"

      usage = _("usage: %prog write-signed-rpm [options] <signature-key> <n-v-r> [<n-v-r> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--all", action="store_true", help=_("Write out all RPMs signed with this key"))

+     parser.add_option("--all", action="store_true",

+                       help=_("Write out all RPMs signed with this key"))

      parser.add_option("--buildid", help=_("Specify a build id rather than an n-v-r"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:
@@ -1509,7 +1548,7 @@ 

              rpms.extend(session.listRPMs(buildID=build['id']))

      for i, rpminfo in enumerate(rpms):

          nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpminfo

-         print("[%d/%d] %s" % (i+1, len(rpms), nvra))

+         print("[%d/%d] %s" % (i + 1, len(rpms), nvra))

          session.writeSignedRPM(rpminfo['id'], key)

  

  
@@ -1540,9 +1579,9 @@ 

      #  4) for a specified tag, remove all signed copies (no inheritance)

      #     (but skip builds that are multiply tagged)

  

-     #for now, we're just implementing mode #1

-     #(with the modification that we check to see if the build was latest within

-     #the last N days)

+     # for now, we're just implementing mode #1

+     # (with the modification that we check to see if the build was latest within

+     # the last N days)

      if options.ignore_tag_file:

          with open(options.ignore_tag_file) as fo:

              options.ignore_tag.extend([line.strip() for line in fo.readlines()])
@@ -1558,7 +1597,7 @@ 

          if options.verbose:

              print("Getting builds...")

          qopts = {

-             'state' : koji.BUILD_STATES['COMPLETE'],

+             'state': koji.BUILD_STATES['COMPLETE'],

              'queryOpts': {

                  'limit': 50000,

                  'offset': 0,
@@ -1579,7 +1618,7 @@ 

              print("...got %i builds" % len(builds))

          builds.sort()

      else:

-         #single build

+         # single build

          binfo = session.getBuild(options.build)

          if not binfo:

              parser.error('No such build: %s' % options.build)
@@ -1601,21 +1640,21 @@ 

          time_str = time.asctime(time.localtime(ts))

          return "%s: %s" % (time_str, fmt % x)

      for nvr, binfo in builds:

-         #listBuilds returns slightly different data than normal

+         # listBuilds returns slightly different data than normal

          if 'id' not in binfo:

              binfo['id'] = binfo['build_id']

          if 'name' not in binfo:

              binfo['name'] = binfo['package_name']

          if options.debug:

              print("DEBUG: %s" % nvr)

-         #see how recently this build was latest for a tag

+         # see how recently this build was latest for a tag

          is_latest = False

          is_protected = False

          last_latest = None

          tags = {}

          for entry in session.queryHistory(build=binfo['id'])['tag_listing']:

-             #we used queryHistory rather than listTags so we can consider tags

-             #that the build was recently untagged from

+             # we used queryHistory rather than listTags so we can consider tags

+             # that the build was recently untagged from

              tags.setdefault(entry['tag.name'], 1)

          if options.debug:

              print("Tags: %s" % to_list(tags.keys()))
@@ -1633,43 +1672,43 @@ 

                      break

              if ignore_tag:

                  continue

-             #in order to determine how recently this build was latest, we have

-             #to look at the tagging history.

+             # in order to determine how recently this build was latest, we have

+             # to look at the tagging history.

              hist = session.queryHistory(tag=tag_name, package=binfo['name'])['tag_listing']

              if not hist:

-                 #really shouldn't happen

+                 # really shouldn't happen

                  raise koji.GenericError("No history found for %s in %s" % (nvr, tag_name))

              timeline = []

              for x in hist:

-                 #note that for revoked entries, we're effectively splitting them into

-                 #two parts: creation and revocation.

+                 # note that for revoked entries, we're effectively splitting them into

+                 # two parts: creation and revocation.

                  timeline.append((x['create_event'], 1, x))

-                 #at the same event, revokes happen first

+                 # at the same event, revokes happen first

                  if x['revoke_event'] is not None:

                      timeline.append((x['revoke_event'], 0, x))

              timeline.sort(key=lambda entry: entry[:2])

-             #find most recent creation entry for our build and crop there

+             # find most recent creation entry for our build and crop there

              latest_ts = None

-             for i in range(len(timeline)-1, -1, -1):

-                 #searching in reverse cronological order

+             for i in range(len(timeline) - 1, -1, -1):

+                 # searching in reverse chronological order

                  event_id, is_create, entry = timeline[i]

                  if entry['build_id'] == binfo['id'] and is_create:

                      latest_ts = event_id

                      break

              if not latest_ts:

-                 #really shouldn't happen

+                 # really shouldn't happen

                  raise koji.GenericError("No creation event found for %s in %s" % (nvr, tag_name))

              our_entry = entry

              if options.debug:

                  print(_histline(event_id, our_entry))

-             #now go through the events since most recent creation entry

-             timeline = timeline[i+1:]

+             # now go through the events since most recent creation entry

+             timeline = timeline[i + 1:]

              if not timeline:

                  is_latest = True

                  if options.debug:

                      print("%s is latest in tag %s" % (nvr, tag_name))

                  break

-             #before we go any further, is this a protected tag?

+             # before we go any further, is this a protected tag?

              protect_tag = False

              for pattern in options.protect_tag:

                  if fnmatch.fnmatch(tag_name, pattern):
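
The reformatted comments above spell out the core trick of this handler: each
tag-history record is split into a creation event and, if revoked, a revocation
event, and sorting on the (event, is_create) pair makes revokes sort ahead of
creates that share an event. A standalone sketch with made-up history data:

    hist = [
        {'create_event': 10, 'revoke_event': 12, 'build_id': 1},
        {'create_event': 12, 'revoke_event': None, 'build_id': 2},
    ]
    timeline = []
    for x in hist:
        timeline.append((x['create_event'], 1, x))      # creation
        if x['revoke_event'] is not None:
            timeline.append((x['revoke_event'], 0, x))  # revocation
    timeline.sort(key=lambda entry: entry[:2])
    # At event 12 the revoke of build 1 (12, 0) sorts before the create of
    # build 2 (12, 1), exactly the ordering the pruning logic depends on.
    print([(e, c, x['build_id']) for e, c, x in timeline])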
@@ -1680,56 +1719,57 @@ 

                  # if this build was in this tag within that limit, then we will

                  # not prune its signed copies

                  if our_entry['revoke_event'] is None:

-                     #we're still tagged with a protected tag

+                     # we're still tagged with a protected tag

                      if options.debug:

                          print("Build %s has protected tag %s" % (nvr, tag_name))

                      is_protected = True

                      break

                  elif our_entry['revoke_ts'] > cutoff_ts:

-                     #we were still tagged here sometime before the cutoff

+                     # we were still tagged here sometime before the cutoff

                      if options.debug:

-                         print("Build %s had protected tag %s until %s" \

-                                 % (nvr, tag_name, time.asctime(time.localtime(our_entry['revoke_ts']))))

+                         print("Build %s had protected tag %s until %s"

+                               % (nvr, tag_name,

+                                  time.asctime(time.localtime(our_entry['revoke_ts']))))

                      is_protected = True

                      break

              replaced_ts = None

              revoke_ts = None

              others = {}

              for event_id, is_create, entry in timeline:

-                 #So two things can knock this build from the title of latest:

+                 # So two things can knock this build from the title of latest:

                  #  - it could be untagged (entry revoked)

                  #  - another build could become latest (replaced)

-                 #Note however that if the superceding entry is itself revoked, then

-                 #our build could become latest again

+                 # Note however that if the superseding entry is itself revoked, then

+                 # our build could become latest again

                  if options.debug:

                      print(_histline(event_id, entry))

                  if entry['build_id'] == binfo['id']:

                      if is_create:

-                         #shouldn't happen

-                         raise koji.GenericError("Duplicate creation event found for %s in %s" \

-                                                     % (nvr, tag_name))

+                         # shouldn't happen

+                         raise koji.GenericError("Duplicate creation event found for %s in %s"

+                                                 % (nvr, tag_name))

                      else:

-                         #we've been revoked

+                         # we've been revoked

                          revoke_ts = entry['revoke_ts']

                          break

                  else:

                      if is_create:

-                         #this build has become latest

+                         # this build has become latest

                          replaced_ts = entry['create_ts']

                          if entry['active']:

-                             #this entry not revoked yet, so we're done for this tag

+                             # this entry not revoked yet, so we're done for this tag

                              break

-                         #since this entry is revoked later, our build might eventually be

-                         #uncovered, so we have to keep looking

+                         # since this entry is revoked later, our build might eventually be

+                         # uncovered, so we have to keep looking

                          others[entry['build_id']] = 1

                      else:

-                         #other build revoked

-                         #see if our build has resurfaced

+                         # other build revoked

+                         # see if our build has resurfaced

                          if entry['build_id'] in others:

                              del others[entry['build_id']]

                          if replaced_ts is not None and not others:

-                             #we've become latest again

-                             #(note: we're not revoked yet because that triggers a break above)

+                             # we've become latest again

+                             # (note: we're not revoked yet because that triggers a break above)

                              replaced_ts = None

                              latest_ts = entry['revoke_ts']

              if last_latest is None:
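
A recurring fix in these hunks drops backslash line continuations in favor of
implicit continuation inside the call's parentheses, which PEP 8 and flake8 prefer
(a stray space after a backslash is a SyntaxError; parentheses have no such trap).
Before/after, runnable with sample values:

    import time

    tag_name, nvr = 'f31-build', 'bash-5.0-1.fc31'  # sample values
    replaced_ts = time.time()

    # Before: explicit backslash continuation
    print("tag %s: %s not latest (replaced %s)" \
            % (tag_name, nvr, time.asctime(time.localtime(replaced_ts))))

    # After: print()'s open parenthesis already lets the statement continue
    print("tag %s: %s not latest (replaced %s)"
          % (tag_name, nvr, time.asctime(time.localtime(replaced_ts))))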
@@ -1738,30 +1778,30 @@ 

                  timestamps = [last_latest]

              if revoke_ts is None:

                  if replaced_ts is None:

-                     #turns out we are still latest

+                     # turns out we are still latest

                      is_latest = True

                      if options.debug:

                          print("%s is latest (again) in tag %s" % (nvr, tag_name))

                      break

                  else:

-                     #replaced (but not revoked)

+                     # replaced (but not revoked)

                      timestamps.append(replaced_ts)

                      if options.debug:

-                         print("tag %s: %s not latest (replaced %s)" \

-                                 % (tag_name, nvr, time.asctime(time.localtime(replaced_ts))))

+                         print("tag %s: %s not latest (replaced %s)"

+                               % (tag_name, nvr, time.asctime(time.localtime(replaced_ts))))

              elif replaced_ts is None:

-                 #revoked but not replaced

+                 # revoked but not replaced

                  timestamps.append(revoke_ts)

                  if options.debug:

-                     print("tag %s: %s not latest (revoked %s)" \

-                             % (tag_name, nvr, time.asctime(time.localtime(revoke_ts))))

+                     print("tag %s: %s not latest (revoked %s)"

+                           % (tag_name, nvr, time.asctime(time.localtime(revoke_ts))))

              else:

-                 #revoked AND replaced

+                 # revoked AND replaced

                  timestamps.append(min(revoke_ts, replaced_ts))

                  if options.debug:

-                     print("tag %s: %s not latest (revoked %s, replaced %s)" \

-                             % (tag_name, nvr, time.asctime(time.localtime(revoke_ts)),

-                                 time.asctime(time.localtime(replaced_ts))))

+                     print("tag %s: %s not latest (revoked %s, replaced %s)"

+                           % (tag_name, nvr, time.asctime(time.localtime(revoke_ts)),

+                              time.asctime(time.localtime(replaced_ts))))

              last_latest = max(timestamps)

              if last_latest > cutoff_ts:

                  if options.debug:
@@ -1772,13 +1812,13 @@ 

              continue

          if is_protected:

              continue

-         #not latest anywhere since cutoff, so we can remove all signed copies

+         # not latest anywhere since cutoff, so we can remove all signed copies

          rpms = session.listRPMs(buildID=binfo['id'])

          session.multicall = True

          for rpminfo in rpms:

              session.queryRPMSigs(rpm_id=rpminfo['id'])

          by_sig = {}

-         #index by sig

+         # index by sig

          for rpminfo, [sigs] in zip(rpms, session.multiCall()):

              for sig in sigs:

                  sigkey = sig['sigkey']
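
The loop above uses koji's client-side multicall idiom: setting
session.multicall = True queues calls instead of sending them, and
session.multiCall() fires the batch in one round trip, returning results in queue
order with each successful result wrapped in a one-element list (a fault comes
back as a dict instead). That wrapping is what the [sigs] loop target unpacks in
place; a pure-Python illustration of the unpacking:

    results = [[['sig-a', 'sig-b']], [[]]]  # two queued calls' results
    rpms = [{'id': 1}, {'id': 2}]
    for rpminfo, [sigs] in zip(rpms, results):
        print(rpminfo['id'], sigs)
    # 1 ['sig-a', 'sig-b']
    # 2 []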
@@ -1799,7 +1839,7 @@ 

                  except OSError:

                      continue

                  if not stat.S_ISREG(st.st_mode):

-                     #warn about this

+                     # warn about this

                      print("Skipping %s. Not a regular file" % signedpath)

                      continue

                  if st.st_mtime > cutoff_ts:
@@ -1816,10 +1856,10 @@ 

                          print("Error removing %s: %s" % (signedpath, e))

                          print("This script needs write access to %s" % koji.BASEDIR)

                          continue

-                 mycount +=1

+                 mycount += 1

                  build_files += 1

                  build_space += st.st_size

-                 #XXX - this makes some layout assumptions, but

+                 # XXX - this makes some layout assumptions, but

                  #      pathinfo doesn't report what we need

                  mydir = os.path.dirname(signedpath)

                  archdirs[mydir] = 1
@@ -1851,8 +1891,8 @@ 

              total_files += build_files

              total_space += build_space

              if options.verbose:

-                 print("Build: %s, Removed %i signed copies (%i bytes). Total: %i/%i" \

-                         % (nvr, build_files, build_space, total_files, total_space))

+                 print("Build: %s, Removed %i signed copies (%i bytes). Total: %i/%i"

+                       % (nvr, build_files, build_space, total_files, total_space))

          elif options.debug and by_sig:

              print("(build has no signed copies)")

      print("--- Grand Totals ---")
@@ -1879,7 +1919,7 @@ 

          if not binfo:

              print("No such build: %s" % nvr)

          elif binfo['volume_id'] == volinfo['id']:

-             print("Build %s already on volume %s" %(nvr, volinfo['name']))

+             print("Build %s already on volume %s" % (nvr, volinfo['name']))

          else:

              builds.append(binfo)

      if not builds:
@@ -2048,7 +2088,8 @@ 

          rinfo = session.getRPM(rpm_info, strict=True)

          rpm_idx[rinfo['id']] = rinfo

          if rinfo.get('external_repo_id'):

-             parser.error(_("External rpm: %(name)s-%(version)s-%(release)s.%(arch)s@%(external_repo_name)s") % rinfo)

+             parser.error(_("External rpm: %(name)s-%(version)s-%(release)s.%(arch)s@"

+                            "%(external_repo_name)s") % rinfo)

          qopts['rpm_id'] = rinfo['id']

      if options.build:

          build = options.build
@@ -2078,7 +2119,7 @@ 

          for rinfo in rpms:

              rpm_idx.setdefault(rinfo['id'], rinfo)

              tagged[rinfo['id']] = 1

-     #Now figure out which sig entries actually have live copies

+     # Now figure out which sig entries actually have live copies

      for sig in sigs:

          rpm_id = sig['rpm_id']

          sigkey = sig['sigkey']
@@ -2109,13 +2150,18 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--noprogress", action="store_true",

                        help=_("Do not display progress of the upload"))

-     parser.add_option("--create-build", action="store_true", help=_("Auto-create builds as needed"))

-     parser.add_option("--link", action="store_true", help=_("Attempt to hardlink instead of uploading"))

-     parser.add_option("--type", help=_("The type of archive being imported.  Currently supported types: maven, win, image"))

-     parser.add_option("--type-info", help=_("Type-specific information to associate with the archives.  "

-                                             "For Maven archives this should be a local path to a .pom file.  "

-                                             "For Windows archives this should be relpath:platforms[:flags]))  "

-                                             "Images need an arch"))

+     parser.add_option("--create-build", action="store_true",

+                       help=_("Auto-create builds as needed"))

+     parser.add_option("--link", action="store_true",

+                       help=_("Attempt to hardlink instead of uploading"))

+     parser.add_option("--type",

+                       help=_("The type of archive being imported. "

+                              "Currently supported types: maven, win, image"))

+     parser.add_option("--type-info",

+                       help=_("Type-specific information to associate with the archives. "

+                              "For Maven archives this should be a local path to a .pom file. "

+                              "For Windows archives this should be relpath:platforms[:flags])) "

+                              "Images need an arch"))

      (suboptions, args) = parser.parse_args(args)

  

      if not len(args) > 1:
@@ -2283,11 +2329,14 @@ 

      usage = _("usage: %prog latest-build [options] <tag> <package> [<package> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--arch", help=_("List all of the latest packages for this arch"))

-     parser.add_option("--all", action="store_true", help=_("List all of the latest packages for this tag"))

+     parser.add_option("--all", action="store_true",

+                       help=_("List all of the latest packages for this tag"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print the header information"))

+                       help=_("Do not print the header information"))

      parser.add_option("--paths", action="store_true", help=_("Show the file paths"))

-     parser.add_option("--type", help=_("Show builds of the given type only.  Currently supported types: maven"))

+     parser.add_option("--type",

+                       help=_("Show builds of the given type only. "

+                              "Currently supported types: maven"))

      (options, args) = parser.parse_args(args)

      if len(args) == 0:

          parser.error(_("A tag name must be specified"))
@@ -2324,27 +2373,30 @@ 

                  if options.type == 'maven':

                      for x in data:

                          x['path'] = pathinfo.mavenbuild(x)

-                     fmt = "%(path)-40s  %(tag_name)-20s  %(maven_group_id)-20s  %(maven_artifact_id)-20s  %(owner_name)s"

+                     fmt = "%(path)-40s  %(tag_name)-20s  %(maven_group_id)-20s  " \

+                           "%(maven_artifact_id)-20s  %(owner_name)s"

                  else:

                      for x in data:

                          x['path'] = pathinfo.build(x)

                      fmt = "%(path)-40s  %(tag_name)-20s  %(owner_name)s"

              else:

                  if options.type == 'maven':

-                     fmt = "%(nvr)-40s  %(tag_name)-20s  %(maven_group_id)-20s  %(maven_artifact_id)-20s  %(owner_name)s"

+                     fmt = "%(nvr)-40s  %(tag_name)-20s  %(maven_group_id)-20s  " \

+                           "%(maven_artifact_id)-20s  %(owner_name)s"

                  else:

                      fmt = "%(nvr)-40s  %(tag_name)-20s  %(owner_name)s"

              if not options.quiet:

                  if options.type == 'maven':

-                     print("%-40s  %-20s  %-20s  %-20s  %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))

-                     print("%s  %s  %s  %s  %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16))

+                     print("%-40s  %-20s  %-20s  %-20s  %s" %

+                           ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))

+                     print("%s  %s  %s  %s  %s" %

+                           ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))

                  else:

-                     print("%-40s  %-20s  %s" % ("Build","Tag","Built by"))

-                     print("%s  %s  %s" % ("-"*40, "-"*20, "-"*16))

+                     print("%-40s  %-20s  %s" % ("Build", "Tag", "Built by"))

+                     print("%s  %s  %s" % ("-" * 40, "-" * 20, "-" * 16))

                  options.quiet = True

  

-         output = [ fmt % x for x in data]

-         output.sort()

+         output = sorted([fmt % x for x in data])

          for line in output:

              print(line)
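
Several hunks collapse the build-a-list-then-.sort() two-step into a single
sorted() call; the behavior is identical, but the name can never be observed in
its unsorted state. For the decorate-sort pairs elsewhere in the file, a key=
function is the equivalent without the intermediate tuples; a sketch:

    data = [{'name': 'kojid', 'id': 2}, {'name': 'koji', 'id': 1}]

    # The diff's form: decorate with the sort field, sort, undecorate.
    tmp_list = sorted([(x['name'], x) for x in data])
    hosts = [x[1] for x in tmp_list]

    # Equivalent with key=, skipping the (name, dict) tuples entirely:
    assert hosts == sorted(data, key=lambda x: x['name'])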

  
@@ -2364,7 +2416,7 @@ 

              # older servers may not provide argdesc

              expanded = []

              for arg in x['args']:

-                 if type(arg) is str:

+                 if isinstance(arg, str):

                      expanded.append(arg)

                  else:

                      expanded.append('%s=%s' % (arg[0], arg[1]))
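
The type(arg) is str -> isinstance(arg, str) change is the idiomatic type check:
isinstance() also accepts subclasses, which an exact-type comparison rejects.
A small sketch (TaskArg is a hypothetical subclass, just for illustration):

    class TaskArg(str):
        pass

    arg = TaskArg("scratch")
    print(type(arg) is str)      # False: exact-type check misses the subclass
    print(isinstance(arg, str))  # True: the behavior the CLI wants here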
@@ -2384,14 +2436,18 @@ 

      parser.add_option("--rpms", action="store_true", help=_("Show rpms instead of builds"))

      parser.add_option("--inherit", action="store_true", help=_("Follow inheritance"))

      parser.add_option("--latest", action="store_true", help=_("Only show the latest builds/rpms"))

-     parser.add_option("--latest-n", type='int', metavar="N", help=_("Only show the latest N builds/rpms"))

+     parser.add_option("--latest-n", type='int', metavar="N",

+                       help=_("Only show the latest N builds/rpms"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print the header information"))

+                       help=_("Do not print the header information"))

      parser.add_option("--paths", action="store_true", help=_("Show the file paths"))

      parser.add_option("--sigs", action="store_true", help=_("Show signatures"))

-     parser.add_option("--type", help=_("Show builds of the given type only.  Currently supported types: maven, win, image"))

+     parser.add_option("--type",

+                       help=_("Show builds of the given type only. "

+                              "Currently supported types: maven, win, image"))

      parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))

-     parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at last event before timestamp"))

+     parser.add_option("--ts", type='int', metavar="TIMESTAMP",

+                       help=_("query at last event before timestamp"))

      parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))

      (options, args) = parser.parse_args(args)

      if len(args) == 0:
@@ -2437,7 +2493,7 @@ 

          rpms, builds = session.listTaggedRPMS(tag, **opts)

          data = rpms

          if options.paths:

-             build_idx = dict([(b['id'],b) for b in builds])

+             build_idx = dict([(b['id'], b) for b in builds])

              for rinfo in data:

                  build = build_idx[rinfo['build_id']]

                  builddir = pathinfo.build(build)
@@ -2460,26 +2516,29 @@ 

              if options.type == 'maven':

                  for x in data:

                      x['path'] = pathinfo.mavenbuild(x)

-                 fmt = "%(path)-40s  %(tag_name)-20s  %(maven_group_id)-20s  %(maven_artifact_id)-20s  %(owner_name)s"

+                 fmt = "%(path)-40s  %(tag_name)-20s  %(maven_group_id)-20s  " \

+                       "%(maven_artifact_id)-20s  %(owner_name)s"

              else:

                  for x in data:

                      x['path'] = pathinfo.build(x)

                  fmt = "%(path)-40s  %(tag_name)-20s  %(owner_name)s"

          else:

              if options.type == 'maven':

-                 fmt = "%(nvr)-40s  %(tag_name)-20s  %(maven_group_id)-20s  %(maven_artifact_id)-20s  %(owner_name)s"

+                 fmt = "%(nvr)-40s  %(tag_name)-20s  %(maven_group_id)-20s  " \

+                       "%(maven_artifact_id)-20s  %(owner_name)s"

              else:

                  fmt = "%(nvr)-40s  %(tag_name)-20s  %(owner_name)s"

          if not options.quiet:

              if options.type == 'maven':

-                 print("%-40s  %-20s  %-20s  %-20s  %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))

-                 print("%s  %s  %s  %s  %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16))

+                 print("%-40s  %-20s  %-20s  %-20s  %s" %

+                       ("Build", "Tag", "Group Id", "Artifact Id", "Built by"))

+                 print("%s  %s  %s  %s  %s" %

+                       ("-" * 40, "-" * 20, "-" * 20, "-" * 20, "-" * 16))

              else:

-                 print("%-40s  %-20s  %s" % ("Build","Tag","Built by"))

-                 print("%s  %s  %s" % ("-"*40, "-"*20, "-"*16))

+                 print("%-40s  %-20s  %s" % ("Build", "Tag", "Built by"))

+                 print("%s  %s  %s" % ("-" * 40, "-" * 20, "-" * 16))

  

-     output = [ fmt % x for x in data]

-     output.sort()

+     output = sorted([fmt % x for x in data])

      for line in output:

          print(line)

  
@@ -2504,8 +2563,7 @@ 

      data = session.listRPMs(**opts)

  

      fmt = "%(nvr)s.%(arch)s"

-     order = [(fmt % x, x) for x in data]

-     order.sort()

+     order = sorted([(fmt % x, x) for x in data])

      for nvra, rinfo in order:

          if options.verbose and rinfo.get('is_update'):

              print(nvra, "[update]")
@@ -2558,8 +2616,7 @@ 

      if options.show_references:

          fmt = fmt + "  %(refs)s"

  

-     output = [ fmt % x for x in data]

-     output.sort()

+     output = sorted([fmt % x for x in data])

      for line in output:

          print(line)

  
@@ -2597,11 +2654,11 @@ 

          opts['event'] = event['id']

          event['timestr'] = time.asctime(time.localtime(event['ts']))

          print("Querying at event %(id)i (%(timestr)s)" % event)

-     tmp_list = [(x['name'], x) for x in session.getTagGroups(args[0], **opts)]

-     tmp_list.sort()

+     tmp_list = sorted([(x['name'], x) for x in session.getTagGroups(args[0], **opts)])

      groups = [x[1] for x in tmp_list]

  

      tags_cache = {}

+ 

      def get_cached_tag(tag_id):

          if tag_id not in tags_cache:

              tag = session.getTag(tag_id, strict=False)
@@ -2615,13 +2672,11 @@ 

          if len(args) > 1 and group['name'] != args[1]:

              continue

          print("%s  [%s]" % (group['name'], get_cached_tag(group['tag_id'])))

-         groups = [(x['name'], x) for x in group['grouplist']]

-         groups.sort()

+         groups = sorted([(x['name'], x) for x in group['grouplist']])

          for x in [x[1] for x in groups]:

              x['tag_name'] = get_cached_tag(x['tag_id'])

              print_group_list_req_group(x)

-         pkgs = [(x['package'], x) for x in group['packagelist']]

-         pkgs.sort()

+         pkgs = sorted([(x['package'], x) for x in group['packagelist']])

          for x in [x[1] for x in pkgs]:

              x['tag_name'] = get_cached_tag(x['tag_id'])

              print_group_list_req_package(x)
@@ -2718,9 +2773,9 @@ 

      usage = _("usage: %prog list-channels")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--simple", action="store_true", default=False,

-                 help=_("Print just list of channels without additional info"))

+                       help=_("Print just list of channels without additional info"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print header information"))

+                       help=_("Do not print header information"))

      (options, args) = parser.parse_args(args)

      activate_session(session, goptions)

      channels = session.listChannels()
@@ -2747,7 +2802,8 @@ 

          if not options.quiet:

              print('Channel        Enabled  Ready Disbld   Load    Cap    Perc')

          for channel in channels:

-             print("%(name)-15s %(enabled)6d %(ready)6d %(disabled)6d %(load)6d %(capacity)6d %(perc_load)6d%%" % channel)

+             print("%(name)-15s %(enabled)6d %(ready)6d %(disabled)6d %(load)6d %(capacity)6d "

+                   "%(perc_load)6d%%" % channel)

  

  

  def anon_handle_list_hosts(goptions, session, args):
@@ -2757,12 +2813,15 @@ 

      parser.add_option("--arch", action="append", default=[], help=_("Specify an architecture"))

      parser.add_option("--channel", help=_("Specify a channel"))

      parser.add_option("--ready", action="store_true", help=_("Limit to ready hosts"))

-     parser.add_option("--not-ready", action="store_false", dest="ready", help=_("Limit to not ready hosts"))

+     parser.add_option("--not-ready", action="store_false", dest="ready",

+                       help=_("Limit to not ready hosts"))

      parser.add_option("--enabled", action="store_true", help=_("Limit to enabled hosts"))

-     parser.add_option("--not-enabled", action="store_false", dest="enabled", help=_("Limit to not enabled hosts"))

-     parser.add_option("--disabled", action="store_false", dest="enabled", help=_("Alias for --not-enabled"))

+     parser.add_option("--not-enabled", action="store_false", dest="enabled",

+                       help=_("Limit to not enabled hosts"))

+     parser.add_option("--disabled", action="store_false", dest="enabled",

+                       help=_("Alias for --not-enabled"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print header information"))

+                       help=_("Do not print header information"))

      parser.add_option("--show-channels", action="store_true", help=_("Show host's channels"))

      (options, args) = parser.parse_args(args)

      opts = {}
@@ -2778,13 +2837,14 @@ 

          opts['ready'] = options.ready

      if options.enabled is not None:

          opts['enabled'] = options.enabled

-     tmp_list = [(x['name'], x) for x in session.listHosts(**opts)]

-     tmp_list.sort()

+     tmp_list = sorted([(x['name'], x) for x in session.listHosts(**opts)])

      hosts = [x[1] for x in tmp_list]

  

      def yesno(x):

-         if x: return 'Y'

-         else: return 'N'

+         if x:

+             return 'Y'

+         else:

+             return 'N'

  

      # pull in the last update using multicall to speed it up a bit

      session.multicall = True
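
The yesno rewrite above is pycodestyle E701 (multiple statements on one line):
the test and the return move onto separate lines. A conditional expression would
be an equally compliant one-liner, shown only as an alternative sketch:

    def yesno(x):
        # Same truth table as the expanded if/else in the diff.
        return 'Y' if x else 'N'

    print(yesno(True), yesno(0))  # Y N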
@@ -2814,11 +2874,13 @@ 

      else:

          longest_host = 8

      if not options.quiet:

-         hdr = "{hostname:<{longest_host}} Enb Rdy Load/Cap  Arches           Last Update".format(longest_host=longest_host, hostname='Hostname')

+         hdr = "{hostname:<{longest_host}} Enb Rdy Load/Cap  Arches           Last Update".format(

+             longest_host=longest_host, hostname='Hostname')

          if options.show_channels:

              hdr += "         Channels"

          print(hdr)

-     mask = "%%(name)-%ss %%(enabled)-3s %%(ready)-3s %%(task_load)4.1f/%%(capacity)-4.1f %%(arches)-16s %%(update)-19s" % longest_host

+     mask = "%%(name)-%ss %%(enabled)-3s %%(ready)-3s %%(task_load)4.1f/%%(capacity)-4.1f " \

+            "%%(arches)-16s %%(update)-19s" % longest_host

      if options.show_channels:

          mask += " %(channels)s"

      for host in hosts:
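
The mask rewrap above preserves a subtle idiom: the template is itself built with
the % operator, so each %% survives the first substitution as a literal % and only
the computed column width is filled in. A sketch with a sample width:

    longest_host = 12  # computed from the longest hostname in the real code
    mask = "%%(name)-%ss %%(arches)-16s" % longest_host
    print(mask)  # -> %(name)-12s %(arches)-16s
    print(mask % {'name': 'builder01', 'arches': 'x86_64'})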
@@ -2833,12 +2895,13 @@ 

      parser.add_option("--tag", help=_("Specify tag"))

      parser.add_option("--package", help=_("Specify package"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print header information"))

+                       help=_("Do not print header information"))

      parser.add_option("--noinherit", action="store_true", help=_("Don't follow inheritance"))

      parser.add_option("--show-blocked", action="store_true", help=_("Show blocked packages"))

      parser.add_option("--show-dups", action="store_true", help=_("Show superseded owners"))

      parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))

-     parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at last event before timestamp"))

+     parser.add_option("--ts", type='int', metavar="TIMESTAMP",

+                       help=_("query at last event before timestamp"))

      parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))

      (options, args) = parser.parse_args(args)

      if len(args) != 0:
@@ -2862,7 +2925,7 @@ 

          # no limiting clauses were specified

          allpkgs = True

      opts['inherited'] = not options.noinherit

-     #hiding dups only makes sense if we're querying a tag

+     # hiding dups only makes sense if we're querying a tag

      if options.tag:

          opts['with_dups'] = options.show_dups

      else:
@@ -2889,21 +2952,21 @@ 

      if not options.quiet:

          if allpkgs:

              print("Package")

-             print('-'*23)

+             print('-' * 23)

          else:

-             print("%-23s %-23s %-16s %-15s" % ('Package','Tag','Extra Arches','Owner'))

-             print("%s %s %s %s" % ('-'*23,'-'*23,'-'*16,'-'*15))

+             print("%-23s %-23s %-16s %-15s" % ('Package', 'Tag', 'Extra Arches', 'Owner'))

+             print("%s %s %s %s" % ('-' * 23, '-' * 23, '-' * 16, '-' * 15))

      for pkg in data:

          if allpkgs:

              print(pkg['package_name'])

          else:

-             if not options.show_blocked and pkg.get('blocked',False):

+             if not options.show_blocked and pkg.get('blocked', False):

                  continue

              if 'tag_id' in pkg:

                  if pkg['extra_arches'] is None:

                      pkg['extra_arches'] = ""

                  fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-15s"

-                 if pkg.get('blocked',False):

+                 if pkg.get('blocked', False):

                      fmt += " [BLOCKED]"

              else:

                  fmt = "%(package_name)s"
@@ -2930,7 +2993,7 @@ 

      parser.add_option("-r", "--reverse", action="store_true", default=False,

                        help=_("Print the list in reverse order"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print the header information"))

+                       help=_("Do not print the header information"))

      (options, args) = parser.parse_args(args)

      if len(args) != 0:

          parser.error(_("This command takes no arguments"))
@@ -2993,7 +3056,7 @@ 

              dt = dateutil.parser.parse(val)

              ts = time.mktime(dt.timetuple())

              setattr(options, opt, ts)

-         except:

+         except Exception:

              parser.error(_("Invalid time specification: %s") % val)

      if options.before:

          opts['completeBefore'] = getattr(options, 'before')
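
The bare except fixed here is flake8 E722: "except:" also catches SystemExit and
KeyboardInterrupt, so a Ctrl-C pressed during date parsing would surface as
"Invalid time specification". "except Exception:" lets those exiting exceptions
propagate; a self-contained sketch:

    def parse_or_none(text):
        try:
            return float(text)
        except Exception:  # ValueError lands here; KeyboardInterrupt does not
            return None

    print(parse_or_none("3.5"))   # 3.5
    print(parse_or_none("soon"))  # None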
@@ -3023,7 +3086,7 @@ 

      fmt = "%(nvr)-55s  %(owner_name)-16s  %(state)s"

      if not options.quiet:

          print("%-55s  %-16s  %s" % ("Build", "Built by", "State"))

-         print("%s  %s  %s" % ("-"*55, "-"*16, "-"*16))

+         print("%s  %s  %s" % ("-" * 55, "-" * 16, "-" * 16))

  

      for build in data:

          print(fmt % build)
@@ -3033,7 +3096,8 @@ 

      "[info] Print basic information about an RPM"

      usage = _("usage: %prog rpminfo [options] <n-v-r.a> [<n-v-r.a> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--buildroots", action="store_true", help=_("show buildroots the rpm was used in"))

+     parser.add_option("--buildroots", action="store_true",

+                       help=_("show buildroots the rpm was used in"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:

          parser.error(_("Please specify an RPM"))
@@ -3061,10 +3125,13 @@ 

              print("External Repository: %(name)s [%(id)i]" % repo)

              print("External Repository url: %(url)s" % repo)

          else:

-             print("RPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(info)))

+             print("RPM Path: %s" %

+                   os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(info)))

              print("SRPM: %(epoch)s%(name)s-%(version)s-%(release)s [%(id)d]" % buildinfo)

-             print("SRPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(buildinfo)))

-             print("Built: %s" % time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(info['buildtime'])))

+             print("SRPM Path: %s" %

+                   os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(buildinfo)))

+             print("Built: %s" % time.strftime('%a, %d %b %Y %H:%M:%S %Z',

+                                               time.localtime(info['buildtime'])))

          print("SIGMD5: %(payloadhash)s" % info)

          print("Size: %(size)s" % info)

          if not info.get('external_repo_id', 0):
@@ -3077,7 +3144,8 @@ 

          else:

              br_info = session.getBuildroot(info['buildroot_id'])

              if br_info['br_type'] == koji.BR_TYPES['STANDARD']:

-                 print("Buildroot: %(id)i (tag %(tag_name)s, arch %(arch)s, repo %(repo_id)i)" % br_info)

+                 print("Buildroot: %(id)i (tag %(tag_name)s, arch %(arch)s, repo %(repo_id)i)" %

+                       br_info)

                  print("Build Host: %(host_name)s" % br_info)

                  print("Build Task: %(task_id)i" % br_info)

              else:
@@ -3087,11 +3155,11 @@ 

          if info.get('extra'):

              print("Extra: %(extra)r" % info)

          if options.buildroots:

-             br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order':'buildroot.id'})

+             br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order': 'buildroot.id'})

              print("Used in %i buildroots:" % len(br_list))

              if len(br_list):

-                 print("  %8s %-28s %-8s %-29s" % ('id','build tag','arch','build host'))

-                 print("  %s %s %s %s" % ('-'*8, '-'*28, '-'*8, '-'*29))

+                 print("  %8s %-28s %-8s %-29s" % ('id', 'build tag', 'arch', 'build host'))

+                 print("  %s %s %s %s" % ('-' * 8, '-' * 28, '-' * 8, '-' * 29))

              for br_info in br_list:

                  print("  %(id)8i %(tag_name)-28s %(arch)-8s %(host_name)-29s" % br_info)

  
@@ -3100,7 +3168,8 @@ 

      "[info] Print basic information about a build"

      usage = _("usage: %prog buildinfo [options] <n-v-r> [<n-v-r> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--changelog", action="store_true", help=_("Show the changelog for the build"))

+     parser.add_option("--changelog", action="store_true",

+                       help=_("Show the changelog for the build"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:

          parser.error(_("Please specify a build"))
@@ -3152,7 +3221,8 @@ 

              print("Maven archives:")

              for archive in maven_archives:

                  archives_seen.setdefault(archive['id'], 1)

-                 print(os.path.join(koji.pathinfo.mavenbuild(info), koji.pathinfo.mavenfile(archive)))

+                 print(os.path.join(koji.pathinfo.mavenbuild(info),

+                                    koji.pathinfo.mavenfile(archive)))

          win_archives = session.listArchives(buildID=info['id'], type='win')

          if win_archives:

              print("Windows archives:")
@@ -3211,14 +3281,14 @@ 

              description = info['description'].splitlines()

              print("Description: %s" % description[0])

              for line in description[1:]:

-                 print("%s%s" % (" "*13, line))

+                 print("%s%s" % (" " * 13, line))

          else:

              print("Description:")

          if info['comment']:

              comment = info['comment'].splitlines()

              print("Comment: %s" % comment[0])

              for line in comment[1:]:

-                 print("%s%s" % (" "*9, line))

+                 print("%s%s" % (" " * 9, line))

          else:

              print("Comment:")

          print("Enabled: %s" % (info['enabled'] and 'yes' or 'no'))
@@ -3229,11 +3299,12 @@ 

          else:

              update = update[:update.find('.')]

          print("Last Update: %s" % update)

-         print("Channels: %s" % ' '.join([c['name'] for c in session.listChannels(hostID=info['id'])]))

+         print("Channels: %s" % ' '.join([c['name']

+                                          for c in session.listChannels(hostID=info['id'])]))

          print("Active Buildroots:")

-         states = {0:"INIT", 1:"WAITING", 2:"BUILDING"}

+         states = {0: "INIT", 1: "WAITING", 2: "BUILDING"}

          rows = [('NAME', 'STATE', 'CREATION TIME')]

-         for s in range(0,3):

+         for s in range(0, 3):

              for b in session.listBuildroots(hostID=info['id'], state=s):

                  rows.append((("%s-%s-%s" % (b['tag_name'], b['id'], b['repo_id'])), states[s],

                               b['create_event_time'][:b['create_event_time'].find('.')]))
@@ -3250,35 +3321,35 @@ 

      usage += _("\nclone-tag will create the destination tag if it does not already exist")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option('--config', action='store_true',

-             help=_("Copy config from the source to the dest tag"))

+                       help=_("Copy config from the source to the dest tag"))

      parser.add_option('--groups', action='store_true',

-             help=_("Copy group information"))

+                       help=_("Copy group information"))

      parser.add_option('--pkgs', action='store_true',

-             help=_("Copy package list from the source to the dest tag"))

+                       help=_("Copy package list from the source to the dest tag"))

      parser.add_option('--builds', action='store_true',

-             help=_("Tag builds into the dest tag"))

+                       help=_("Tag builds into the dest tag"))

      parser.add_option('--all', action='store_true',

-             help=_("The same as --config --groups --pkgs --builds"))

+                       help=_("The same as --config --groups --pkgs --builds"))

      parser.add_option('--latest-only', action='store_true',

-             help=_("Tag only the latest build of each package"))

+                       help=_("Tag only the latest build of each package"))

      parser.add_option('--inherit-builds', action='store_true',

-             help=_("Include all builds inherited into the source tag into "

-                    "the dest tag"))

+                       help=_("Include all builds inherited into the source tag into "

+                              "the dest tag"))

      parser.add_option('--ts', type='int', metavar="TIMESTAMP",

-             help=_('Clone tag at last event before specific timestamp'))

+                       help=_('Clone tag at last event before specific timestamp'))

      parser.add_option('--event', type='int',

-             help=_('Clone tag at a specific event'))

+                       help=_('Clone tag at a specific event'))

      parser.add_option('--repo', type='int',

-             help=_('Clone tag at a specific repo event'))

+                       help=_('Clone tag at a specific repo event'))

      parser.add_option("-v", "--verbose", action="store_true",

-             help=_("show changes"))

+                       help=_("show changes"))

      parser.add_option("--notify", action="store_true", default=False,

-             help=_('Send tagging/untagging notifications'))

+                       help=_('Send tagging/untagging notifications'))

      parser.add_option("-f", "--force", action="store_true",

-             help=_("override tag locks if necessary"))

+                       help=_("override tag locks if necessary"))

      parser.add_option("-n", "--test", action="store_true", help=_("test mode"))

      parser.add_option("--batch", type='int', default=1000, metavar='SIZE',

-             help=_("batch size of multicalls [0 to disable, default: %default]"))

+                       help=_("batch size of multicalls [0 to disable, default: %default]"))

      (options, args) = parser.parse_args(args)

  

      if len(args) != 2:
@@ -3307,14 +3378,15 @@ 

      dsttag = session.getTag(args[1])

      if not srctag:

          parser.error(_("Unknown src-tag: %s" % args[0]))

-     if (srctag['locked'] and not options.force) or (dsttag and dsttag['locked'] and not options.force):

+     if (srctag['locked'] and not options.force) \

+             or (dsttag and dsttag['locked'] and not options.force):

          parser.error(_("Error: You are attempting to clone from or to a tag which is locked.\n"

                         "Please use --force if this is what you really want to do."))

  

      # init debug lists.

-     chgpkglist=[]

-     chgbldlist=[]

-     chggrplist=[]

+     chgpkglist = []

+     chgbldlist = []

+     chggrplist = []

      # case of brand new dst-tag.

      if not dsttag:

          if not options.config:
@@ -3485,7 +3557,7 @@ 

              bdellist.extend(dblds)

          baddlist.sort(key=lambda x: x['package_name'])

          bdellist.sort(key=lambda x: x['package_name'])

-         gaddlist = [] # list containing new groups to be added from src tag

+         gaddlist = []  # list containing new groups to be added from src tag

          for (grpname, group) in six.iteritems(srcgroups):

              if grpname not in dstgroups:

                  gaddlist.append(group)
@@ -3502,8 +3574,8 @@ 

                  grpchanges[grpname]['inherited'] = False

                  if dstgroup['tag_id'] != dsttag['id']:

                      grpchanges[grpname]['inherited'] = True

-                 srcgrppkglist=[]

-                 dstgrppkglist=[]

+                 srcgrppkglist = []

+                 dstgrppkglist = []

                  for pkg in group['packagelist']:

                      srcgrppkglist.append(pkg['package'])

                  for pkg in dstgroups[grpname]['packagelist']:
@@ -3702,22 +3774,23 @@ 

              session.multiCall(batch=options.batch)

      # print final list of actions.

      if options.verbose:

-         pfmt='    %-7s %-28s %-10s %-10s %-10s\n'

-         bfmt='    %-7s %-28s %-40s %-10s %-10s %-10s\n'

-         gfmt='    %-7s %-28s %-28s\n'

+         pfmt = '    %-7s %-28s %-10s %-10s %-10s\n'

+         bfmt = '    %-7s %-28s %-40s %-10s %-10s %-10s\n'

+         gfmt = '    %-7s %-28s %-28s\n'

          sys.stdout.write('\nList of changes:\n\n')

          sys.stdout.write(pfmt % ('Action', 'Package', 'Blocked', 'Owner', 'From Tag'))

-         sys.stdout.write(pfmt % ('-'*7, '-'*28, '-'*10, '-'*10, '-'*10))

+         sys.stdout.write(pfmt % ('-' * 7, '-' * 28, '-' * 10, '-' * 10, '-' * 10))

          for changes in chgpkglist:

              sys.stdout.write(pfmt % changes)

          sys.stdout.write('\n')

-         sys.stdout.write(bfmt % ('Action', 'From/To Package', 'Build(s)', 'State', 'Owner', 'From Tag'))

-         sys.stdout.write(bfmt %  ('-'*7, '-'*28, '-'*40, '-'*10, '-'*10, '-'*10))

+         sys.stdout.write(bfmt %

+                          ('Action', 'From/To Package', 'Build(s)', 'State', 'Owner', 'From Tag'))

+         sys.stdout.write(bfmt % ('-' * 7, '-' * 28, '-' * 40, '-' * 10, '-' * 10, '-' * 10))

          for changes in chgbldlist:

              sys.stdout.write(bfmt % changes)

          sys.stdout.write('\n')

          sys.stdout.write(gfmt % ('Action', 'Package', 'Group'))

-         sys.stdout.write(gfmt %  ('-'*7, '-'*28, '-'*28))

+         sys.stdout.write(gfmt % ('-' * 7, '-' * 28, '-' * 28))

          for changes in chggrplist:

              sys.stdout.write(gfmt % changes)

  
@@ -3736,7 +3809,7 @@ 

      if len(args) > 2:

          dest_tag = args[2]

      else:

-         #most targets have the same name as their destination

+         # most targets have the same name as their destination

          dest_tag = name

      activate_session(session, goptions)

      if not (session.hasPerm('admin') or session.hasPerm('target')):
@@ -3775,7 +3848,7 @@ 

          parser.error(_("This action requires target or admin privileges"))

  

      targetInfo = session.getBuildTarget(args[0])

-     if targetInfo == None:

+     if targetInfo is None:

          raise koji.GenericError("No build target with the name or id '%s'" % args[0])

  

      targetInfo['orig_name'] = targetInfo['name']
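
targetInfo == None -> targetInfo is None is pycodestyle E711: == dispatches to
__eq__, which a class may override, while "is" tests identity and cannot be
fooled. A tiny demonstration:

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True  # pathological, but legal

    obj = AlwaysEqual()
    print(obj == None)  # True, and E711 would flag this line
    print(obj is None)  # False: the check the code actually needs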
@@ -3798,7 +3871,8 @@ 

              return 1

          targetInfo['dest_tag_name'] = options.dest_tag

  

-     session.editBuildTarget(targetInfo['orig_name'], targetInfo['name'], targetInfo['build_tag_name'], targetInfo['dest_tag_name'])

+     session.editBuildTarget(targetInfo['orig_name'], targetInfo['name'],

+                             targetInfo['build_tag_name'], targetInfo['dest_tag_name'])

  

  

  def handle_remove_target(goptions, session, args):
@@ -3851,7 +3925,7 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--name", help=_("Specify the build target name"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not print the header information"))

+                       help=_("Do not print the header information"))

      (options, args) = parser.parse_args(args)

      if len(args) != 0:

          parser.error(_("This command takes no arguments"))
@@ -3859,20 +3933,19 @@ 

  

      fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s"

      if not options.quiet:

-         print("%-30s %-30s %-30s" % ('Name','Buildroot','Destination'))

+         print("%-30s %-30s %-30s" % ('Name', 'Buildroot', 'Destination'))

          print("-" * 93)

-     tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)]

-     tmp_list.sort()

+     tmp_list = sorted([(x['name'], x) for x in session.getBuildTargets(options.name)])

      targets = [x[1] for x in tmp_list]

      for target in targets:

          print(fmt % target)

-     #pprint.pprint(session.getBuildTargets())

+     # pprint.pprint(session.getBuildTargets())

  

  

  def _printInheritance(tags, sibdepths=None, reverse=False):

      if len(tags) == 0:

          return

-     if sibdepths == None:

+     if sibdepths is None:

          sibdepths = []

      currtag = tags[0]

      tags = tags[1:]
@@ -3915,11 +3988,13 @@ 

      "[info] Print the inheritance information for a tag"

      usage = _("usage: %prog list-tag-inheritance [options] <tag>")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--reverse", action="store_true", help=_("Process tag's children instead of its parents"))

+     parser.add_option("--reverse", action="store_true",

+                       help=_("Process tag's children instead of its parents"))

      parser.add_option("--stop", help=_("Stop processing inheritance at this tag"))

      parser.add_option("--jump", help=_("Jump from one tag to another when processing inheritance"))

      parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))

-     parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at last event before timestamp"))

+     parser.add_option("--ts", type='int', metavar="TIMESTAMP",

+                       help=_("query at last event before timestamp"))

      parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))

      (options, args) = parser.parse_args(args)

      if len(args) != 1:
@@ -3990,9 +4065,9 @@ 

          if not buildinfo:

              parser.error(_("Invalid build %s" % options.build))

  

-     tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None))

+     tags = session.listTags(buildinfo.get('id', None), pkginfo.get('id', None))

      tags.sort(key=lambda x: x['name'])

-     #if options.verbose:

+     # if options.verbose:

      #    fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s"

      if options.show_id:

          fmt = "%(name)s [%(id)i]"
@@ -4027,7 +4102,8 @@ 

      parser.add_option("--build", help=_("Only show data for a specific build"))

      parser.add_option("--package", help=_("Only show data for a specific package"))

      parser.add_option("--tag", help=_("Only show data for a specific tag"))

-     parser.add_option("--all", action="store_true", help=_("Allows listing the entire global history"))

+     parser.add_option("--all", action="store_true",

+                       help=_("Allows listing the entire global history"))

      (options, args) = parser.parse_args(args)

      koji.util.deprecated("list-tag-history is deprecated and will be removed in a future version. "

                           "See: https://pagure.io/koji/issue/836")
@@ -4092,16 +4168,17 @@ 

          del x['.related']

          bad_edit = None

          if len(edit) != 1:

-             bad_edit = "%i elements" % (len(edit)+1)

+             bad_edit = "%i elements" % (len(edit) + 1)

          other = edit[0]

-         #check edit for sanity

+         # check edit for sanity

          if create or not other[2]:

              bad_edit = "out of order"

          if event_id != other[0]:

              bad_edit = "non-matching"

          if bad_edit:

-             print("Warning: unusual edit at event %i in table %s (%s)" % (event_id, table, bad_edit))

-             #we'll simply treat them as separate events

+             print("Warning: unusual edit at event %i in table %s (%s)" %

+                   (event_id, table, bad_edit))

+             # we'll simply treat them as separate events

              pprint.pprint(entry)

              pprint.pprint(edit)

              _print_histline(entry, **kwargs)
@@ -4241,7 +4318,7 @@ 

          else:

              fmt = "%s entry revoked" % table

      time_str = time.asctime(time.localtime(ts))

-     parts  = [time_str, fmt % x]

+     parts = [time_str, fmt % x]

      if options.events or options.verbose:

          parts.insert(1, "(eid %i)" % event_id)

      if who:
@@ -4260,8 +4337,7 @@ 

          else:

              return '%s.name' % key

      if edit:

-         keys = to_list(x.keys())

-         keys.sort()

+         keys = sorted(to_list(x.keys()))

          y = other[-1]

          for key in keys:

              if key in hidden_fields:
@@ -4275,8 +4351,7 @@ 

                  continue

              print("    %s: %s -> %s" % (key, x[key], y[key]))

      elif create and options.verbose and table != 'tag_listing':

-         keys = to_list(x.keys())

-         keys.sort()

+         keys = sorted(to_list(x.keys()))

          # the table keys have already been represented in the base format string

          also_hidden = list(_table_keys[table])

          also_hidden.extend([get_nkey(k) for k in also_hidden])
@@ -4296,25 +4371,26 @@ 

                  dkey = key

              print("    %s: %s" % (dkey, x[key]))

  

+ 

  _table_keys = {

-     'user_perms' : ['user_id', 'perm_id'],

-     'user_groups' : ['user_id', 'group_id'],

-     'cg_users' : ['user_id', 'cg_id'],

-     'tag_inheritance' : ['tag_id', 'parent_id'],

-     'tag_config' : ['tag_id'],

-     'tag_extra' : ['tag_id', 'key'],

-     'build_target_config' : ['build_target_id'],

-     'external_repo_config' : ['external_repo_id'],

+     'user_perms': ['user_id', 'perm_id'],

+     'user_groups': ['user_id', 'group_id'],

+     'cg_users': ['user_id', 'cg_id'],

+     'tag_inheritance': ['tag_id', 'parent_id'],

+     'tag_config': ['tag_id'],

+     'tag_extra': ['tag_id', 'key'],

+     'build_target_config': ['build_target_id'],

+     'external_repo_config': ['external_repo_id'],

      'host_config': ['host_id'],

      'host_channels': ['host_id', 'channel_id'],

-     'tag_external_repos' : ['tag_id', 'external_repo_id'],

-     'tag_listing' : ['build_id', 'tag_id'],

-     'tag_packages' : ['package_id', 'tag_id'],

-     'tag_package_owners' : ['package_id', 'tag_id'],

-     'group_config' : ['group_id', 'tag_id'],

-     'group_req_listing' : ['group_id', 'tag_id', 'req_id'],

-     'group_package_listing' : ['group_id', 'tag_id', 'package'],

-     }

+     'tag_external_repos': ['tag_id', 'external_repo_id'],

+     'tag_listing': ['build_id', 'tag_id'],

+     'tag_packages': ['package_id', 'tag_id'],

+     'tag_package_owners': ['package_id', 'tag_id'],

+     'group_config': ['group_id', 'tag_id'],

+     'group_req_listing': ['group_id', 'tag_id', 'req_id'],

+     'group_package_listing': ['group_id', 'tag_id', 'package'],

+ }

  

  

  def anon_handle_list_history(goptions, session, args):
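The _table_keys literal above drops the space before each colon; flake8 reports the old style as E203 (whitespace before ':'). Sketch:

    # 'user_perms' : [...]  would be flagged as E203 (whitespace before ':')
    table_keys = {
        'user_perms': ['user_id', 'perm_id'],  # accepted: no space before ':'
    }
    print(table_keys)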
@@ -4325,28 +4401,36 @@ 

      parser.add_option("--build", help=_("Only show data for a specific build"))

      parser.add_option("--package", help=_("Only show data for a specific package"))

      parser.add_option("--tag", help=_("Only show data for a specific tag"))

-     parser.add_option("--editor", "--by", metavar="USER", help=_("Only show entries modified by user"))

+     parser.add_option("--editor", "--by", metavar="USER",

+                       help=_("Only show entries modified by user"))

      parser.add_option("--user", help=_("Only show entries affecting a user"))

      parser.add_option("--permission", help=_("Only show entries relating to a given permission"))

      parser.add_option("--cg", help=_("Only show entries relating to a given permission"))

-     parser.add_option("--external-repo", "--erepo", help=_("Only show entries relating to a given external repo"))

-     parser.add_option("--build-target", "--target", help=_("Only show entries relating to a given build target"))

+     parser.add_option("--external-repo", "--erepo",

+                       help=_("Only show entries relating to a given external repo"))

+     parser.add_option("--build-target", "--target",

+                       help=_("Only show entries relating to a given build target"))

      parser.add_option("--group", help=_("Only show entries relating to a given group"))

      parser.add_option("--host", help=_("Only show entries related to given host"))

      parser.add_option("--channel", help=_("Only show entries related to given channel"))

-     parser.add_option("--before", metavar="TIMESTAMP", help=_("Only show entries before timestamp"))

+     parser.add_option("--before", metavar="TIMESTAMP",

+                       help=_("Only show entries before timestamp"))

      parser.add_option("--after", metavar="TIMESTAMP", help=_("Only show entries after timestamp"))

-     parser.add_option("--before-event", metavar="EVENT_ID", type='int', help=_("Only show entries before event"))

-     parser.add_option("--after-event", metavar="EVENT_ID", type='int', help=_("Only show entries after event"))

+     parser.add_option("--before-event", metavar="EVENT_ID", type='int',

+                       help=_("Only show entries before event"))

+     parser.add_option("--after-event", metavar="EVENT_ID", type='int',

+                       help=_("Only show entries after event"))

      parser.add_option("--watch", action="store_true", help=_("Monitor history data"))

-     parser.add_option("--active", action='store_true', help=_("Only show entries that are currently active"))

+     parser.add_option("--active", action='store_true',

+                       help=_("Only show entries that are currently active"))

      parser.add_option("--revoked", action='store_false', dest='active',

-                             help=_("Only show entries that are currently revoked"))

+                       help=_("Only show entries that are currently revoked"))

      parser.add_option("--context", action="store_true", help=_("Show related entries"))

      parser.add_option("-s", "--show", action="append", help=_("Show data from selected tables"))

      parser.add_option("-v", "--verbose", action="store_true", help=_("Show more detail"))

      parser.add_option("-e", "--events", action="store_true", help=_("Show event ids"))

-     parser.add_option("--all", action="store_true", help=_("Allows listing the entire global history"))

+     parser.add_option("--all", action="store_true",

+                       help=_("Allows listing the entire global history"))

      (options, args) = parser.parse_args(args)

      if len(args) != 0:

          parser.error(_("This command takes no arguments"))
@@ -4366,7 +4450,7 @@ 

              dt = dateutil.parser.parse(val)

              ts = time.mktime(dt.timetuple())

              setattr(options, opt, ts)

-         except:

+         except Exception:

              parser.error(_("Invalid time specification: %s") % val)

      for opt in ('package', 'tag', 'build', 'editor', 'user', 'permission',

                  'cg', 'external_repo', 'build_target', 'group', 'before',
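The timestamp parsing above narrows a bare 'except:' to 'except Exception:' (E722); a bare clause would also swallow KeyboardInterrupt and SystemExit, which should normally propagate. A self-contained sketch of the narrowed handler; the dateutil usage mirrors the surrounding code, the function name is hypothetical:

    import time

    import dateutil.parser  # python-dateutil, already used by the CLI

    def parse_ts(val):
        """Return a Unix timestamp for a date string, or None if unparseable."""
        try:
            dt = dateutil.parser.parse(val)
            return time.mktime(dt.timetuple())
        except Exception:  # bare 'except:' would also trap KeyboardInterrupt/SystemExit
            return None

    print(parse_ts('2020-01-01'))  # a float timestamp
    print(parse_ts('not a date'))  # None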
@@ -4415,11 +4499,11 @@ 

                  if x['revoke_event'] is not None:

                      if distinguish_match(x, 'revoked'):

                          timeline.append((x['revoke_event'], table, 0, x.copy()))

-                     #pprint.pprint(timeline[-1])

+                     # pprint.pprint(timeline[-1])

                  if distinguish_match(x, 'created'):

                      timeline.append((x['create_event'], table, 1, x))

          timeline.sort(key=lambda entry: entry[:3])

-         #group edits together

+         # group edits together

          new_timeline = []

          last_event = None

          edit_index = {}
@@ -4548,7 +4632,8 @@ 

          if len(params) > 2:

              _handleOpts(lines, params[2])

      elif method in ('createLiveCD', 'createAppliance', 'createLiveMedia'):

-         argnames = ['Name', 'Version', 'Release', 'Arch', 'Target Info', 'Build Tag', 'Repo', 'Kickstart File']

+         argnames = ['Name', 'Version', 'Release', 'Arch', 'Target Info', 'Build Tag', 'Repo',

+                     'Kickstart File']

          for n, v in zip(argnames, params):

              lines.append("%s: %s" % (n, v))

          if len(params) > 8:
@@ -4572,7 +4657,8 @@ 

              lines.append("Old Repo ID: %i" % oldrepo['id'])

              lines.append("Old Repo Creation: %s" % koji.formatTimeLong(oldrepo['creation_time']))

          if len(params) > 3:

-             lines.append("External Repos: %s" % ', '.join([ext['external_repo_name'] for ext in params[3]]))

+             lines.append("External Repos: %s" %

+                          ', '.join([ext['external_repo_name'] for ext in params[3]]))

      elif method == 'tagNotification':

          destTag = session.getTag(params[2])

          srcTag = None
@@ -4596,7 +4682,8 @@ 

          lines.append("Subtasks:")

          for subtask in params[1]:

              lines.append("  Method: %s" % subtask[0])

-             lines.append("  Parameters: %s" % ", ".join([str(subparam) for subparam in subtask[1]]))

+             lines.append("  Parameters: %s" %

+                          ", ".join([str(subparam) for subparam in subtask[1]]))

              if len(subtask) > 2 and subtask[2]:

                  subopts = subtask[2]

                  _handleOpts(lines, subopts, prefix='  ')
@@ -4625,7 +4712,7 @@ 

         and its children."""

  

      BUILDDIR = '/var/lib/mock'

-     indent = " "*2*level

+     indent = " " * 2 * level

  

      info = session.getTaskInfo(task_id)

  
@@ -4645,12 +4732,12 @@ 

      for filename in files:

          if filename.endswith('.log'):

              logs += [os.path.join(koji.pathinfo.work(volume=volume),

-                      koji.pathinfo.taskrelpath(task_id),

-                      filename) for volume in files[filename]]

+                                   koji.pathinfo.taskrelpath(task_id),

+                                   filename) for volume in files[filename]]

          else:

              output += [os.path.join(koji.pathinfo.work(volume=volume),

-                        koji.pathinfo.taskrelpath(task_id),

-                        filename) for volume in files[filename]]

+                                     koji.pathinfo.taskrelpath(task_id),

+                                     filename) for volume in files[filename]]

  

      owner = session.getUser(info['owner'])['name']

  
@@ -4674,7 +4761,8 @@ 

      if buildroot_infos:

          print("%sBuildroots:" % indent)

          for root in buildroot_infos:

-             print("%s  %s/%s-%d-%d/" % (indent, BUILDDIR, root['tag_name'], root['id'], root['repo_id']))

+             print("%s  %s/%s-%d-%d/" %

+                   (indent, BUILDDIR, root['tag_name'], root['id'], root['repo_id']))

      if logs:

          print("%sLog Files:" % indent)

          for log_path in logs:
@@ -4699,7 +4787,8 @@ 

      """[info] Show information about a task"""

      usage = _("usage: %prog taskinfo [options] <task_id> [<task_id> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("-r", "--recurse", action="store_true", help=_("Show children of this task as well"))

+     parser.add_option("-r", "--recurse", action="store_true",

+                       help=_("Show children of this task as well"))

      parser.add_option("-v", "--verbose", action="store_true", help=_("Be verbose"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:
@@ -4717,7 +4806,8 @@ 

      usage = _("usage: %prog taginfo [options] <tag> [<tag> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))

-     parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at last event before timestamp"))

+     parser.add_option("--ts", type='int', metavar="TIMESTAMP",

+                       help=_("query at last event before timestamp"))

      parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))

      (options, args) = parser.parse_args(args)

      if len(args) < 1:
@@ -4747,10 +4837,9 @@ 

      for n, info in enumerate(tags):

          if n > 0:

              print('')

-         print("Tag: %(name)s [%(id)d]" %info)

-         print("Arches: %(arches)s" %info)

-         group_list = [x['name'] for x in session.getTagGroups(info['id'], **event_opts)]

-         group_list.sort()

+         print("Tag: %(name)s [%(id)d]" % info)

+         print("Arches: %(arches)s" % info)

+         group_list = sorted([x['name'] for x in session.getTagGroups(info['id'], **event_opts)])

          print("Groups: " + ', '.join(group_list))

          if info.get('locked'):

              print('LOCKED')
@@ -4759,7 +4848,8 @@ 

              print("Required permission: %r" % perms.get(perm_id, perm_id))

          if session.mavenEnabled():

              print("Maven support?: %s" % (info['maven_support'] and 'yes' or 'no'))

-             print("Include all Maven archives?: %s" % (info['maven_include_all'] and 'yes' or 'no'))

+             print("Include all Maven archives?: %s" %

+                   (info['maven_include_all'] and 'yes' or 'no'))

          if 'extra' in info:

              print("Tag options:")

              for key in sorted(info['extra'].keys()):
@@ -4781,7 +4871,8 @@ 

                  if event:

                      print("  %s (%s)" % (target['name'], target['build_tag_name']))

                  else:

-                     print("  %s (%s, %s)" % (target['name'], target['build_tag_name'], repos[target['build_tag']]))

+                     print("  %s (%s, %s)" %

+                           (target['name'], target['build_tag_name'], repos[target['build_tag']]))

          if build_targets:

              print("This tag is a buildroot for one or more targets")

              if not event:
@@ -4810,8 +4901,10 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--parent", help=_("Specify parent"))

      parser.add_option("--arches", help=_("Specify arches"))

-     parser.add_option("--maven-support", action="store_true", help=_("Enable creation of Maven repos for this tag"))

-     parser.add_option("--include-all", action="store_true", help=_("Include all packages in this tag when generating Maven repos"))

+     parser.add_option("--maven-support", action="store_true",

+                       help=_("Enable creation of Maven repos for this tag"))

+     parser.add_option("--include-all", action="store_true",

+                       help=_("Include all packages in this tag when generating Maven repos"))

      parser.add_option("-x", "--extra", action="append", default=[], metavar="key=value",

                        help=_("Set tag extra option"))

      (options, args) = parser.parse_args(args)
@@ -4836,7 +4929,7 @@ 

              value = arg_filter(value)

              extra[key] = value

          opts['extra'] = extra

-     session.createTag(args[0],**opts)

+     session.createTag(args[0], **opts)

  

  

  def handle_edit_tag(goptions, session, args):
@@ -4849,10 +4942,15 @@ 

      parser.add_option("--lock", action="store_true", help=_("Lock the tag"))

      parser.add_option("--unlock", action="store_true", help=_("Unlock the tag"))

      parser.add_option("--rename", help=_("Rename the tag"))

-     parser.add_option("--maven-support", action="store_true", help=_("Enable creation of Maven repos for this tag"))

-     parser.add_option("--no-maven-support", action="store_true", help=_("Disable creation of Maven repos for this tag"))

-     parser.add_option("--include-all", action="store_true", help=_("Include all packages in this tag when generating Maven repos"))

-     parser.add_option("--no-include-all", action="store_true", help=_("Do not include all packages in this tag when generating Maven repos"))

+     parser.add_option("--maven-support", action="store_true",

+                       help=_("Enable creation of Maven repos for this tag"))

+     parser.add_option("--no-maven-support", action="store_true",

+                       help=_("Disable creation of Maven repos for this tag"))

+     parser.add_option("--include-all", action="store_true",

+                       help=_("Include all packages in this tag when generating Maven repos"))

+     parser.add_option("--no-include-all", action="store_true",

+                       help=_("Do not include all packages in this tag when generating Maven "

+                              "repos"))

      parser.add_option("-x", "--extra", action="append", default=[], metavar="key=value",

                        help=_("Set tag extra option"))

      parser.add_option("-r", "--remove-extra", action="append", default=[], metavar="key",
@@ -4892,7 +4990,7 @@ 

          opts['extra'] = extra

      if options.remove_extra:

          opts['remove_extra'] = options.remove_extra

-     #XXX change callname

+     # XXX change callname

      session.editTag2(tag, **opts)

  

  
@@ -4927,7 +5025,7 @@ 

          selected = [session.getTag(name, strict=True) for name in args]

      for tag in selected:

          if options.master:

-             #set the master lock

+             # set the master lock

              if tag['locked']:

                  print(_("Tag %s: master lock already set") % tag['name'])

                  continue
@@ -4940,7 +5038,8 @@ 

                  print(_("Tag %s: %s permission already required") % (tag['name'], perm))

                  continue

              elif options.test:

-                 print(_("Would have set permission requirement %s for tag %s") % (perm, tag['name']))

+                 print(_("Would have set permission requirement %s for tag %s") %

+                       (perm, tag['name']))

                  continue

              session.editTag2(tag['id'], perm=perm_id)

  
@@ -5001,7 +5100,8 @@ 

      (options, args) = parser.parse_args(args)

  

      if len(args) != 2:

-         parser.error(_("This command takes exctly two argument: a tag name or ID and that tag's new parent name or ID"))

+         parser.error(_("This command takes exactly two arguments: a tag name or ID and that "

+                        "tag's new parent name or ID"))

  

      activate_session(session, goptions)

  
@@ -5019,12 +5119,14 @@ 

      samePriority = [datum for datum in inheritanceData if datum['priority'] == priority]

  

      if sameParents and not options.force:

-         print(_("Error: You are attempting to add %s as %s's parent even though it already is %s's parent.")

-                     % (parent['name'], tag['name'], tag['name']))

+         print(_("Error: You are attempting to add %s as %s's parent even though it already is "

+                 "%s's parent.")

+               % (parent['name'], tag['name'], tag['name']))

          print(_("Please use --force if this is what you really want to do."))

          return

      if samePriority:

-         print(_("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority." % tag['name']))

+         print(_("Error: There is already an active inheritance with that priority on %s, "

+                 "please specify a different priority with --priority.") % tag['name'])

          return

  

      new_data = {}
@@ -5057,7 +5159,8 @@ 

          parser.error(_("This command takes at least one argument: a tag name or ID"))

  

      if len(args) > 3:

-         parser.error(_("This command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority"))

+         parser.error(_("This command takes at most three arguments: a tag name or ID, "

+                        "a parent tag name or ID, and a priority"))

  

      activate_session(session, goptions)

  
@@ -5100,11 +5203,12 @@ 

      inheritanceData = session.getInheritanceData(tag['id'])

      samePriority = [datum for datum in inheritanceData if datum['priority'] == options.priority]

      if samePriority:

-         print(_("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority.") % tag['name'])

+         print(_("Error: There is already an active inheritance with that priority on %s, "

+                 "please specify a different priority with --priority.") % tag['name'])

          return 1

  

      new_data = data.copy()

-     if options.priority is not None  and options.priority.isdigit():

+     if options.priority is not None and options.priority.isdigit():

          new_data['priority'] = int(options.priority)

      if options.maxdepth is not None:

          if options.maxdepth.isdigit():
@@ -5137,7 +5241,8 @@ 

          parser.error(_("This command takes at least one argument: a tag name or ID"))

  

      if len(args) > 3:

-         parser.error(_("This command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority"))

+         parser.error(_("This command takes at most three arguments: a tag name or ID, a parent "

+                        "tag name or ID, and a priority"))

  

      activate_session(session, goptions)

  
@@ -5196,7 +5301,8 @@ 

      parser.add_option("-x", "--expand", action="store_true", default=False,

                        help=_("Expand groups in comps format"))

      parser.add_option("--spec", action="store_true", help=_("Print build spec"))

-     parser.add_option("--show-blocked", action="store_true", dest="incl_blocked", help=_("Show blocked packages"))

+     parser.add_option("--show-blocked", action="store_true", dest="incl_blocked",

+                       help=_("Show blocked packages"))

      (options, args) = parser.parse_args(args)

      if len(args) != 1:

          parser.error(_("Incorrect number of arguments"))
@@ -5211,7 +5317,7 @@ 

      if options.comps:

          print(koji.generate_comps(groups, expand_groups=options.expand))

      elif options.spec:

-         print(koji.make_groups_spec(groups,name='buildgroups',buildgroup='build'))

+         print(koji.make_groups_spec(groups, name='buildgroups', buildgroup='build'))

      else:

          pprint.pprint(groups)

  
@@ -5220,16 +5326,18 @@ 

      "[info] List external repos"

      usage = _("usage: %prog list-external-repos [options]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--url",  help=_("Select by url"))

-     parser.add_option("--name",  help=_("Select by name"))

+     parser.add_option("--url", help=_("Select by url"))

+     parser.add_option("--name", help=_("Select by name"))

      parser.add_option("--id", type="int", help=_("Select by id"))

      parser.add_option("--tag", help=_("Select by tag"))

      parser.add_option("--used", action='store_true', help=_("List which tags use the repo(s)"))

-     parser.add_option("--inherit", action='store_true', help=_("Follow tag inheritance when selecting by tag"))

+     parser.add_option("--inherit", action='store_true',

+                       help=_("Follow tag inheritance when selecting by tag"))

      parser.add_option("--event", type='int', metavar="EVENT#", help=_("Query at event"))

-     parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("Query at last event before timestamp"))

+     parser.add_option("--ts", type='int', metavar="TIMESTAMP",

+                       help=_("Query at last event before timestamp"))

      parser.add_option("--repo", type='int', metavar="REPO#",

-                             help=_("Query at event corresponding to (nonexternal) repo"))

+                       help=_("Query at event corresponding to (nonexternal) repo"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

                        help=_("Do not display the column headers"))

      (options, args) = parser.parse_args(args)
@@ -5263,7 +5371,7 @@ 

          format = "basic"

          opts['info'] = options.id or options.name or None

          opts['url'] = options.url or None

-         data = session.listExternalRepos (**opts)

+         data = session.listExternalRepos(**opts)

  

      # There are three different output formats

      #  1) Listing just repo data (name, url)
@@ -5272,15 +5380,15 @@ 

      if format == "basic":

          format = "%(name)-25s %(url)s"

          header1 = "%-25s %s" % ("External repo name", "URL")

-         header2 = "%s %s" % ("-"*25, "-"*40)

+         header2 = "%s %s" % ("-" * 25, "-" * 40)

      elif format == "tag":

          format = "%(priority)-3i %(external_repo_name)-25s %(merge_mode)-10s %(url)s"

          header1 = "%-3s %-25s %-10s URL" % ("Pri", "External repo name", "Mode")

-         header2 = "%s %s %s %s" % ("-"*3, "-"*25, "-"*10, "-"*40)

+         header2 = "%s %s %s %s" % ("-" * 3, "-" * 25, "-" * 10, "-" * 40)

      elif format == "multitag":

          format = "%(tag_name)-20s %(priority)-3i %(merge_mode)-10s %(external_repo_name)s"

          header1 = "%-20s %-3s %-10s %s" % ("Tag", "Pri", "Mode", "External repo name")

-         header2 = "%s %s %s %s" % ("-"*20, "-"*3, "-"*10, "-"*25)

+         header2 = "%s %s %s %s" % ("-" * 20, "-" * 3, "-" * 10, "-" * 25)

      if not options.quiet:

          print(header1)

          print(header2)
@@ -5293,12 +5401,12 @@ 

  def _pick_external_repo_priority(session, tag):

      """pick priority after current ones, leaving space for later insertions"""

      repolist = session.getTagExternalRepos(tag_info=tag)

-     #ordered by priority

+     # ordered by priority

      if not repolist:

          priority = 5

      else:

          priority = (repolist[-1]['priority'] + 7) // 5 * 5

-         #at least 3 higher than current max and a multiple of 5

+         # at least 3 higher than current max and a multiple of 5

      return priority

  

  
@@ -5356,16 +5464,16 @@ 

              if options.mode:

                  callopts['merge_mode'] = options.mode

              session.addExternalRepoToTag(tag, rinfo['name'], priority, **callopts)

-             print("Added external repo %s to tag %s (priority %i)" \

-                     % (rinfo['name'], tag, priority))

+             print("Added external repo %s to tag %s (priority %i)"

+                   % (rinfo['name'], tag, priority))

  

  

  def handle_edit_external_repo(goptions, session, args):

      "[admin] Edit data for an external repo"

      usage = _("usage: %prog edit-external-repo <name>")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--url",  help=_("Change the url"))

-     parser.add_option("--name",  help=_("Change the name"))

+     parser.add_option("--url", help=_("Change the url"))

+     parser.add_option("--name", help=_("Change the name"))

      (options, args) = parser.parse_args(args)

      if len(args) != 1:

          parser.error(_("Incorrect number of arguments"))
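The print above loses its trailing backslash and breaks before the '%' operator instead of after it, matching the selected style (W504, line break after binary operator, is in the ignore list). Sketch with illustrative values:

    rinfo = {'name': 'epel-7'}  # illustrative values
    tag, priority = 'f30-build', 10

    # The parentheses already continue the statement, so no backslash is
    # needed, and the '%' lands at the start of the continuation line.
    print("Added external repo %s to tag %s (priority %i)"
          % (rinfo['name'], tag, priority))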
@@ -5404,15 +5512,16 @@ 

              return 0

          tags = current_tags

      if delete:

-         #removing entirely

+         # removing entirely

          if current_tags and not options.force:

-             print(_("Error: external repo %s used by tag(s): %s") % (repo, ', '.join(current_tags)))

+             print(_("Error: external repo %s used by tag(s): %s") %

+                   (repo, ', '.join(current_tags)))

              print(_("Use --force to remove anyway"))

              return 1

          session.deleteExternalRepo(args[0])

      else:

          for tag in tags:

-             if not tag in current_tags:

+             if tag not in current_tags:

                  print(_("External repo %s not associated with tag %s") % (repo, tag))

                  continue

              session.removeExternalRepoFromTag(tag, repo)
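'if not tag in current_tags' becomes 'if tag not in current_tags' (E713); the two compile to the same membership test, but 'not in' states the intent directly. Minimal sketch with illustrative values:

    current_tags = ['f30-build', 'f31-build']  # illustrative values
    tag = 'f29-build'

    # 'if not tag in current_tags' parses as 'not (tag in ...)' and still
    # works, but E713 prefers the explicit operator:
    if tag not in current_tags:
        print("External repo not associated with tag %s" % tag)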
@@ -5424,31 +5533,31 @@ 

      """[build] Create a live CD image given a kickstart file"""

  

      # Usage & option parsing.

-     usage = _("usage: %prog spin-livecd [options] <name> <version> <target>" +

-               " <arch> <kickstart-file>")

+     usage = _("usage: %prog spin-livecd [options] <name> <version> <target> <arch> "

+               "<kickstart-file>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--wait", action="store_true",

-         help=_("Wait on the livecd creation, even if running in the background"))

+                       help=_("Wait on the livecd creation, even if running in the background"))

      parser.add_option("--nowait", action="store_false", dest="wait",

-         help=_("Don't wait on livecd creation"))

+                       help=_("Don't wait on livecd creation"))

      parser.add_option("--noprogress", action="store_true",

-         help=_("Do not display progress of the upload"))

+                       help=_("Do not display progress of the upload"))

      parser.add_option("--background", action="store_true",

-         help=_("Run the livecd creation task at a lower priority"))

+                       help=_("Run the livecd creation task at a lower priority"))

      parser.add_option("--ksurl", metavar="SCMURL",

-         help=_("The URL to the SCM containing the kickstart file"))

+                       help=_("The URL to the SCM containing the kickstart file"))

      parser.add_option("--ksversion", metavar="VERSION",

-         help=_("The syntax version used in the kickstart file"))

+                       help=_("The syntax version used in the kickstart file"))

      parser.add_option("--scratch", action="store_true",

-         help=_("Create a scratch LiveCD image"))

+                       help=_("Create a scratch LiveCD image"))

      parser.add_option("--repo", action="append",

-         help=_("Specify a repo that will override the repo used to install " +

-                "RPMs in the LiveCD. May be used multiple times. The " +

-                "build tag repo associated with the target is the default."))

+                       help=_("Specify a repo that will override the repo used to install "

+                              "RPMs in the LiveCD. May be used multiple times. The "

+                              "build tag repo associated with the target is the default."))

      parser.add_option("--release", help=_("Forcibly set the release field"))

      parser.add_option("--volid", help=_("Set the volume id"))

      parser.add_option("--specfile", metavar="URL",

-         help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

+                       help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

      parser.add_option("--skip-tag", action="store_true",

                        help=_("Do not attempt to tag package"))

      (task_options, args) = parser.parse_args(args)
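The long usage and help strings in this hunk drop the trailing '+' operators; adjacent string literals are concatenated by the parser itself, so wrapped messages need no operator at all and sidestep the line-break-around-binary-operator checks. Sketch:

    # Adjacent string literals are joined at compile time, no '+' needed:
    usage = ("usage: %prog spin-livecd [options] <name> <version> <target> <arch> "
             "<kickstart-file>")
    assert usage == ("usage: %prog spin-livecd [options] <name> <version> "
                     "<target> <arch> <kickstart-file>")
    print(usage)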
@@ -5456,8 +5565,8 @@ 

      # Make sure the target and kickstart is specified.

      print('spin-livecd is deprecated and will be replaced with spin-livemedia')

      if len(args) != 5:

-         parser.error(_("Five arguments are required: a name, a version, an" +

-                        " architecture, a build target, and a relative path to" +

+         parser.error(_("Five arguments are required: a name, a version, a"

+                        " build target, an architecture, and a relative path to"


                         " a kickstart file."))

      if task_options.volid is not None and len(task_options.volid) > 32:

          parser.error(_('Volume ID has a maximum length of 32 characters'))
@@ -5469,38 +5578,39 @@ 

      """[build] Create a livemedia image given a kickstart file"""

  

      # Usage & option parsing.

-     usage = _("usage: %prog spin-livemedia [options] <name> <version> <target>" +

-               " <arch> <kickstart-file>")

+     usage = _("usage: %prog spin-livemedia [options] <name> <version> <target> <arch> "

+               "<kickstart-file>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--wait", action="store_true",

-         help=_("Wait on the livemedia creation, even if running in the background"))

+                       help=_("Wait on the livemedia creation, even if running in the background"))

      parser.add_option("--nowait", action="store_false", dest="wait",

-         help=_("Don't wait on livemedia creation"))

+                       help=_("Don't wait on livemedia creation"))

      parser.add_option("--noprogress", action="store_true",

-         help=_("Do not display progress of the upload"))

+                       help=_("Do not display progress of the upload"))

      parser.add_option("--background", action="store_true",

-         help=_("Run the livemedia creation task at a lower priority"))

+                       help=_("Run the livemedia creation task at a lower priority"))

      parser.add_option("--ksurl", metavar="SCMURL",

-         help=_("The URL to the SCM containing the kickstart file"))

+                       help=_("The URL to the SCM containing the kickstart file"))

      parser.add_option("--install-tree-url", metavar="URL",

-         help=_("Provide the URL for the install tree"))

+                       help=_("Provide the URL for the install tree"))

      parser.add_option("--ksversion", metavar="VERSION",

-         help=_("The syntax version used in the kickstart file"))

+                       help=_("The syntax version used in the kickstart file"))

      parser.add_option("--scratch", action="store_true",

-         help=_("Create a scratch LiveMedia image"))

+                       help=_("Create a scratch LiveMedia image"))

      parser.add_option("--repo", action="append",

-         help=_("Specify a repo that will override the repo used to install " +

-                "RPMs in the LiveMedia. May be used multiple times. The " +

-                "build tag repo associated with the target is the default."))

+                       help=_("Specify a repo that will override the repo used to install "

+                              "RPMs in the LiveMedia. May be used multiple times. The "

+                              "build tag repo associated with the target is the default."))

      parser.add_option("--release", help=_("Forcibly set the release field"))

      parser.add_option("--volid", help=_("Set the volume id"))

      parser.add_option("--specfile", metavar="URL",

-         help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

+                       help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

      parser.add_option("--skip-tag", action="store_true",

                        help=_("Do not attempt to tag package"))

      parser.add_option("--can-fail", action="store", dest="optional_arches",

-         metavar="ARCH1,ARCH2,...", default="",

-         help=_("List of archs which are not blocking for build (separated by commas."))

+                       metavar="ARCH1,ARCH2,...", default="",

+                       help=_("List of archs which are not blocking for build "

+                              "(separated by commas)."))

      parser.add_option('--lorax_dir', metavar='DIR',

                        help=_('The relative path to the lorax templates '

                               'directory within the checkout of "lorax_url".'))
@@ -5512,9 +5622,9 @@ 

  

      # Make sure the target and kickstart is specified.

      if len(args) != 5:

-         parser.error(_("Five arguments are required: a name, a version, a" +

-                        " build target, an architecture, and a relative path to" +

-                        " a kickstart file."))

+         parser.error(_("Five arguments are required: a name, a version, a "

+                        "build target, an architecture, and a relative path to "

+                        "a kickstart file."))

      if task_options.lorax_url is not None and task_options.lorax_dir is None:

          parser.error(_('The "--lorax_url" option requires that "--lorax_dir" '

                         'also be used.'))
@@ -5529,97 +5639,99 @@ 

      """[build] Create an appliance given a kickstart file"""

  

      # Usage & option parsing

-     usage = _("usage: %prog spin-appliance [options] <name> <version> " +

-               "<target> <arch> <kickstart-file>")

+     usage = _("usage: %prog spin-appliance [options] <name> <version> <target> <arch> "

+               "<kickstart-file>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--wait", action="store_true",

-         help=_("Wait on the appliance creation, even if running in the background"))

+                       help=_("Wait on the appliance creation, even if running in the background"))

      parser.add_option("--nowait", action="store_false", dest="wait",

-         help=_("Don't wait on appliance creation"))

+                       help=_("Don't wait on appliance creation"))

      parser.add_option("--noprogress", action="store_true",

-         help=_("Do not display progress of the upload"))

+                       help=_("Do not display progress of the upload"))

      parser.add_option("--background", action="store_true",

-         help=_("Run the appliance creation task at a lower priority"))

+                       help=_("Run the appliance creation task at a lower priority"))

      parser.add_option("--ksurl", metavar="SCMURL",

-         help=_("The URL to the SCM containing the kickstart file"))

+                       help=_("The URL to the SCM containing the kickstart file"))

      parser.add_option("--ksversion", metavar="VERSION",

-         help=_("The syntax version used in the kickstart file"))

+                       help=_("The syntax version used in the kickstart file"))

      parser.add_option("--scratch", action="store_true",

-         help=_("Create a scratch appliance"))

+                       help=_("Create a scratch appliance"))

      parser.add_option("--repo", action="append",

-         help=_("Specify a repo that will override the repo used to install " +

-                "RPMs in the appliance. May be used multiple times. The " +

-                "build tag repo associated with the target is the default."))

+                       help=_("Specify a repo that will override the repo used to install "

+                              "RPMs in the appliance. May be used multiple times. The "

+                              "build tag repo associated with the target is the default."))

      parser.add_option("--release", help=_("Forcibly set the release field"))

      parser.add_option("--specfile", metavar="URL",

-         help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

+                       help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

      parser.add_option("--skip-tag", action="store_true",

                        help=_("Do not attempt to tag package"))

      parser.add_option("--vmem", metavar="VMEM", default=None,

-         help=_("Set the amount of virtual memory in the appliance in MB, " +

-                "default is 512"))

+                       help=_("Set the amount of virtual memory in the appliance in MB, "

+                              "default is 512"))

      parser.add_option("--vcpu", metavar="VCPU", default=None,

-         help=_("Set the number of virtual cpus in the appliance, " +

-                "default is 1"))

+                       help=_("Set the number of virtual cpus in the appliance, "

+                              "default is 1"))

      parser.add_option("--format", metavar="DISK_FORMAT", default='raw',

-         help=_("Disk format, default is raw. Other options are qcow, " +

-                "qcow2, and vmx."))

+                       help=_("Disk format, default is raw. Other options are qcow, "

+                              "qcow2, and vmx."))

  

      (task_options, args) = parser.parse_args(args)

  

      # Make sure the target and kickstart is specified.

      print('spin-appliance is deprecated and will be replaced with image-build')

      if len(args) != 5:

-         parser.error(_("Five arguments are required: a name, a version, " +

-                        "an architecture, a build target, and a relative path" +

-                        " to a kickstart file."))

+         parser.error(_("Five arguments are required: a name, a version, "

+                        "a build target, an architecture, and a relative path "

+                        "to a kickstart file."))

      return _build_image(options, task_options, session, args, 'appliance')

  

  

  def handle_image_build_indirection(options, session, args):

      """[build] Create a disk image using other disk images via the Indirection plugin"""

-     usage = _("usage: %prog image-build-indirection [base_image] " +

+     usage = _("usage: %prog image-build-indirection [base_image] "

                "[utility_image] [indirection_build_template]")

      usage += _("\n       %prog image-build --config <FILE>\n")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--config",

-         help=_("Use a configuration file to define image-build options " +

-                "instead of command line options (they will be ignored)."))

+                       help=_("Use a configuration file to define image-build options "

+                              "instead of command line options (they will be ignored)."))

      parser.add_option("--background", action="store_true",

-         help=_("Run the image creation task at a lower priority"))

+                       help=_("Run the image creation task at a lower priority"))

      parser.add_option("--name",

-          help=_("Name of the output image"))

+                       help=_("Name of the output image"))

      parser.add_option("--version",

-          help=_("Version of the output image"))

+                       help=_("Version of the output image"))

      parser.add_option("--release",

-          help=_("Release of the output image"))

+                       help=_("Release of the output image"))

      parser.add_option("--arch",

-          help=_("Architecture of the output image and input images"))

+                       help=_("Architecture of the output image and input images"))

      parser.add_option("--target",

-          help=_("Build target to use for the indirection build"))

+                       help=_("Build target to use for the indirection build"))

      parser.add_option("--skip-tag", action="store_true",

-          help=_("Do not tag the resulting build"))

+                       help=_("Do not tag the resulting build"))

      parser.add_option("--base-image-task",

-          help=_("ID of the createImage task of the base image to be used"))

+                       help=_("ID of the createImage task of the base image to be used"))

      parser.add_option("--base-image-build",

-          help=_("NVR or build ID of the base image to be used"))

+                       help=_("NVR or build ID of the base image to be used"))

      parser.add_option("--utility-image-task",

-          help=_("ID of the createImage task of the utility image to be used"))

+                       help=_("ID of the createImage task of the utility image to be used"))

      parser.add_option("--utility-image-build",

-          help=_("NVR or build ID of the utility image to be used"))

+                       help=_("NVR or build ID of the utility image to be used"))

      parser.add_option("--indirection-template",

-         help=_("Name of the local file, or SCM file containing the template used to drive the indirection plugin"))

+                       help=_("Name of the local file, or SCM file containing the template used to "

+                              "drive the indirection plugin"))

      parser.add_option("--indirection-template-url",

-         help=_("SCM URL containing the template used to drive the indirection plugin"))

+                       help=_("SCM URL containing the template used to drive the indirection "

+                              "plugin"))

      parser.add_option("--results-loc",

-         help=_("Relative path inside the working space image where the results should be extracted from"))

+                       help=_("Relative path inside the working space image where the results "

+                              "should be extracted from"))

      parser.add_option("--scratch", action="store_true",

-         help=_("Create a scratch image"))

+                       help=_("Create a scratch image"))

      parser.add_option("--wait", action="store_true",

-         help=_("Wait on the image creation, even if running in the background"))

+                       help=_("Wait on the image creation, even if running in the background"))

      parser.add_option("--noprogress", action="store_true",

-         help=_("Do not display progress of the upload"))

- 

+                       help=_("Do not display progress of the upload"))

  

      (task_options, args) = parser.parse_args(args)

      _build_image_indirection(options, task_options, session, args)
@@ -5639,17 +5751,19 @@ 

              bool(task_opts.base_image_build)):

          raise koji.GenericError(_("You must specify either a base-image task or build ID/NVR"))

  

-     required_opts = [ 'name', 'version', 'arch', 'target', 'indirection_template', 'results_loc' ]

-     optional_opts = [ 'indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build',

-                       'base_image_task', 'base_image_build', 'release', 'skip_tag' ]

+     required_opts = ['name', 'version', 'arch', 'target', 'indirection_template', 'results_loc']

+     optional_opts = ['indirection_template_url', 'scratch', 'utility_image_task',

+                      'utility_image_build', 'base_image_task', 'base_image_build', 'release',

+                      'skip_tag']

  

-     missing = [ ]

+     missing = []

      for opt in required_opts:

          if not getattr(task_opts, opt, None):

              missing.append(opt)

  

      if len(missing) > 0:

-         print("Missing the following required options: %s" % ' '.join(['--%s' % o.replace('_','-') for o in missing]))

+         print("Missing the following required options: %s" %

+               ' '.join(['--%s' % o.replace('_', '-') for o in missing]))

          raise koji.GenericError(_("Missing required options specified above"))

  

      activate_session(session, options)
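The list literals above lose their inner padding: 'required_opts = [ ... ]' and 'missing = [ ]' are reported as E201 (whitespace after '[') and E202 (whitespace before ']'). Sketch with an illustrative subset:

    required_opts = ['name', 'version', 'arch', 'target']  # was: [ 'name', ... ]
    missing = []                                           # was: [ ]
    # [ 'name' ] draws E201 (space after '[') and E202 (space before ']').
    print(required_opts, missing)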
@@ -5674,26 +5788,26 @@ 

      dest_tag = session.getTag(tmp_target['dest_tag'])

      if not dest_tag:

          raise koji.GenericError(_("Unknown destination tag: %s" %

-                                    tmp_target['dest_tag_name']))

+                                   tmp_target['dest_tag_name']))

  

      # Set the architecture

      task_opts.arch = koji.canonArch(task_opts.arch)

  

- 

      # Upload the indirection template file to the staging area.

      # If it's a URL, it's kojid's job to go get it when it does the checkout.

      if not task_opts.indirection_template_url:

          if not task_opts.scratch:

              # only scratch builds can omit indirection_template_url

-             raise koji.GenericError(_("Non-scratch builds must provide a URL for the indirection template"))

+             raise koji.GenericError(

+                 _("Non-scratch builds must provide a URL for the indirection template"))

          templatefile = task_opts.indirection_template

          serverdir = unique_path('cli-image-indirection')

          session.uploadWrapper(templatefile, serverdir, callback=callback)

          task_opts.indirection_template = os.path.join('work', serverdir,

-             os.path.basename(templatefile))

+                                                       os.path.basename(templatefile))

          print('')

  

-     hub_opts = { }

+     hub_opts = {}

      # Just pass everything in as opts.  No positional arguments at all.  Why not?

      for opt in required_opts + optional_opts:

          val = getattr(task_opts, opt, None)
@@ -5708,10 +5822,10 @@ 

      if not options.quiet:

          print("Created task: %d" % task_id)

          print("Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id))

-     #if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):

+     # if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):

      #    session.logout()

      #    return watch_tasks(session, [task_id], quiet=options.quiet)

-     #else:

+     # else:

      #    return

  

  
@@ -5721,62 +5835,63 @@ 

                 'vsphere-ova', 'vagrant-virtualbox', 'vagrant-libvirt',

                 'vagrant-vmware-fusion', 'vagrant-hyperv', 'docker', 'raw-xz',

                 'liveimg-squashfs', 'tar-gz')

-     usage = _("usage: %prog image-build [options] <name> <version> " +

+     usage = _("usage: %prog image-build [options] <name> <version> "

                "<target> <install-tree-url> <arch> [<arch> ...]")

      usage += _("\n       %prog image-build --config <FILE>\n")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--background", action="store_true",

-         help=_("Run the image creation task at a lower priority"))

+                       help=_("Run the image creation task at a lower priority"))

      parser.add_option("--config",

-         help=_("Use a configuration file to define image-build options " +

-                "instead of command line options (they will be ignored)."))

+                       help=_("Use a configuration file to define image-build options "

+                              "instead of command line options (they will be ignored)."))

      parser.add_option("--disk-size", default=10,

-         help=_("Set the disk device size in gigabytes"))

+                       help=_("Set the disk device size in gigabytes"))

      parser.add_option("--distro",

-         help=_("specify the RPM based distribution the image will be based " +

-              "on with the format RHEL-X.Y, CentOS-X.Y, SL-X.Y, or Fedora-NN. " +

-              "The packages for the Distro you choose must have been built " +

-              "in this system."))

+                       help=_("specify the RPM based distribution the image will be based "

+                              "on with the format RHEL-X.Y, CentOS-X.Y, SL-X.Y, or Fedora-NN. "

+                              "The packages for the Distro you choose must have been built "

+                              "in this system."))

      parser.add_option("--format", default=[], action="append",

-         help=_("Convert results to one or more formats " +

-                "(%s), this option may be used " % ', '.join(formats) +

-                "multiple times. By default, specifying this option will " +

-                "omit the raw disk image (which is 10G in size) from the " +

-                "build results. If you really want it included with converted " +

-                "images, pass in 'raw' as an option."))

+                       help=_("Convert results to one or more formats "

+                              "(%s), this option may be used "

+                              "multiple times. By default, specifying this option will "

+                              "omit the raw disk image (which is 10G in size) from the "

+                              "build results. If you really want it included with converted "

+                              "images, pass in 'raw' as an option.") % ', '.join(formats))

      parser.add_option("--kickstart", help=_("Path to a local kickstart file"))

      parser.add_option("--ksurl", metavar="SCMURL",

-         help=_("The URL to the SCM containing the kickstart file"))

+                       help=_("The URL to the SCM containing the kickstart file"))

      parser.add_option("--ksversion", metavar="VERSION",

-         help=_("The syntax version used in the kickstart file"))

+                       help=_("The syntax version used in the kickstart file"))

      parser.add_option("--noprogress", action="store_true",

-         help=_("Do not display progress of the upload"))

+                       help=_("Do not display progress of the upload"))

      parser.add_option("--nowait", action="store_false", dest="wait",

-         help=_("Don't wait on image creation"))

+                       help=_("Don't wait on image creation"))

      parser.add_option("--ova-option", action="append",

-         help=_("Override a value in the OVA description XML. Provide a value " +

-                "in a name=value format, such as 'ovf_memory_mb=6144'"))

+                       help=_("Override a value in the OVA description XML. Provide a value "

+                              "in a name=value format, such as 'ovf_memory_mb=6144'"))

      parser.add_option("--factory-parameter", nargs=2, action="append",

-         help=_("Pass a parameter to Image Factory. The results are highly specific " +

-                "to the image format being created. This is a two argument parameter " +

-                "that can be specified an arbitrary number of times. For example: "

-                "--factory-parameter docker_cmd '[ \"/bin/echo Hello World\" ]'"))

+                       help=_("Pass a parameter to Image Factory. The results are highly specific "

+                              "to the image format being created. This is a two argument parameter "

+                              "that can be specified an arbitrary number of times. For example: "

+                              "--factory-parameter docker_cmd '[ \"/bin/echo Hello World\" ]'"))

      parser.add_option("--release", help=_("Forcibly set the release field"))

      parser.add_option("--repo", action="append",

-         help=_("Specify a repo that will override the repo used to install " +

-                "RPMs in the image. May be used multiple times. The " +

-                "build tag repo associated with the target is the default."))

+                       help=_("Specify a repo that will override the repo used to install "

+                              "RPMs in the image. May be used multiple times. The "

+                              "build tag repo associated with the target is the default."))

      parser.add_option("--scratch", action="store_true",

-         help=_("Create a scratch image"))

+                       help=_("Create a scratch image"))

      parser.add_option("--skip-tag", action="store_true",

                        help=_("Do not attempt to tag package"))

      parser.add_option("--can-fail", action="store", dest="optional_arches",

-         metavar="ARCH1,ARCH2,...", default="",

-         help=_("List of archs which are not blocking for build (separated by commas."))

+                       metavar="ARCH1,ARCH2,...", default="",

+                       help=_("List of archs which are not blocking for build "

+                              "(separated by commas)."))

      parser.add_option("--specfile", metavar="URL",

-         help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

+                       help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))

      parser.add_option("--wait", action="store_true",

-         help=_("Wait on the image creation, even if running in the background"))

+                       help=_("Wait on the image creation, even if running in the background"))

  

      (task_options, args) = parser.parse_args(args)

  
@@ -5814,21 +5929,21 @@ 

          # as do factory-parameters

          section = 'factory-parameters'

          if config.has_section(section):

-             task_options.factory_parameter = [ ]

+             task_options.factory_parameter = []

              for k, v in config.items(section):

                  # We do this, rather than a dict, to match what argparse spits out

-                 task_options.factory_parameter.append( (k, v) )

+                 task_options.factory_parameter.append((k, v))

  

      else:

          if len(args) < 5:

-             parser.error(_("At least five arguments are required: a name, " +

-                            "a version, a build target, a URL to an " +

+             parser.error(_("At least five arguments are required: a name, "

+                            "a version, a build target, a URL to an "

                             "install tree, and 1 or more architectures."))

      if not task_options.ksurl and not task_options.kickstart:

          parser.error(_('You must specify --kickstart'))

      if not task_options.distro:

          parser.error(

-             _("You must specify --distro. Examples: Fedora-16, RHEL-6.4, " +

+             _("You must specify --distro. Examples: Fedora-16, RHEL-6.4, "

                "SL-6.4 or CentOS-6.4"))

      return _build_image_oz(options, task_options, session, args)

  
@@ -5863,7 +5978,7 @@ 

      dest_tag = session.getTag(tmp_target['dest_tag'])

      if not dest_tag:

          raise koji.GenericError(_("Unknown destination tag: %s" %

-                                    tmp_target['dest_tag_name']))

+                                   tmp_target['dest_tag_name']))

  

      # Set the architecture

      if img_type == 'livemedia':
@@ -5888,7 +6003,7 @@ 

          'ksversion', 'release', 'repo', 'scratch', 'skip_tag',

          'specfile', 'vcpu', 'vmem', 'volid', 'optional_arches',

          'lorax_dir', 'lorax_url',

-         ]

+     ]

      for opt in passthru_opts:

          val = getattr(task_opts, opt, None)

          if val is not None:
@@ -5907,7 +6022,7 @@ 

      if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

      else:

          return

  
@@ -5939,7 +6054,7 @@ 

      dest_tag = session.getTag(tmp_target['dest_tag'])

      if not dest_tag:

          raise koji.GenericError(_("Unknown destination tag: %s" %

-                                    tmp_target['dest_tag_name']))

+                                   tmp_target['dest_tag_name']))

  

      # Set the architectures

      arches = []
@@ -5956,7 +6071,7 @@ 

          serverdir = unique_path('cli-image')

          session.uploadWrapper(ksfile, serverdir, callback=callback)

          task_opts.kickstart = os.path.join('work', serverdir,

-             os.path.basename(ksfile))

+                                            os.path.basename(ksfile))

          print('')

  

      hub_opts = {}
@@ -5970,7 +6085,7 @@ 

  

      # finally, create the task.

      task_id = session.buildImageOz(args[0], args[1], arches, target, args[3],

-                                  opts=hub_opts, priority=priority)

+                                    opts=hub_opts, priority=priority)

  

      if not options.quiet:

          print("Created task: %d" % task_id)
@@ -5978,7 +6093,7 @@ 

      if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

      else:

          return

  
@@ -5989,17 +6104,17 @@ 

      usage = _("usage: %prog win-build [options] <target> <URL> <VM>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--winspec", metavar="URL",

-                       help=_("SCM URL to retrieve the build descriptor from. " + \

-                              "If not specified, the winspec must be in the root directory " + \

+                       help=_("SCM URL to retrieve the build descriptor from. "

+                              "If not specified, the winspec must be in the root directory "

                               "of the source repository."))

      parser.add_option("--patches", metavar="URL",

-                       help=_("SCM URL of a directory containing patches to apply " + \

+                       help=_("SCM URL of a directory containing patches to apply "

                               "to the sources before building"))

      parser.add_option("--cpus", type="int",

-                       help=_("Number of cpus to allocate to the build VM " + \

+                       help=_("Number of cpus to allocate to the build VM "

                               "(requires admin access)"))

      parser.add_option("--mem", type="int",

-                       help=_("Amount of memory (in megabytes) to allocate to the build VM " + \

+                       help=_("Amount of memory (in megabytes) to allocate to the build VM "

                               "(requires admin access)"))

      parser.add_option("--static-mac", action="store_true",

                        help=_("Retain the original MAC address when cloning the VM"))
@@ -6020,7 +6135,8 @@ 

                        help=_("Do not print the task information"), default=options.quiet)

      (build_opts, args) = parser.parse_args(args)

      if len(args) != 3:

-         parser.error(_("Exactly three arguments (a build target, a SCM URL, and a VM name) are required"))

+         parser.error(

+             _("Exactly three arguments (a build target, a SCM URL, and a VM name) are required"))

      activate_session(session, options)

      target = args[0]

      if target.lower() == "none" and build_opts.repo_id:
@@ -6045,7 +6161,7 @@ 

              opts[key] = val

      priority = None

      if build_opts.background:

-         #relative to koji.PRIO_DEFAULT

+         # relative to koji.PRIO_DEFAULT

          priority = 5

      task_id = session.winBuild(vm_name, scmurl, target, opts, priority=priority)

      if not build_opts.quiet:
@@ -6054,7 +6170,7 @@ 

      if build_opts.wait or (build_opts.wait is None and not _running_in_bg()):

          session.logout()

          return watch_tasks(session, [task_id], quiet=build_opts.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

      else:

          return

  
@@ -6116,10 +6232,12 @@ 

  

  def handle_set_task_priority(goptions, session, args):

      "[admin] Set task priority"

-     usage = _("usage: %prog set-task-priority [options] --priority=<priority> <task_id> [<task_id> ...]")

+     usage = _("usage: %prog set-task-priority [options] --priority=<priority> <task_id> "

+               "[<task_id> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--priority", type="int", help=_("New priority"))

-     parser.add_option("--recurse", action="store_true", default=False, help=_("Change priority of child tasks as well"))

+     parser.add_option("--recurse", action="store_true", default=False,

+                       help=_("Change priority of child tasks as well"))

      (options, args) = parser.parse_args(args)

      if len(args) == 0:

          parser.error(_("You must specify at least one task id"))
@@ -6151,7 +6269,7 @@ 

      parser.add_option("--channel", help=_("Only tasks in this channel"))

      parser.add_option("--host", help=_("Only tasks for this host"))

      parser.add_option("--quiet", action="store_true", default=goptions.quiet,

-                 help=_("Do not display the column headers"))

+                       help=_("Do not display the column headers"))

      (options, args) = parser.parse_args(args)

      if len(args) != 0:

          parser.error(_("This command takes no arguments"))
@@ -6183,7 +6301,7 @@ 

      tag = args[1]

      with session.multicall(strict=True) as m:

          for package in args[2:]:

-             m.packageListSetArches(tag,package,arches,force=options.force)

+             m.packageListSetArches(tag, package, arches, force=options.force)

  

  

  def handle_set_pkg_owner(goptions, session, args):
@@ -6199,7 +6317,7 @@ 

      tag = args[1]

      with session.multicall(strict=True) as m:

          for package in args[2:]:

-             m.packageListSetOwner(tag,package,owner,force=options.force)

+             m.packageListSetOwner(tag, package, owner, force=options.force)

  

  

  def handle_set_pkg_owner_global(goptions, session, args):
@@ -6208,7 +6326,8 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--verbose", action='store_true', help=_("List changes"))

      parser.add_option("--test", action='store_true', help=_("Test mode"))

-     parser.add_option("--old-user", "--from", action="store", help=_("Only change ownership for packages belonging to this user"))

+     parser.add_option("--old-user", "--from", action="store",

+                       help=_("Only change ownership for packages belonging to this user"))

      (options, args) = parser.parse_args(args)

      if options.old_user:

          if len(args) < 1:
@@ -6222,7 +6341,7 @@ 

      if not user:

          print("No such user: %s" % owner)

          return 1

-     opts = {'with_dups' : True}

+     opts = {'with_dups': True}

      old_user = None

      if options.old_user:

          old_user = session.getUser(options.old_user)
@@ -6246,16 +6365,18 @@ 

      for entry in to_change:

          if user['id'] == entry['owner_id']:

              if options.verbose:

-                 print("Preserving owner=%s for package %s in tag %s" \

-                         % (user['name'], package,  entry['tag_name']))

+                 print("Preserving owner=%s for package %s in tag %s"

+                       % (user['name'], package, entry['tag_name']))

          else:

              if options.test:

-                 print("Would have changed owner for %s in tag %s: %s -> %s" \

-                         % (entry['package_name'], entry['tag_name'], entry['owner_name'], user['name']))

+                 print("Would have changed owner for %s in tag %s: %s -> %s"

+                       % (entry['package_name'], entry['tag_name'], entry['owner_name'],

+                          user['name']))

                  continue

              if options.verbose:

-                 print("Changing owner for %s in tag %s: %s -> %s" \

-                         % (entry['package_name'], entry['tag_name'], entry['owner_name'], user['name']))

+                 print("Changing owner for %s in tag %s: %s -> %s"

+                       % (entry['package_name'], entry['tag_name'], entry['owner_name'],

+                          user['name']))

              session.packageListSetOwner(entry['tag_id'], entry['package_name'], user['id'])

  

  
@@ -6298,7 +6419,7 @@ 

              parser.error(_("at least one task id must be specified"))

  

      return watch_tasks(session, tasks, quiet=options.quiet,

-             poll_interval=goptions.poll_interval)

+                        poll_interval=goptions.poll_interval)

  

  

  def anon_handle_watch_logs(goptions, session, args):
@@ -6306,8 +6427,9 @@ 

      usage = _("usage: %prog watch-logs [options] <task id> [<task id> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--log", help=_("Watch only a specific log"))

-     parser.add_option("--mine", action="store_true", help=_("Watch logs for "

-         "all your tasks, task_id arguments are forbidden in this case."))

+     parser.add_option("--mine", action="store_true",

+                       help=_("Watch logs for all your tasks, task_id arguments are forbidden in "

+                              "this case."))

      parser.add_option("--follow", action="store_true", help=_("Follow spawned child tasks"))

      (options, args) = parser.parse_args(args)

      activate_session(session, goptions)
@@ -6360,7 +6482,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=goptions.quiet,

-                 poll_interval=goptions.poll_interval)

+                            poll_interval=goptions.poll_interval)

  

  

  def handle_tag_build(opts, session, args):
@@ -6371,12 +6493,14 @@ 

      parser.add_option("--nowait", action="store_true", help=_("Do not wait on task"))

      (options, args) = parser.parse_args(args)

      if len(args) < 2:

-         parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's"))

+         parser.error(

+             _("This command takes at least two arguments: a tag name/ID and one or more package "

+               "n-v-r's"))

      activate_session(session, opts)

      tasks = []

      for pkg in args[1:]:

          task_id = session.tagBuild(args[0], pkg, force=options.force)

-         #XXX - wait on task

+         # XXX - wait on task

          tasks.append(task_id)

          print("Created task %d" % task_id)

      if _running_in_bg() or options.nowait:
@@ -6384,7 +6508,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, tasks, quiet=opts.quiet,

-                 poll_interval=opts.poll_interval)

+                            poll_interval=opts.poll_interval)

  

  

  def handle_move_build(opts, session, args):
@@ -6393,13 +6517,18 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--force", action="store_true", help=_("force operation"))

      parser.add_option("--nowait", action="store_true", help=_("do not wait on tasks"))

-     parser.add_option("--all", action="store_true", help=_("move all instances of a package, <pkg>'s are package names"))

+     parser.add_option("--all", action="store_true",

+                       help=_("move all instances of a package; <pkg>'s are package names"))

      (options, args) = parser.parse_args(args)

      if len(args) < 3:

          if options.all:

-             parser.error(_("This command, with --all, takes at least three arguments: two tags and one or more package names"))

+             parser.error(

+                 _("This command, with --all, takes at least three arguments: two tags and one or "

+                   "more package names"))

          else:

-             parser.error(_("This command takes at least three arguments: two tags and one or more package n-v-r's"))

+             parser.error(

+                 _("This command takes at least three arguments: two tags and one or more package "

+                   "n-v-r's"))

      activate_session(session, opts)

      tasks = []

      builds = []
@@ -6418,7 +6547,7 @@ 

              if not build:

                  print(_("Invalid build %s, skipping." % arg))

                  continue

-             if not build in builds:

+             if build not in builds:

                  builds.append(build)

  

          for build in builds:
@@ -6430,15 +6559,17 @@ 

      else:

          session.logout()

          return watch_tasks(session, tasks, quiet=opts.quiet,

-                 poll_interval=opts.poll_interval)

+                            poll_interval=opts.poll_interval)

  

  

  def handle_untag_build(goptions, session, args):

      "[bind] Remove a tag from one or more builds"

      usage = _("usage: %prog untag-build [options] <tag> <pkg> [<pkg> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--all", action="store_true", help=_("untag all versions of the package in this tag"))

-     parser.add_option("--non-latest", action="store_true", help=_("untag all versions of the package in this tag except the latest"))

+     parser.add_option("--all", action="store_true",

+                       help=_("untag all versions of the package in this tag"))

+     parser.add_option("--non-latest", action="store_true",

+                       help=_("untag all versions of the package in this tag except the latest"))

      parser.add_option("-n", "--test", action="store_true", help=_("test mode"))

      parser.add_option("-v", "--verbose", action="store_true", help=_("print details"))

      parser.add_option("--force", action="store_true", help=_("force operation"))
@@ -6447,7 +6578,9 @@ 

          if len(args) < 1:

              parser.error(_("Please specify a tag"))

      elif len(args) < 2:

-         parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's"))

+         parser.error(

+             _("This command takes at least two arguments: a tag name/ID and one or more package "

+               "n-v-r's"))

      activate_session(session, goptions)

      tag = session.getTag(args[0])

      if not tag:
@@ -6468,7 +6601,7 @@ 

          builds = []

          for binfo in tagged:

              if binfo['name'] not in seen_pkg:

-                 #latest for this package

+                 # latest for this package

                  if options.verbose:

                      print(_("Leaving latest build for package %(name)s: %(nvr)s") % binfo)

              else:
@@ -6512,7 +6645,7 @@ 

      tag = args[0]

      with session.multicall(strict=True) as m:

          for package in args[1:]:

-             m.packageListUnblock(tag,package)

+             m.packageListUnblock(tag, package)

  

  

  def anon_handle_download_build(options, session, args):
@@ -6521,8 +6654,11 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--arch", "-a", dest="arches", metavar="ARCH", action="append", default=[],

                        help=_("Only download packages for this arch (may be used multiple times)"))

-     parser.add_option("--type", help=_("Download archives of the given type, rather than rpms (maven, win, or image)"))

-     parser.add_option("--latestfrom", dest="latestfrom", help=_("Download the latest build from this tag"))

+     parser.add_option("--type",

+                       help=_("Download archives of the given type, rather than rpms "

+                              "(maven, win, or image)"))

+     parser.add_option("--latestfrom", dest="latestfrom",

+                       help=_("Download the latest build from this tag"))

      parser.add_option("--debuginfo", action="store_true", help=_("Also download -debuginfo rpms"))

      parser.add_option("--task-id", action="store_true", help=_("Interperet id as a task id"))

      parser.add_option("--rpm", action="store_true", help=_("Download the given rpm"))
@@ -6557,7 +6693,8 @@ 

      if suboptions.latestfrom:

          # We want the latest build, not a specific build

          try:

-             builds = session.listTagged(suboptions.latestfrom, latest=True, package=build, type=suboptions.type)

+             builds = session.listTagged(suboptions.latestfrom, latest=True, package=build,

+                                         type=suboptions.type)

          except koji.GenericError as data:

              print("Error finding latest build: %s" % data)

              return 1
@@ -6625,7 +6762,8 @@ 

              rpms = session.listRPMs(buildID=info['id'], arches=arches)

          if not rpms:

              if arches:

-                 print("No %s packages available for %s" % (" or ".join(arches), koji.buildLabel(info)))

+                 print("No %s packages available for %s" %

+                       (" or ".join(arches), koji.buildLabel(info)))

              else:

                  print("No packages available for %s" % koji.buildLabel(info))

              return 1
@@ -6701,7 +6839,8 @@ 

              offset = 0

          try:

              while contents:

-                 contents = session.downloadTaskOutput(task_id, filename, offset=offset, size=blocksize, volume=volume)

+                 contents = session.downloadTaskOutput(task_id, filename, offset=offset,

+                                                       size=blocksize, volume=volume)

                  offset += len(contents)

                  if contents:

                      fd.write(contents)
@@ -6714,7 +6853,7 @@ 

          if task_info is None:

              error(_("No such task id: %i" % task_id))

          files = list_task_output_all_volumes(session, task_id)

-         logs = [] # list of tuples (filename, volume)

+         logs = []  # list of tuples (filename, volume)

          for filename in files:

              if not filename.endswith(".log"):

                  continue
@@ -6726,7 +6865,7 @@ 

                                      "%s-%s" % (task_info["arch"], task_id))

  

          count = 0

-         state =  koji.TASK_STATES[task_info['state']]

+         state = koji.TASK_STATES[task_info['state']]

          if state == 'FAILED':

              if not match or koji.util.multi_fnmatch(FAIL_LOG, match):

                  write_fail_log(task_log_dir, task_id)
@@ -6769,7 +6908,8 @@ 

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--arch", dest="arches", metavar="ARCH", action="append", default=[],

                        help=_("Only download packages for this arch (may be used multiple times)"))

-     parser.add_option("--logs", dest="logs", action="store_true", default=False, help=_("Also download build logs"))

+     parser.add_option("--logs", dest="logs", action="store_true", default=False,

+                       help=_("Also download build logs"))

      parser.add_option("--topurl", metavar="URL", default=options.topurl,

                        help=_("URL under which Koji files are accessible"))

      parser.add_option("--noprogress", action="store_true",
@@ -6795,7 +6935,9 @@ 

      if not base_task:

          error(_('No such task: #%i') % base_task_id)

  

-     check_downloadable = lambda task: task["method"] == "buildArch"

+     def check_downloadable(task):

+         return task["method"] == "buildArch"

+ 

      downloadable_tasks = []

  

      if check_downloadable(base_task):
@@ -6847,7 +6989,8 @@ 

          if '..' in filename:

              error(_('Invalid file name: %s') % filename)

          url = '%s/%s/%s' % (pathinfo.work(volume), pathinfo.taskrelpath(task["id"]), filename)

-         download_file(url, new_filename, suboptions.quiet, suboptions.noprogress, len(downloads), number)

+         download_file(url, new_filename, suboptions.quiet, suboptions.noprogress, len(downloads),

+                       number)

  

  

  def anon_handle_wait_repo(options, session, args):
@@ -6855,10 +6998,16 @@ 

      usage = _("usage: %prog wait-repo [options] <tag>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--build", metavar="NVR", dest="builds", action="append", default=[],

-                       help=_("Check that the given build is in the newly-generated repo (may be used multiple times)"))

-     parser.add_option("--target", action="store_true", help=_("Interpret the argument as a build target name"))

-     parser.add_option("--timeout", type="int", help=_("Amount of time to wait (in minutes) before giving up (default: 120)"), default=120)

-     parser.add_option("--quiet", action="store_true", help=_("Suppress output, success or failure will be indicated by the return value only"), default=options.quiet)

+                       help=_("Check that the given build is in the newly-generated repo "

+                              "(may be used multiple times)"))

+     parser.add_option("--target", action="store_true",

+                       help=_("Interpret the argument as a build target name"))

+     parser.add_option("--timeout", type="int", default=120,

+                       help=_("Amount of time to wait (in minutes) before giving up "

+                              "(default: 120)"))

+     parser.add_option("--quiet", action="store_true", default=options.quiet,

+                       help=_("Suppress output; success or failure will be indicated by the return "

+                              "value only"))

      (suboptions, args) = parser.parse_args(args)

  

      start = time.time()
@@ -6892,7 +7041,6 @@ 

              return 1

          tag_id = tag_info['id']

  

- 

      for nvr in builds:

          data = session.getLatestBuilds(tag_id, package=nvr["name"])

          if len(data) == 0:
@@ -6900,24 +7048,30 @@ 

          else:

              present_nvr = [x["nvr"] for x in data][0]

              if present_nvr != "%s-%s-%s" % (nvr["name"], nvr["version"], nvr["release"]):

-                 print("Warning: nvr %s-%s-%s is not current in tag %s\n  latest build in %s is %s" % (nvr["name"], nvr["version"], nvr["release"], tag, tag, present_nvr))

+                 print(

+                     "Warning: nvr %s-%s-%s is not current in tag %s\n  latest build in %s is %s" %

+                     (nvr["name"], nvr["version"], nvr["release"], tag, tag, present_nvr))

  

      last_repo = None

      repo = session.getRepo(tag_id)

  

      while True:

          if builds and repo and repo != last_repo:

-             if koji.util.checkForBuilds(session, tag_id, builds, repo['create_event'], latest=True):

+             if koji.util.checkForBuilds(session, tag_id, builds, repo['create_event'],

+                                         latest=True):

                  if not suboptions.quiet:

-                     print("Successfully waited %s for %s to appear in the %s repo" % (koji.util.duration(start), koji.util.printList(suboptions.builds), tag))

+                     print("Successfully waited %s for %s to appear in the %s repo" %

+                           (koji.util.duration(start), koji.util.printList(suboptions.builds), tag))

                  return

  

          if (time.time() - start) >= (suboptions.timeout * 60.0):

              if not suboptions.quiet:

                  if builds:

-                     print("Unsuccessfully waited %s for %s to appear in the %s repo" % (koji.util.duration(start), koji.util.printList(suboptions.builds), tag))

+                     print("Unsuccessfully waited %s for %s to appear in the %s repo" %

+                           (koji.util.duration(start), koji.util.printList(suboptions.builds), tag))

                  else:

-                     print("Unsuccessfully waited %s for a new %s repo" % (koji.util.duration(start), tag))

+                     print("Unsuccessfully waited %s for a new %s repo" %

+                           (koji.util.duration(start), tag))

              return 1

  

          time.sleep(options.poll_interval)
@@ -6927,7 +7081,8 @@ 

          if not builds:

              if repo != last_repo:

                  if not suboptions.quiet:

-                     print("Successfully waited %s for a new %s repo" % (koji.util.duration(start), tag))

+                     print("Successfully waited %s for a new %s repo" %

+                           (koji.util.duration(start), tag))

                  return

  

  
@@ -6935,11 +7090,14 @@ 

      "[admin] Force a repo to be regenerated"

      usage = _("usage: %prog regen-repo [options] <tag>")

      parser = OptionParser(usage=get_usage_str(usage))

-     parser.add_option("--target", action="store_true", help=_("Interpret the argument as a build target name"))

+     parser.add_option("--target", action="store_true",

+                       help=_("Interpret the argument as a build target name"))

      parser.add_option("--nowait", action="store_true", help=_("Don't wait on for regen to finish"))

      parser.add_option("--debuginfo", action="store_true", help=_("Include debuginfo rpms in repo"))

-     parser.add_option("--source", "--src", action="store_true", help=_("Include source rpms in each of repos"))

-     parser.add_option("--separate-source", "--separate-src", action="store_true", help=_("Include source rpms in separate src repo"))

+     parser.add_option("--source", "--src", action="store_true",

+                       help=_("Include source rpms in each of the repos"))

+     parser.add_option("--separate-source", "--separate-src", action="store_true",

+                       help=_("Include source rpms in separate src repo"))

      (suboptions, args) = parser.parse_args(args)

      if len(args) == 0:

          parser.error(_("A tag name must be specified"))
@@ -6982,7 +7140,7 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  def handle_dist_repo(options, session, args):
@@ -6996,59 +7154,60 @@ 

      usage += _("\n(Specify the --help option for a list of other options)")

      parser = OptionParser(usage=usage)

      parser.add_option('--allow-missing-signatures', action='store_true',

-         default=False,

-         help=_('For RPMs not signed with a desired key, fall back to the '

-             'primary copy'))

+                       default=False,

+                       help=_('For RPMs not signed with a desired key, fall back to the '

+                              'primary copy'))

      parser.add_option("-a", "--arch", action='append', default=[],

-         help=_("Indicate an architecture to consider. The default is all " +

-             "architectures associated with the given tag. This option may " +

-             "be specified multiple times."))

+                       help=_("Indicate an architecture to consider. The default is all "

+                              "architectures associated with the given tag. This option may "

+                              "be specified multiple times."))

      parser.add_option("--with-src", action='store_true', help='Also generate a src repo')

      parser.add_option("--split-debuginfo", action='store_true', default=False,

-             help='Split debuginfo info a separate repo for each arch')

+                       help='Split debuginfo into a separate repo for each arch')

      parser.add_option('--comps', help='Include a comps file in the repodata')

-     parser.add_option('--delta-rpms', metavar='REPO',default=[],

-         action='append',

-         help=_('Create delta rpms. REPO can be the id of another dist repo '

-             'or the name of a tag that has a dist repo. May be specified '

-             'multiple times.'))

+     parser.add_option('--delta-rpms', metavar='REPO', default=[],

+                       action='append',

+                       help=_('Create delta rpms. REPO can be the id of another dist repo '

+                              'or the name of a tag that has a dist repo. May be specified '

+                              'multiple times.'))

      parser.add_option('--event', type='int',

-         help=_('Use tag content at event'))

+                       help=_('Use tag content at event'))

      parser.add_option("--volume", help=_("Generate repo on given volume"))

      parser.add_option('--non-latest', dest='latest', default=True,

-         action='store_false', help='Include older builds, not just the latest')

+                       action='store_false', help='Include older builds, not just the latest')

      parser.add_option('--multilib', default=None, metavar="CONFIG",

-         help=_('Include multilib packages in the repository using the given '

-             'config file'))

+                       help=_('Include multilib packages in the repository using the given '

+                              'config file'))

      parser.add_option("--noinherit", action='store_true', default=False,

-         help=_('Do not consider tag inheritance'))

+                       help=_('Do not consider tag inheritance'))

      parser.add_option("--nowait", action='store_true', default=False,

-         help=_('Do not wait for the task to complete'))

+                       help=_('Do not wait for the task to complete'))

      parser.add_option('--skip-missing-signatures', action='store_true', default=False,

-         help=_('Skip RPMs not signed with the desired key(s)'))

+                       help=_('Skip RPMs not signed with the desired key(s)'))

      parser.add_option('--zck', action='store_true', default=False,

-         help=_('Generate zchunk files as well as the standard repodata'))

+                       help=_('Generate zchunk files as well as the standard repodata'))

      parser.add_option('--zck-dict-dir', action='store', default=None,

-         help=_('Directory containing compression dictionaries for use by zchunk (on builder)'))

+                       help=_('Directory containing compression dictionaries for use by zchunk '

+                              '(on builder)'))

      task_opts, args = parser.parse_args(args)

      if len(args) < 1:

          parser.error(_('You must provide a tag to generate the repo from'))

      if len(args) < 2 and not task_opts.allow_missing_signatures:

          parser.error(_('Please specify one or more GPG key IDs (or '

-                 '--allow-missing-signatures)'))

+                        '--allow-missing-signatures)'))

      if task_opts.allow_missing_signatures and task_opts.skip_missing_signatures:

          parser.error(_('allow_missing_signatures and skip_missing_signatures '

-                 'are mutually exclusive'))

+                        'are mutually exclusive'))

      activate_session(session, options)

      stuffdir = unique_path('cli-dist-repo')

      if task_opts.comps:

          if not os.path.exists(task_opts.comps):

              parser.error(_('could not find %s') % task_opts.comps)

          session.uploadWrapper(task_opts.comps, stuffdir,

-             callback=_progress_callback)

+                               callback=_progress_callback)

          print('')

          task_opts.comps = os.path.join(stuffdir,

-             os.path.basename(task_opts.comps))

+                                        os.path.basename(task_opts.comps))

      old_repos = []

      if len(task_opts.delta_rpms) > 0:

          for repo in task_opts.delta_rpms:
@@ -7060,7 +7219,7 @@ 

                  if not rinfo:

                      # maybe there is an expired one

                      rinfo = session.getRepo(repo,

-                             state=koji.REPO_STATES['EXPIRED'], dist=True)

+                                             state=koji.REPO_STATES['EXPIRED'], dist=True)

                  if not rinfo:

                      parser.error(_("Can't find repo for tag: %s") % repo)

              old_repos.append(rinfo['id'])
@@ -7081,16 +7240,16 @@ 

      if task_opts.multilib:

          if not os.path.exists(task_opts.multilib):

              parser.error(_('could not find %s') % task_opts.multilib)

-         if 'x86_64' in task_opts.arch and not 'i686' in task_opts.arch:

+         if 'x86_64' in task_opts.arch and 'i686' not in task_opts.arch:

              parser.error(_('The multilib arch (i686) must be included'))

-         if 's390x' in task_opts.arch and not 's390' in task_opts.arch:

+         if 's390x' in task_opts.arch and 's390' not in task_opts.arch:

              parser.error(_('The multilib arch (s390) must be included'))

-         if 'ppc64' in task_opts.arch and not 'ppc' in task_opts.arch:

+         if 'ppc64' in task_opts.arch and 'ppc' not in task_opts.arch:

              parser.error(_('The multilib arch (ppc) must be included'))

          session.uploadWrapper(task_opts.multilib, stuffdir,

-             callback=_progress_callback)

+                               callback=_progress_callback)

          task_opts.multilib = os.path.join(stuffdir,

-             os.path.basename(task_opts.multilib))

+                                           os.path.basename(task_opts.multilib))

          print('')

      if 'noarch' in task_opts.arch:

          task_opts.arch.remove('noarch')
@@ -7120,12 +7279,13 @@ 

      else:

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet,

-                 poll_interval=options.poll_interval)

+                            poll_interval=options.poll_interval)

  

  

  _search_types = ('package', 'build', 'tag', 'target', 'user', 'host', 'rpm',

                   'maven', 'win')

  

+ 

  def anon_handle_search(options, session, args):

      "[search] Search the system"

      usage = _("usage: %prog search [options] <search_type> <pattern>")
@@ -7163,7 +7323,7 @@ 

      u = session.getLoggedInUser()

      if not u:

          print("Not authenticated")

-         u = {'name' : 'anonymous user'}

+         u = {'name': 'anonymous user'}

      print("%s, %s!" % (_printable_unicode(random.choice(greetings)), u["name"]))

      print("")

      print("You are using the hub at %s" % session.baseurl)
@@ -7203,7 +7363,11 @@ 

          user_id = None

  

      mask = "%(id)6s %(tag)-25s %(package)-25s %(email)-20s %(success)-12s"

-     headers = {'id': 'ID', 'tag': 'Tag', 'package': 'Package', 'email': 'E-mail', 'success': 'Success-only'}

+     headers = {'id': 'ID',

+                'tag': 'Tag',

+                'package': 'Package',

+                'email': 'E-mail',

+                'success': 'Success-only'}

      head = mask % headers

      notifications = session.getBuildNotifications(user_id)

      if notifications:
@@ -7293,7 +7457,8 @@ 

  

  def handle_remove_notification(goptions, session, args):

      "[monitor] Remove user's notifications"

-     usage = _("usage: %prog remove-notification [options] <notification_id> [<notification_id> ...]")

+     usage = _("usage: %prog remove-notification [options] <notification_id> "

+               "[<notification_id> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      (options, args) = parser.parse_args(args)

  
@@ -7318,13 +7483,13 @@ 

      usage = _("usage: %prog edit-notification [options] <notification_id>")

      parser = OptionParser(usage=get_usage_str(usage))

      parser.add_option("--package",

-             help=_("Notifications for this package, '*' for all"))

+                       help=_("Notifications for this package, '*' for all"))

      parser.add_option("--tag",

-             help=_("Notifications for this tag, '*' for all"))

+                       help=_("Notifications for this tag, '*' for all"))

      parser.add_option("--success-only", action="store_true", default=None,

-             dest='success_only', help=_("Notify only on successful events"))

+                       dest='success_only', help=_("Notify only on successful events"))

      parser.add_option("--no-success-only", action="store_false",

-             default=None, dest='success_only', help=_("Notify on all events"))

+                       default=None, dest='success_only', help=_("Notify on all events"))

      (options, args) = parser.parse_args(args)

  

      if len(args) != 1:
@@ -7415,7 +7580,7 @@ 

          tag_id = None

  

      for block in session.getBuildNotificationBlocks(user_id):

-         if (block['package_id'] == package_id and block['tag_id'] == tag_id):

+         if block['package_id'] == package_id and block['tag_id'] == tag_id:

              parser.error('Notification already exists.')

  

      session.createNotificationBlock(user_id, package_id, tag_id)
@@ -7423,7 +7588,8 @@ 

  

  def handle_unblock_notification(goptions, session, args):

      "[monitor] Unblock user's notification"

-     usage = _("usage: %prog unblock-notification [options] <notification_id> [<notification_id> ...]")

+     usage = _("usage: %prog unblock-notification [options] <notification_id> "

+               "[<notification_id> ...]")

      parser = OptionParser(usage=get_usage_str(usage))

      (options, args) = parser.parse_args(args)

  

file modified
+75 -65
@@ -34,22 +34,22 @@ 

               'hallo',

               'ciao',

               'hola',

-             u'olá',

-             u'dobrý den',

-             u'zdravstvuite',

-             u'góðan daginn',

+              u'olá',

+              u'dobrý den',

+              u'zdravstvuite',

+              u'góðan daginn',

               'hej',

               'tervehdys',

-             u'grüezi',

-             u'céad míle fáilte',

-             u'hylô',

-             u'bună ziua',

-             u'jó napot',

+              u'grüezi',

+              u'céad míle fáilte',

+              u'hylô',

+              u'bună ziua',

+              u'jó napot',

               'dobre dan',

-             u'你好',

-             u'こんにちは',

-             u'नमस्कार',

-             u'안녕하세요')

+              u'你好',

+              u'こんにちは',

+              u'नमस्कार',

+              u'안녕하세요')

  

  ARGMAP = {'None': None,

            'True': True,
@@ -72,26 +72,26 @@ 

          pass

      if arg in ARGMAP:

          return ARGMAP[arg]

-     #handle lists/dicts?

+     # handle lists/dicts?

      return arg

  

  

  categories = {

-     'admin' : 'admin commands',

-     'build' : 'build commands',

-     'search' : 'search commands',

-     'download' : 'download commands',

-     'monitor'  : 'monitor commands',

-     'info' : 'info commands',

-     'bind' : 'bind commands',

-     'misc' : 'miscellaneous commands',

+     'admin': 'admin commands',

+     'build': 'build commands',

+     'search': 'search commands',

+     'download': 'download commands',

+     'monitor': 'monitor commands',

+     'info': 'info commands',

+     'bind': 'bind commands',

+     'misc': 'miscellaneous commands',

  }

  

  

  def get_epilog_str(progname=None):

      if progname is None:

          progname = os.path.basename(sys.argv[0]) or 'koji'

-     categories_ordered=', '.join(sorted(['all'] + to_list(categories.keys())))

+     categories_ordered = ', '.join(sorted(['all'] + to_list(categories.keys())))

      epilog_str = '''

  Try "%(progname)s --help" for help about global options

  Try "%(progname)s help" to get all available commands
@@ -105,13 +105,15 @@ 

  def get_usage_str(usage):

      return usage + _("\n(Specify the --help global option for a list of other help options)")

  

+ 

  def ensure_connection(session):

      try:

          ret = session.getAPIVersion()

      except requests.exceptions.ConnectionError:

          error(_("Error: Unable to connect to server"))

      if ret != koji.API_VERSION:

-         warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))

+         warn(_("WARNING: The server is at API version %d and "

+                "the client is at %d" % (ret, koji.API_VERSION)))

  

  

  def print_task_headers():
@@ -119,36 +121,36 @@ 

      print("ID       Pri  Owner                State    Arch       Name")

  

  

- def print_task(task,depth=0):

+ def print_task(task, depth=0):

      """Print a task"""

      task = task.copy()

-     task['state'] = koji.TASK_STATES.get(task['state'],'BADSTATE')

+     task['state'] = koji.TASK_STATES.get(task['state'], 'BADSTATE')

      fmt = "%(id)-8s %(priority)-4s %(owner_name)-20s %(state)-8s %(arch)-10s "

      if depth:

-         indent = "  "*(depth-1) + " +"

+         indent = "  " * (depth - 1) + " +"

      else:

          indent = ''

      label = koji.taskLabel(task)

      print(''.join([fmt % task, indent, label]))

  

  

- def print_task_recurse(task,depth=0):

+ def print_task_recurse(task, depth=0):

      """Print a task and its children"""

-     print_task(task,depth)

-     for child in task.get('children',()):

-         print_task_recurse(child,depth+1)

+     print_task(task, depth)

+     for child in task.get('children', ()):

+         print_task_recurse(child, depth + 1)

  

  

  class TaskWatcher(object):

  

-     def __init__(self,task_id,session,level=0,quiet=False):

+     def __init__(self, task_id, session, level=0, quiet=False):

          self.id = task_id

          self.session = session

          self.info = None

          self.level = level

          self.quiet = quiet

  

-     #XXX - a bunch of this stuff needs to adapt to different tasks

+     # XXX - a bunch of this stuff needs to adapt to different tasks

  

      def str(self):

          if self.info:
@@ -167,7 +169,7 @@ 

          error = None

          try:

              self.session.getTaskResult(self.id)

-         except (six.moves.xmlrpc_client.Fault,koji.GenericError) as e:

+         except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:

              error = e

          if error is None:

              # print("%s: complete" % self.str())
@@ -189,11 +191,12 @@ 

              sys.exit(1)

          state = self.info['state']

          if last:

-             #compare and note status changes

+             # compare and note status changes

              laststate = last['state']

              if laststate != state:

                  if not self.quiet:

-                     print("%s: %s -> %s" % (self.str(), self.display_state(last), self.display_state(self.info)))

+                     print("%s: %s -> %s" % (self.str(), self.display_state(last),

+                                             self.display_state(self.info)))

                  return True

              return False

          else:
@@ -206,7 +209,7 @@ 

          if self.info is None:

              return False

          state = koji.TASK_STATES[self.info['state']]

-         return (state in ['CLOSED','CANCELED','FAILED'])

+         return (state in ['CLOSED', 'CANCELED', 'FAILED'])

  

      def is_success(self):

          if self.info is None:
@@ -276,9 +279,9 @@ 

                  tlist = ['%s: %s' % (t.str(), t.display_state(t.info))

                           for t in tasks.values() if not t.is_done()]

                  print(

- """Tasks still running. You can continue to watch with the '%s watch-task' command.

- Running Tasks:

- %s""" % (progname, '\n'.join(tlist)))

+                     "Tasks still running. You can continue to watch with the"

+                     " '%s watch-task' command.\n"

+                     "Running Tasks:\n%s" % (progname, '\n'.join(tlist)))

      sys.stdout.flush()

      rv = 0

      try:
@@ -300,8 +303,9 @@ 

                          rv = 1

                  for child in session.getTaskChildren(task_id):

                      child_id = child['id']

-                     if not child_id in tasks.keys():

-                         tasks[child_id] = TaskWatcher(child_id, session, task.level + 1, quiet=quiet)

+                     if child_id not in tasks.keys():

+                         tasks[child_id] = TaskWatcher(child_id, session, task.level + 1,

+                                                       quiet=quiet)

                          tasks[child_id].update()

                          # If we found new children, go through the list again,

                          # in case they have children also
@@ -339,7 +343,7 @@ 

              print("No such task id: %i" % taskId)

              sys.exit(1)

          state = koji.TASK_STATES[info['state']]

-         return (state in ['CLOSED','CANCELED','FAILED'])

+         return (state in ['CLOSED', 'CANCELED', 'FAILED'])

  

      offsets = {}

      for task_id in tasklist:
@@ -369,7 +373,8 @@ 

                      if (log, volume) not in taskoffsets:

                          taskoffsets[(log, volume)] = 0

  

-                     contents = session.downloadTaskOutput(task_id, log, taskoffsets[(log, volume)], 16384, volume=volume)

+                     contents = session.downloadTaskOutput(task_id, log, taskoffsets[(log, volume)],

+                                                           16384, volume=volume)

                      taskoffsets[(log, volume)] += len(contents)

                      if contents:

                          currlog = "%d:%s:%s:" % (task_id, volume, log)
@@ -380,7 +385,6 @@ 

                              lastlog = currlog

                          bytes_to_stdout(contents)

  

- 

              if opts.follow:

                  for child in session.getTaskChildren(task_id):

                      if child['id'] not in tasklist:
@@ -414,7 +418,7 @@ 

      # For some reason repr(time.time()) includes 4 or 5

      # more digits of precision than str(time.time())

      return '%s/%r.%s' % (prefix, time.time(),

-                       ''.join([random.choice(string.ascii_letters) for i in range(8)]))

+                          ''.join([random.choice(string.ascii_letters) for i in range(8)]))

  

  

  def _format_size(size):
@@ -422,7 +426,7 @@ 

          return "%0.2f GiB" % (size / 1073741824.0)

      if (size / 1048576 >= 1):

          return "%0.2f MiB" % (size / 1048576.0)

-     if (size / 1024 >=1):

+     if (size / 1024 >= 1):

          return "%0.2f KiB" % (size / 1024.0)

      return "%0.2f B" % (size)

  
@@ -439,7 +443,7 @@ 

      if total == 0:

          percent_done = 0.0

      else:

-         percent_done = float(uploaded)/float(total)

+         percent_done = float(uploaded) / float(total)

      percent_done_str = "%02d%%" % (percent_done * 100)

      data_done = _format_size(uploaded)

      elapsed = _format_secs(total_time)
@@ -447,12 +451,14 @@ 

      speed = "- B/sec"

      if (time):

          if (uploaded != total):

-             speed = _format_size(float(piece)/float(time)) + "/sec"

+             speed = _format_size(float(piece) / float(time)) + "/sec"

          else:

-             speed = _format_size(float(total)/float(total_time)) + "/sec"

+             speed = _format_size(float(total) / float(total_time)) + "/sec"

  

      # write formatted string and flush

-     sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('='*(int(percent_done*36)), percent_done_str, elapsed, data_done, speed))

+     sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('=' * (int(percent_done * 36)),

+                                                            percent_done_str, elapsed, data_done,

+                                                            speed))

      sys.stdout.flush()

  

  
@@ -498,14 +504,14 @@ 

          response.raise_for_status()

          length = int(response.headers.get('content-length') or 0)

          with open(relpath, 'wb') as f:

-             l = 0

+             pos = 0

              for chunk in response.iter_content(chunk_size=65536):

-                 l += len(chunk)

+                 pos += len(chunk)

                  f.write(chunk)

                  if not (quiet or noprogress):

-                     _download_progress(length, l)

+                     _download_progress(length, pos)

              if not length and not (quiet or noprogress):

-                 _download_progress(l, l)

+                 _download_progress(pos, pos)

  

      if not (quiet or noprogress):

          print('')
@@ -520,7 +526,8 @@ 

          percent_done_str = "%3d%%" % (percent_done * 100)

      data_done = _format_size(download_d)

  

-     sys.stdout.write("[% -36s] % 4s % 10s\r" % ('=' * (int(percent_done * 36)), percent_done_str, data_done))

+     sys.stdout.write("[% -36s] % 4s % 10s\r" % ('=' * (int(percent_done * 36)), percent_done_str,

+                                                 data_done))

      sys.stdout.flush()

  

  
@@ -555,18 +562,21 @@ 

      noauth = options.authtype == "noauth" or getattr(options, 'noauth', False)

      runas = getattr(options, 'runas', None)

      if noauth:

-         #skip authentication

+         # skip authentication

          pass

      elif options.authtype == "ssl" or os.path.isfile(options.cert) and options.authtype is None:

          # authenticate using SSL client cert

          session.ssl_login(options.cert, None, options.serverca, proxyuser=runas)

-     elif options.authtype == "password" or getattr(options, 'user', None) and options.authtype is None:

+     elif options.authtype == "password" \

+             or getattr(options, 'user', None) \

+             and options.authtype is None:

          # authenticate using user/password

          session.login()

      elif options.authtype == "kerberos" or has_krb_creds() and options.authtype is None:

          try:

              if getattr(options, 'keytab', None) and getattr(options, 'principal', None):

-                 session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=runas)

+                 session.krb_login(principal=options.principal, keytab=options.keytab,

+                                   proxyuser=runas)

              else:

                  session.krb_login(proxyuser=runas)

          except socket.error as e:
@@ -587,8 +597,8 @@ 

      "Retrieve a list of tasks"

  

      callopts = {

-         'state' : [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],

-         'decode' : True,

+         'state': [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],

+         'decode': True,

      }

  

      if getattr(options, 'mine', False):
@@ -622,16 +632,16 @@ 

              sys.exit(1)

          callopts['host_id'] = host['id']

  

-     qopts = {'order' : 'priority,create_time'}

+     qopts = {'order': 'priority,create_time'}

      tasklist = session.listTasks(callopts, qopts)

      tasks = dict([(x['id'], x) for x in tasklist])

  

-     #thread the tasks

+     # thread the tasks

      for t in tasklist:

          if t['parent'] is not None:

              parent = tasks.get(t['parent'])

              if parent:

-                 parent.setdefault('children',[])

+                 parent.setdefault('children', [])

                  parent['children'].append(t)

                  t['sub'] = True

  
@@ -641,7 +651,7 @@ 

  def format_inheritance_flags(parent):

      """Return a human readable string of inheritance flags"""

      flags = ''

-     for code,expr in (

+     for code, expr in (

              ('M', parent['maxdepth'] is not None),

              ('F', parent['pkg_filter']),

              ('I', parent['intransitive']),

@@ -614,8 +614,8 @@ 

       - ``tests/test_cli/*``

  

  - Check that unit tests are not broken. Simply run ``make test`` in the main

-   directory of your branch. For python3 compatible-code we have also ``make

-   test3`` target.

+   directory of your branch; this checks the code under both python2 and

+   python3. You can also use the ``make test2`` or ``make test3`` targets to

+   run each of them individually.

  

  Note that the core development team for Koji is small, so it may take a few

  days for someone to reply to your request.
@@ -657,3 +657,14 @@ 

  Unit tests are run automatically for any commit in master branch. We use

  Fedora's jenkins instance for that. Details are given here: :doc:`Unit tests

  in Fedora's Jenkins <configuring_jenkins>`.

+ 

+ Code Style

+ ==========

+ 

+ We use ``flake8`` to check the code style. Please refer to ``.flake8`` for the

+ PEP8 and extra rules we follow or ignore; a short illustration of the

+ resulting style follows the package list below.

+ 

+ You will need to install the packages below to run the check.

+ 

+  * ``python-flake8``

+  * ``python-flake8-import-order``
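
As an illustration only (the names below are hypothetical, not from the patch),
the style these rules converge on puts a space after ``#`` in comments, drops
the space before the colon in dict literals, and aligns continuation lines with
the opening delimiter, leaving binary operators at the end of the line:

    # hypothetical example of the enforced style
    pri, dup_ids = 10, [42, 43]
    policy_data = {'priority': pri, 'parents': dup_ids}
    message = ("Inheritance priorities must be unique (pri %s: %r)" %
               (pri, dup_ids))
    print(policy_data, message)
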

file modified
+1347 -1003
@@ -60,8 +60,16 @@ 

  import koji.tasks

  import koji.xmlrpcplus

  from koji.context import context

- from koji.util import (base64encode, decode_bytes, dslice, joinpath,

-                        move_and_symlink, multi_fnmatch, safer_move, to_list)

+ from koji.util import (

+     base64encode,

+     decode_bytes,

+     dslice,

+     joinpath,

+     move_and_symlink,

+     multi_fnmatch,

+     safer_move,

+     to_list

+ )

  

  try:

      # py 3.6+
@@ -76,6 +84,7 @@ 

  

  NUMERIC_TYPES = tuple(list(six.integer_types) + [float])

  

+ 

  def log_error(msg):

      logger.error(msg)

  
@@ -84,29 +93,30 @@ 

      entry['krb_principals'] = [x for x in entry['krb_principals'] if x is not None]

      return entry

  

+ 

  class Task(object):

      """A task for the build hosts"""

  

      fields = (

-                 ('task.id', 'id'),

-                 ('task.state', 'state'),

-                 ('task.create_time', 'create_time'),

-                 ('EXTRACT(EPOCH FROM create_time)', 'create_ts'),

-                 ('task.start_time', 'start_time'),

-                 ('EXTRACT(EPOCH FROM task.start_time)', 'start_ts'),

-                 ('task.completion_time', 'completion_time'),

-                 ('EXTRACT(EPOCH FROM completion_time)', 'completion_ts'),

-                 ('task.channel_id', 'channel_id'),

-                 ('task.host_id', 'host_id'),

-                 ('task.parent', 'parent'),

-                 ('task.label', 'label'),

-                 ('task.waiting', 'waiting'),

-                 ('task.awaited', 'awaited'),

-                 ('task.owner', 'owner'),

-                 ('task.method', 'method'),

-                 ('task.arch', 'arch'),

-                 ('task.priority', 'priority'),

-                 ('task.weight', 'weight'))

+         ('task.id', 'id'),

+         ('task.state', 'state'),

+         ('task.create_time', 'create_time'),

+         ('EXTRACT(EPOCH FROM create_time)', 'create_ts'),

+         ('task.start_time', 'start_time'),

+         ('EXTRACT(EPOCH FROM task.start_time)', 'start_ts'),

+         ('task.completion_time', 'completion_time'),

+         ('EXTRACT(EPOCH FROM completion_time)', 'completion_ts'),

+         ('task.channel_id', 'channel_id'),

+         ('task.host_id', 'host_id'),

+         ('task.parent', 'parent'),

+         ('task.label', 'label'),

+         ('task.waiting', 'waiting'),

+         ('task.awaited', 'awaited'),

+         ('task.owner', 'owner'),

+         ('task.method', 'method'),

+         ('task.arch', 'arch'),

+         ('task.priority', 'priority'),

+         ('task.weight', 'weight'))

  

      def __init__(self, id):

          self.id = id
@@ -128,8 +138,8 @@ 

          if host_id is None:

              return False

          task_id = self.id

-         #getting a row lock on this task to ensure task assignment sanity

-         #no other concurrent transaction should be altering this row

+         # getting a row lock on this task to ensure task assignment sanity

+         # no other concurrent transaction should be altering this row

          q = """SELECT state,host_id FROM task WHERE id=%(task_id)s FOR UPDATE"""

          r = _fetchSingle(q, locals())

          if not r:
@@ -153,7 +163,7 @@ 

          if user_id is None:

              return False

          task_id = self.id

-         #getting a row lock on this task to ensure task state sanity

+         # getting a row lock on this task to ensure task state sanity

          q = """SELECT owner FROM task WHERE id=%(task_id)s FOR UPDATE"""

          r = _fetchSingle(q, locals())

          if not r:
@@ -172,8 +182,8 @@ 

          info = self.getInfo(request=True)

          self.runCallbacks('preTaskStateChange', info, 'state', koji.TASK_STATES[newstate])

          self.runCallbacks('preTaskStateChange', info, 'host_id', host_id)

-         #we use row-level locks to keep things sane

-         #note the SELECT...FOR UPDATE

+         # we use row-level locks to keep things sane

+         # note the SELECT...FOR UPDATE

          task_id = self.id

          if not force:

              q = """SELECT state,host_id FROM task WHERE id=%(task_id)i FOR UPDATE"""
@@ -184,23 +194,23 @@ 

              if state == koji.TASK_STATES['FREE']:

                  if otherhost is not None:

                      log_error("Error: task %i is both free and locked (host %i)"

-                         % (task_id, otherhost))

+                               % (task_id, otherhost))

                      return False

              elif state == koji.TASK_STATES['ASSIGNED']:

                  if otherhost is None:

                      log_error("Error: task %i is assigned, but has no assignee"

-                         % (task_id))

+                               % (task_id))

                      return False

                  elif otherhost != host_id:

-                     #task is assigned to someone else

+                     # task is assigned to someone else

                      return False

-                 #otherwise the task is assigned to host_id, so keep going

+                 # otherwise the task is assigned to host_id, so keep going

              else:

                  if otherhost is None:

                      log_error("Error: task %i is non-free but unlocked (state %i)"

-                         % (task_id, state))

+                               % (task_id, state))

                  return False

-         #if we reach here, task is either

+         # if we reach here, task is either

          #  - free and unlocked

          #  - assigned to host_id

          #  - force option is enabled
@@ -228,7 +238,7 @@ 

              # get more complete data to return

              fields = self.fields + (('task.request', 'request'),)

              query = QueryProcessor(tables=['task'], clauses=['id=%(id)i'], values=vars(self),

-                             columns=[f[0] for f in fields], aliases=[f[1] for f in fields])

+                                    columns=[f[0] for f in fields], aliases=[f[1] for f in fields])

              ret = query.executeOne()

              if ret['request'].find('<?xml', 0, 10) == -1:

                  # handle older base64 encoded data
@@ -252,8 +262,8 @@ 

              raise koji.GenericError("No such task: %i" % self.id)

          oldstate = row[0]

          if koji.TASK_STATES[oldstate] in ['CLOSED', 'CANCELED', 'FAILED']:

-             raise koji.GenericError("Cannot free task %i, state is %s" % \

-                     (self.id, koji.TASK_STATES[oldstate]))

+             raise koji.GenericError("Cannot free task %i, state is %s" %

+                                     (self.id, koji.TASK_STATES[oldstate]))

          newstate = koji.TASK_STATES['FREE']

          newhost = None

          q = """UPDATE task SET state=%(newstate)s,host_id=%(newhost)s
@@ -360,8 +370,8 @@ 

          _dml(update, locals())

          self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES['CANCELED'])

          self.runCallbacks('postTaskStateChange', info, 'completion_ts', now)

-         #cancel associated builds (only if state is 'BUILDING')

-         #since we check build state, we avoid loops with cancel_build on our end

+         # cancel associated builds (only if state is 'BUILDING')

+         # since we check build state, we avoid loops with cancel_build on our end

          b_building = koji.BUILD_STATES['BUILDING']

          q = """SELECT id FROM build WHERE task_id = %(task_id)i

          AND state = %(b_building)i
@@ -369,7 +379,7 @@ 

          for (build_id,) in _fetchMulti(q, locals()):

              cancel_build(build_id, cancel_task=False)

          if recurse:

-             #also cancel child tasks

+             # also cancel child tasks

              self.cancelChildren()

          return True

  
@@ -392,8 +402,8 @@ 

          if parent is not None:

              if strict:

                  raise koji.GenericError("Task %d is not top-level (parent=%d)" % (task_id, parent))

-             #otherwise, find the top-level task and go from there

-             seen = {task_id:1}

+             # otherwise, find the top-level task and go from there

+             seen = {task_id: 1}

              while parent is not None:

                  if parent in seen:

                      raise koji.GenericError("Task LOOP at task %i" % task_id)
@@ -401,15 +411,15 @@ 

                  seen[task_id] = 1

                  parent = _singleValue(q, locals())

              return Task(task_id).cancelFull(strict=True)

-         #We handle the recursion ourselves, since self.cancel will stop at

-         #canceled or closed tasks.

+         # We handle the recursion ourselves, since self.cancel will stop at

+         # canceled or closed tasks.

          tasklist = [task_id]

          seen = {}

-         #query for use in loop

+         # query for use in loop

          q_children = """SELECT id FROM task WHERE parent = %(task_id)i"""

          for task_id in tasklist:

              if task_id in seen:

-                 #shouldn't happen

+                 # shouldn't happen

                  raise koji.GenericError("Task LOOP at task %i" % task_id)

              seen[task_id] = 1

              Task(task_id).cancel(recurse=False)
@@ -504,6 +514,7 @@ 

          koji.plugin.run_callbacks(cbtype, attribute=attr, old=old_val, new=new_val,

                                    info=info)

  

+ 

  def make_task(method, arglist, **opts):

      """Create a task

  
@@ -527,14 +538,14 @@ 

          pdata = dict(zip(fields, r))

          if pdata['state'] != koji.TASK_STATES['OPEN']:

              raise koji.GenericError("Parent task (id %(parent)s) is not open" % opts)

-         #default to a higher priority than parent

+         # default to a higher priority than parent

          opts.setdefault('priority', pdata['priority'] - 1)

          for f in ('owner', 'arch'):

              opts.setdefault(f, pdata[f])

          opts.setdefault('label', None)

      else:

          opts.setdefault('priority', koji.PRIO_DEFAULT)

-         #calling function should enforce priority limitations, if applicable

+         # calling function should enforce priority limitations, if applicable

          opts.setdefault('arch', 'noarch')

          if not context.session.logged_in:

              raise koji.GenericError('task must have an owner')
@@ -542,7 +553,7 @@ 

              opts['owner'] = context.session.user_id

          opts['label'] = None

          opts['parent'] = None

-     #determine channel from policy

+     # determine channel from policy

      policy_data = {}

      policy_data['method'] = method

      for key in 'arch', 'parent', 'label', 'owner':
@@ -605,13 +616,13 @@ 

              elif parts[0] == "parent":

                  if not opts.get('parent'):

                      logger.error("Invalid channel policy result (no parent task): %s",

-                                     ruleset.last_rule())

+                                  ruleset.last_rule())

                      raise koji.GenericError("invalid channel policy")

                  opts['channel_id'] = pdata['channel_id']

              elif parts[0] == "req":

                  if 'channel' not in opts:

                      logger.error('Invalid channel policy result (no channel requested): %s',

-                                     ruleset.last_rule())

+                                  ruleset.last_rule())

                      raise koji.GenericError("invalid channel policy")

                  opts['channel_id'] = req_channel_id

              else:
@@ -625,10 +636,12 @@ 

      opts['request'] = koji.xmlrpcplus.dumps(tuple(arglist), methodname=method)

      opts['state'] = koji.TASK_STATES['FREE']

      opts['method'] = method

-     koji.plugin.run_callbacks('preTaskStateChange', attribute='state', old=None, new='FREE', info=opts)

+     koji.plugin.run_callbacks(

+         'preTaskStateChange', attribute='state', old=None, new='FREE', info=opts)

      # stick it in the database

  

-     idata = dslice(opts, ['state', 'owner', 'method', 'request', 'priority', 'parent', 'label', 'channel_id', 'arch'])

+     idata = dslice(opts, ['state', 'owner', 'method', 'request', 'priority', 'parent', 'label',

+                           'channel_id', 'arch'])

      if opts.get('assign'):

          idata['state'] = koji.TASK_STATES['ASSIGNED']

          idata['host_id'] = opts['assign']
@@ -636,9 +649,11 @@ 

      insert.execute()

      task_id = _singleValue("SELECT currval('task_id_seq')", strict=True)

      opts['id'] = task_id

-     koji.plugin.run_callbacks('postTaskStateChange', attribute='state', old=None, new='FREE', info=opts)

+     koji.plugin.run_callbacks(

+         'postTaskStateChange', attribute='state', old=None, new='FREE', info=opts)

      return task_id

  

+ 

  def eventCondition(event, table=None):

      """return the proper WHERE condition to select data at the time specified by event. """

      if not table:
@@ -648,47 +663,52 @@ 

      if event is None:

          return """(%(table)sactive = TRUE)""" % locals()

      elif isinstance(event, six.integer_types):

-         return """(%(table)screate_event <= %(event)d AND ( %(table)srevoke_event IS NULL OR %(event)d < %(table)srevoke_event ))""" \

-             % locals()

+         return "(%(table)screate_event <= %(event)d AND ( %(table)srevoke_event IS NULL OR " \

+                "%(event)d < %(table)srevoke_event ))" % locals()

      else:

          raise koji.GenericError("Invalid event: %r" % event)
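
For reference, a hypothetical expansion of the wrapped template above, assuming
the ``table`` argument has already been normalized to a trailing-dot prefix:

    table, event = 'tag_listing.', 1000  # hypothetical values
    cond = ("(%(table)screate_event <= %(event)d AND ( %(table)srevoke_event IS NULL OR "
            "%(event)d < %(table)srevoke_event ))" % locals())
    print(cond)
    # (tag_listing.create_event <= 1000 AND ( tag_listing.revoke_event IS NULL
    #  OR 1000 < tag_listing.revoke_event ))
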

  

+ 

  def readGlobalInheritance(event=None):

      c = context.cnx.cursor()

      fields = ('tag_id', 'parent_id', 'name', 'priority', 'maxdepth', 'intransitive',

-                 'noconfig', 'pkg_filter')

+               'noconfig', 'pkg_filter')

      q = """SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id

      WHERE %s

      ORDER BY priority

      """ % (",".join(fields), eventCondition(event))

      c.execute(q, locals())

-     #convert list of lists into a list of dictionaries

+     # convert list of lists into a list of dictionaries

      return [dict(zip(fields, x)) for x in c.fetchall()]
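
The list-of-dicts conversion used here (and in the readers below) is the plain
zip idiom; a tiny standalone sketch with made-up rows:

    fields = ('tag_id', 'parent_id', 'priority')
    rows = [(1, 10, 0), (1, 20, 10)]  # made-up database rows
    print([dict(zip(fields, row)) for row in rows])
    # [{'tag_id': 1, 'parent_id': 10, 'priority': 0},
    #  {'tag_id': 1, 'parent_id': 20, 'priority': 10}]
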

  

+ 

  def readInheritanceData(tag_id, event=None):

      c = context.cnx.cursor()

-     fields = ('parent_id', 'name', 'priority', 'maxdepth', 'intransitive', 'noconfig', 'pkg_filter')

+     fields = ('parent_id', 'name', 'priority', 'maxdepth', 'intransitive', 'noconfig',

+               'pkg_filter')

      q = """SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id

      WHERE %s AND tag_id = %%(tag_id)i

      ORDER BY priority

      """ % (",".join(fields), eventCondition(event))

      c.execute(q, locals())

-     #convert list of lists into a list of dictionaries

+     # convert list of lists into a list of dictionaries

      data = [dict(zip(fields, x)) for x in c.fetchall()]

      # include the current tag_id as child_id, so we can retrace the inheritance chain later

      for datum in data:

          datum['child_id'] = tag_id

      return data

  

+ 

  def readDescendantsData(tag_id, event=None):

      c = context.cnx.cursor()

-     fields = ('tag_id', 'parent_id', 'name', 'priority', 'maxdepth', 'intransitive', 'noconfig', 'pkg_filter')

+     fields = ('tag_id', 'parent_id', 'name', 'priority', 'maxdepth', 'intransitive', 'noconfig',

+               'pkg_filter')

      q = """SELECT %s FROM tag_inheritance JOIN tag ON tag_id = id

      WHERE %s AND parent_id = %%(tag_id)i

      ORDER BY priority

      """ % (",".join(fields), eventCondition(event))

      c.execute(q, locals())

-     #convert list of lists into a list of dictionaries

+     # convert list of lists into a list of dictionaries

      data = [dict(zip(fields, x)) for x in c.fetchall()]

      return data

  
@@ -733,7 +753,7 @@ 

          elif not orig or clear:

              data[parent_id] = link

          else:

-             #not a delete request and we have a previous link to parent

+             # not a delete request and we have a previous link to parent

              for f in fields:

                  if orig[f] != link[f]:

                      data[parent_id] = link
@@ -752,7 +772,7 @@ 

          # nothing to do

          log_error("No inheritance changes")

          return

-     #check for duplicate priorities

+     # check for duplicate priorities

      pri_index = {}

      for link in six.itervalues(data):

          if link.get('delete link'):
@@ -761,15 +781,16 @@ 

      for pri, dups in six.iteritems(pri_index):

          if len(dups) <= 1:

              continue

-         #oops, duplicate entries for a single priority

+         # oops, duplicate entries for a single priority

          dup_ids = [link['parent_id'] for link in dups]

-         raise koji.GenericError("Inheritance priorities must be unique (pri %s: %r )" % (pri, dup_ids))

+         raise koji.GenericError("Inheritance priorities must be unique (pri %s: %r )" %

+                                 (pri, dup_ids))

      for parent_id, link in six.iteritems(data):

          if not link.get('is_update'):

              continue

          # revoke old values

          update = UpdateProcessor('tag_inheritance', values=locals(),

-                     clauses=['tag_id=%(tag_id)s', 'parent_id = %(parent_id)s'])

+                                  clauses=['tag_id=%(tag_id)s', 'parent_id = %(parent_id)s'])

          update.make_revoke()

          update.execute()

      for parent_id, link in six.iteritems(data):
@@ -786,6 +807,7 @@ 

          insert.make_create()

          insert.execute()

  

+ 

  def readFullInheritance(tag_id, event=None, reverse=False, stops=None, jumps=None):

      """Returns a list representing the full, ordered inheritance from tag"""

      if stops is None:
@@ -793,14 +815,17 @@ 

      if jumps is None:

          jumps = {}

      order = []

-     readFullInheritanceRecurse(tag_id, event, order, stops, {}, {}, 0, None, False, [], reverse, jumps)

+     readFullInheritanceRecurse(tag_id, event, order, stops, {}, {}, 0, None, False, [], reverse,

+                                jumps)

      return order

  

- def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdepth, maxdepth, noconfig, pfilter, reverse, jumps):

+ 

+ def readFullInheritanceRecurse(tag_id, event, order, prunes, top, hist, currdepth, maxdepth,

+                                noconfig, pfilter, reverse, jumps):

      if maxdepth is not None and maxdepth < 1:

          return

-     #note: maxdepth is relative to where we are, but currdepth is absolute from

-     #the top.

+     # note: maxdepth is relative to where we are, but currdepth is absolute from

+     # the top.

      currdepth += 1

      top = top.copy()

      top[tag_id] = 1
@@ -816,11 +841,11 @@ 

          if id in jumps:

              id = jumps[id]

          if id in top:

-             #LOOP!

+             # LOOP!

              if event is None:

                  # only log if the issue is current

                  log_error("Warning: INHERITANCE LOOP detected at %s -> %s, pruning" % (tag_id, id))

-             #auto prune

+             # auto prune

              continue

          if id in prunes:

              # ignore pruned tags
@@ -829,16 +854,16 @@ 

              # ignore intransitive inheritance links, except at root

              continue

          if link['priority'] < 0:

-             #negative priority indicates pruning, rather than inheritance

+             # negative priority indicates pruning, rather than inheritance

              prunes[id] = 1

              continue

          if reverse:

-             #maxdepth logic is different in this case. no propagation

+             # maxdepth logic is different in this case. no propagation

              if link['maxdepth'] is not None and link['maxdepth'] < currdepth - 1:

                  continue

              nextdepth = None

          else:

-             #propagate maxdepth

+             # propagate maxdepth

              nextdepth = link['maxdepth']

              if nextdepth is None:

                  if maxdepth is not None:
@@ -847,7 +872,7 @@ 

                  nextdepth = min(nextdepth, maxdepth) - 1

          link['nextdepth'] = nextdepth

          link['currdepth'] = currdepth

-         #propagate noconfig and pkg_filter controls

+         # propagate noconfig and pkg_filter controls

          if link['noconfig']:

              noconfig = True

          filter = list(pfilter)  # copy
@@ -857,10 +882,10 @@ 

          link['filter'] = filter

          # check history to avoid redundant entries

          if id in hist:

-             #already been there

-             #BUT, options may have been different

+             # already been there

+             # BUT, options may have been different

              rescan = True

-             #since rescans are possible, we might have to consider more than one previous hit

+             # since rescans are possible, we might have to consider more than one previous hit

              for previous in hist[id]:

                  sufficient = True       # is previous sufficient?

                  # if last depth was less than current, then previous insufficient
@@ -883,12 +908,13 @@ 

                  continue

          else:

              hist[id] = []

-         hist[id].append(link)   #record history

+         hist[id].append(link)  # record history

          order.append(link)

          if link['intransitive'] and reverse:

              # add link, but don't follow it

              continue

-         readFullInheritanceRecurse(id, event, order, prunes, top, hist, currdepth, nextdepth, noconfig, filter, reverse, jumps)

+         readFullInheritanceRecurse(id, event, order, prunes, top, hist, currdepth, nextdepth,

+                                    noconfig, filter, reverse, jumps)

  

  # tag-package operations

  #       add
@@ -902,22 +928,25 @@ 

  def _pkglist_remove(tag_id, pkg_id):

      clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')

      update = UpdateProcessor('tag_packages', values=locals(), clauses=clauses)

-     update.make_revoke()  #XXX user_id?

+     update.make_revoke()  # XXX user_id?

      update.execute()

  

+ 

  def _pkglist_owner_remove(tag_id, pkg_id):

      clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')

      update = UpdateProcessor('tag_package_owners', values=locals(), clauses=clauses)

-     update.make_revoke()  #XXX user_id?

+     update.make_revoke()  # XXX user_id?

      update.execute()

  

+ 

  def _pkglist_owner_add(tag_id, pkg_id, owner):

      _pkglist_owner_remove(tag_id, pkg_id)

      data = {'tag_id': tag_id, 'package_id': pkg_id, 'owner': owner}

      insert = InsertProcessor('tag_package_owners', data=data)

-     insert.make_create()  #XXX user_id?

+     insert.make_create()  # XXX user_id?

      insert.execute()

  

+ 

  def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):

      # revoke old entry (if present)

      _pkglist_remove(tag_id, pkg_id)
@@ -928,21 +957,23 @@ 

          'extra_arches': koji.parse_arches(extra_arches, strict=True, allow_none=True)

      }

      insert = InsertProcessor('tag_packages', data=data)

-     insert.make_create()  #XXX user_id?

+     insert.make_create()  # XXX user_id?

      insert.execute()

      _pkglist_owner_add(tag_id, pkg_id, owner)

  

- def pkglist_add(taginfo, pkginfo, owner=None, block=None, extra_arches=None, force=False, update=False):

+ 

+ def pkglist_add(taginfo, pkginfo, owner=None, block=None, extra_arches=None, force=False,

+                 update=False):

      """Add to (or update) package list for tag"""

      return _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches,

-             force, update, policy=True)

+                                force, update, policy=True)

  

  

  def _direct_pkglist_add(taginfo, pkginfo, owner, block, extra_arches, force,

-         update, policy=False):

+                         update, policy=False):

      """Like pkglist_add, but without policy or access check"""

-     #access control comes a little later (via an assert_policy)

-     #should not make any changes until after policy is checked

+     # access control comes a little later (via an assert_policy)

+     # should not make any changes until after policy is checked

      tag = get_tag(taginfo, strict=True)

      tag_id = tag['id']

      pkg = lookup_package(pkginfo, strict=False)
@@ -958,8 +989,8 @@ 

          action = 'block'

      if policy:

          context.session.assertLogin()

-         policy_data = {'tag' : tag_id, 'action' : action, 'package' : pkginfo, 'force' : force}

-         #don't check policy for admins using force

+         policy_data = {'tag': tag_id, 'action': action, 'package': pkginfo, 'force': force}

+         # don't check policy for admins using force

          if not (force and context.session.hasPerm('admin')):

              assert_policy('package_list', policy_data)

      if not pkg:
@@ -981,11 +1012,11 @@ 

      if previous is None:

          block = bool(block)

          if update and not force:

-             #if update flag is true, require that there be a previous entry

-             raise koji.GenericError("cannot update: tag %s has no data for package %s" \

-                     % (tag['name'], pkg['name']))

+             # if update flag is true, require that there be a previous entry

+             raise koji.GenericError("cannot update: tag %s has no data for package %s"

+                                     % (tag['name'], pkg['name']))

      else:

-         #already there (possibly via inheritance)

+         # already there (possibly via inheritance)

          if owner is None:

              owner = previous['owner_id']

          changed_owner = previous['owner_id'] != owner
@@ -995,14 +1026,14 @@ 

              block = bool(block)

          if extra_arches is None:

              extra_arches = previous['extra_arches']

-         #see if the data is the same

+         # see if the data is the same

          for key, value in (('blocked', block),

                             ('extra_arches', extra_arches)):

              if previous[key] != value:

                  changed = True

                  break

          if not changed and not changed_owner and not force:

-             #no point in adding it again with the same data

+             # no point in adding it again with the same data

              return

          if previous['blocked'] and not block and not force:

              raise koji.GenericError("package %s is blocked in tag %s" % (pkg['name'], tag['name']))
@@ -1020,6 +1051,7 @@ 

                                block=block, extra_arches=extra_arches,

                                force=force, update=update, user=user)

  

+ 

  def pkglist_remove(taginfo, pkginfo, force=False):

      """Remove package from the list for tag

  
@@ -1037,14 +1069,16 @@ 

      pkg = lookup_package(pkginfo, strict=True)

      if policy:

          context.session.assertLogin()

-         policy_data = {'tag' : tag['id'], 'action' : 'remove', 'package' : pkg['id'], 'force' : force}

-         #don't check policy for admins using force

+         policy_data = {'tag': tag['id'], 'action': 'remove', 'package': pkg['id'], 'force': force}

+         # don't check policy for admins using force

          if not (force and context.session.hasPerm('admin')):

              assert_policy('package_list', policy_data)

      user = get_user(context.session.user_id)

-     koji.plugin.run_callbacks('prePackageListChange', action='remove', tag=tag, package=pkg, user=user)

+     koji.plugin.run_callbacks(

+         'prePackageListChange', action='remove', tag=tag, package=pkg, user=user)

      _pkglist_remove(tag['id'], pkg['id'])

-     koji.plugin.run_callbacks('postPackageListChange', action='remove', tag=tag, package=pkg, user=user)

+     koji.plugin.run_callbacks(

+         'postPackageListChange', action='remove', tag=tag, package=pkg, user=user)

  

  

  def pkglist_block(taginfo, pkginfo, force=False):
@@ -1053,10 +1087,11 @@ 

      tag = get_tag(taginfo, strict=True)

      pkg = lookup_package(pkginfo, strict=True)

      if not readPackageList(tag['id'], pkgID=pkg['id'], inherit=True):

-         raise koji.GenericError("Package %s is not in tag listing for %s" % \

+         raise koji.GenericError("Package %s is not in tag listing for %s" %

                                  (pkg['name'], tag['name']))

      pkglist_add(taginfo, pkginfo, block=True, force=force)

  

+ 

  def pkglist_unblock(taginfo, pkginfo, force=False):

      """Unblock the package in tag

  
@@ -1066,42 +1101,48 @@ 

      tag = get_tag(taginfo, strict=True)

      pkg = lookup_package(pkginfo, strict=True)

      context.session.assertLogin()

-     policy_data = {'tag' : tag['id'], 'action' : 'unblock', 'package' : pkg['id'], 'force' : force}

-     #don't check policy for admins using force

+     policy_data = {'tag': tag['id'], 'action': 'unblock', 'package': pkg['id'], 'force': force}

+     # don't check policy for admins using force

      if not (force and context.session.hasPerm('admin')):

          assert_policy('package_list', policy_data)

      user = get_user(context.session.user_id)

-     koji.plugin.run_callbacks('prePackageListChange', action='unblock', tag=tag, package=pkg, user=user)

+     koji.plugin.run_callbacks(

+         'prePackageListChange', action='unblock', tag=tag, package=pkg, user=user)

      tag_id = tag['id']

      pkg_id = pkg['id']

      pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True)

      previous = pkglist.get(pkg_id, None)

      if previous is None:

-         raise koji.GenericError("no data (blocked or otherwise) for package %s in tag %s" \

-                 % (pkg['name'], tag['name']))

+         raise koji.GenericError("no data (blocked or otherwise) for package %s in tag %s"

+                                 % (pkg['name'], tag['name']))

      if not previous['blocked']:

          raise koji.GenericError("package %s NOT blocked in tag %s" % (pkg['name'], tag['name']))

      if previous['tag_id'] != tag_id:

          _pkglist_add(tag_id, pkg_id, previous['owner_id'], False, previous['extra_arches'])

      else:

-         #just remove the blocking entry

+         # just remove the blocking entry

          _pkglist_remove(tag_id, pkg_id)

-         #it's possible this was the only entry in the inheritance or that the next entry

-         #back is also a blocked entry. if so, we need to add it back as unblocked

+         # it's possible this was the only entry in the inheritance or that the next entry

+         # back is also a blocked entry. if so, we need to add it back as unblocked

          pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True)

          if pkg_id not in pkglist or pkglist[pkg_id]['blocked']:

              _pkglist_add(tag_id, pkg_id, previous['owner_id'], False, previous['extra_arches'])

-     koji.plugin.run_callbacks('postPackageListChange', action='unblock', tag=tag, package=pkg, user=user)

+     koji.plugin.run_callbacks(

+         'postPackageListChange', action='unblock', tag=tag, package=pkg, user=user)

+ 

  

  def pkglist_setowner(taginfo, pkginfo, owner, force=False):

      """Set the owner for package in tag"""

      pkglist_add(taginfo, pkginfo, owner=owner, force=force, update=True)

  

+ 

  def pkglist_setarches(taginfo, pkginfo, arches, force=False):

      """Set extra_arches for package in tag"""

      pkglist_add(taginfo, pkginfo, extra_arches=arches, force=force, update=True)

  

- def readPackageList(tagID=None, userID=None, pkgID=None, event=None, inherit=False, with_dups=False):

+ 

+ def readPackageList(tagID=None, userID=None, pkgID=None, event=None, inherit=False,

+                     with_dups=False):

      """Returns the package list for the specified tag or user.

  

      One of (tagID,userID,pkgID) must be specified
@@ -1130,13 +1171,13 @@ 

          tag_packages.package_id = tag_package_owners.package_id

      JOIN users ON users.id = tag_package_owners.owner

      WHERE %(cond1)s AND %(cond2)s"""

-     if tagID != None:

+     if tagID is not None:

          q += """

          AND tag.id = %%(tagID)i"""

-     if userID != None:

+     if userID is not None:

          q += """

          AND users.id = %%(userID)i"""

-     if pkgID != None:

+     if pkgID is not None:

          if isinstance(pkgID, six.integer_types):

              q += """

              AND package.id = %%(pkgID)i"""
@@ -1174,7 +1215,7 @@ 

          for p in _multiRow(q, locals(), [pair[1] for pair in fields]):

              pkgid = p['package_id']

              if not with_dups and pkgid in packages:

-                 #previous data supercedes

+                 # previous data supersedes

                  continue

              # apply package filters

              skip = False
@@ -1192,6 +1233,7 @@ 

                  packages[pkgid] = p

      return packages

  

+ 

  def list_tags(build=None, package=None, perms=True, queryOpts=None):

      """List tags.  If build is specified, only return tags associated with the

      given build.  If package is specified, only return tags associated with the
@@ -1241,7 +1283,8 @@ 

          packageinfo = lookup_package(package)

          if not packageinfo:

              raise koji.GenericError('invalid package: %s' % package)

-         fields.extend(['users.id', 'users.name', 'tag_packages.blocked', 'tag_packages.extra_arches'])

+         fields.extend(

+             ['users.id', 'users.name', 'tag_packages.blocked', 'tag_packages.extra_arches'])

          aliases.extend(['owner_id', 'owner_name', 'blocked', 'extra_arches'])

          joins.append('tag_packages ON tag.id = tag_packages.tag_id')

          clauses.append('tag_packages.active = true')
@@ -1258,7 +1301,9 @@ 

                             opts=queryOpts)

      return query.iterate()

  

- def readTaggedBuilds(tag, event=None, inherit=False, latest=False, package=None, owner=None, type=None):

+ 

+ def readTaggedBuilds(tag, event=None, inherit=False, latest=False, package=None, owner=None,

+                      type=None):

      """Returns a list of builds for specified tag

  

      set inherit=True to follow inheritance
@@ -1279,14 +1324,15 @@ 

      if inherit:

          taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]

  

-     #regardless of inherit setting, we need to use inheritance to read the

-     #package list

+     # regardless of inherit setting, we need to use inheritance to read the

+     # package list

      packages = readPackageList(tagID=tag, event=event, inherit=True, pkgID=package)

  

-     #these values are used for each iteration

+     # these values are used for each iteration

      fields = [('tag.id', 'tag_id'), ('tag.name', 'tag_name'), ('build.id', 'id'),

                ('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'),

-               ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),

+               ('build.epoch', 'epoch'), ('build.state', 'state'),

+               ('build.completion_time', 'completion_time'),

                ('build.start_time', 'start_time'),

                ('build.task_id', 'task_id'),

                ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),
@@ -1317,7 +1363,7 @@ 

              raise koji.GenericError('unsupported build type: %s' % type)

          btype_id = btype['id']

          type_join = ('JOIN build_types ON build.id = build_types.build_id '

-                 'AND btype_id = %(btype_id)s')

+                      'AND btype_id = %(btype_id)s')

  

      q = """SELECT %s

      FROM tag_listing
@@ -1330,7 +1376,8 @@ 

      JOIN volume ON volume.id = build.volume_id

      WHERE %s AND tag_id=%%(tagid)s

          AND build.state=%%(st_complete)i

-     """ % (', '.join([pair[0] for pair in fields]), type_join, eventCondition(event, 'tag_listing'))

+     """ % (', '.join([pair[0] for pair in fields]), type_join,

+            eventCondition(event, 'tag_listing'))

      if package:

          q += """AND package.name = %(package)s

          """
@@ -1344,7 +1391,7 @@ 

      builds = []

      seen = {}   # used to enforce the 'latest' option

      for tagid in taglist:

-         #log_error(koji.db._quoteparams(q,locals()))

+         # log_error(koji.db._quoteparams(q,locals()))

          for build in _multiRow(q, locals(), [pair[1] for pair in fields]):

              pkgid = build['package_id']

              pinfo = packages.get(pkgid, None)
@@ -1364,7 +1411,9 @@ 

  

      return builds

  

- def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, latest=True, rpmsigs=False, owner=None, type=None):

+ 

+ def readTaggedRPMS(tag, package=None, arch=None, event=None, inherit=False, latest=True,

+                    rpmsigs=False, owner=None, type=None):

      """Returns a list of rpms for specified tag

  

      set inherit=True to follow inheritance
@@ -1377,15 +1426,16 @@ 

      """

      taglist = [tag]

      if inherit:

-         #XXX really should cache this - it gets called several places

+         # XXX really should cache this - it gets called several places

          #   (however, it is fairly quick)

          taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]

  

-     builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, owner=owner, type=type)

-     #index builds

+     builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package,

+                               owner=owner, type=type)

+     # index builds

      build_idx = dict([(b['build_id'], b) for b in builds])

  

-     #the following query is run for each tag in the inheritance

+     # the following query is run for each tag in the inheritance

      fields = [('rpminfo.name', 'name'),

                ('rpminfo.version', 'version'),

                ('rpminfo.release', 'release'),
@@ -1399,11 +1449,11 @@ 

                ('rpminfo.build_id', 'build_id'),

                ('rpminfo.metadata_only', 'metadata_only'),

                ('rpminfo.extra', 'extra'),

-             ]

+               ]

      tables = ['rpminfo']

      joins = ['tag_listing ON rpminfo.build_id = tag_listing.build_id']

      clauses = [eventCondition(event, 'tag_listing'), 'tag_id=%(tagid)s']

-     data = {}  #tagid added later

+     data = {}  # tagid added later

      if package:

          joins.append('build ON rpminfo.build_id = build.id')

          joins.append('package ON package.id = build.pkg_id')
@@ -1429,20 +1479,21 @@ 

      # duplicate rpminfo entries, BUT since we make the query multiple times,

      # we can get duplicates if a package is multiply tagged.

      tags_seen = {}

+ 

      def _iter_rpms():

          for tagid in taglist:

              if tagid in tags_seen:

-                 #certain inheritance trees can (legitimately) have the same tag

-                 #appear more than once (perhaps once with a package filter and once

-                 #without). The hard part of that was already done by readTaggedBuilds.

-                 #We only need consider each tag once. Note how we use build_idx below.

-                 #(Without this, we could report the same rpm twice)

+                 # certain inheritance trees can (legitimately) have the same tag

+                 # appear more than once (perhaps once with a package filter and once

+                 # without). The hard part of that was already done by readTaggedBuilds.

+                 # We only need consider each tag once. Note how we use build_idx below.

+                 # (Without this, we could report the same rpm twice)

                  continue

              else:

                  tags_seen[tagid] = 1

              query.values['tagid'] = tagid

              for rpminfo in query.iterate():

-                 #note: we're checking against the build list because

+                 # note: we're checking against the build list because

                  # it has been filtered by the package list. The tag

                  # tools should endeavor to keep tag_listing sane w.r.t.

                  # the package list, but if there is disagreement the package
@@ -1451,11 +1502,12 @@ 

                  if build is None:

                      continue

                  elif build['tag_id'] != tagid:

-                     #wrong tag

+                     # wrong tag

                      continue

                  yield rpminfo

      return [_iter_rpms(), builds]

  

+ 

  def readTaggedArchives(tag, package=None, event=None, inherit=False, latest=True, type=None):

      """Returns a list of archives for specified tag

  
@@ -1469,16 +1521,17 @@ 

      """

      taglist = [tag]

      if inherit:

-         #XXX really should cache this - it gets called several places

+         # XXX really should cache this - it gets called several places

          #   (however, it is fairly quick)

          taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]

  

      # If type == 'maven', we require that both the build *and* the archive have Maven metadata

-     builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, type=type)

-     #index builds

+     builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package,

+                               type=type)

+     # index builds

      build_idx = dict([(b['build_id'], b) for b in builds])

  

-     #the following query is run for each tag in the inheritance

+     # the following query is run for each tag in the inheritance

      fields = [('archiveinfo.id', 'id'),

                ('archiveinfo.type_id', 'type_id'),

                ('archiveinfo.btype_id', 'btype_id'),
@@ -1491,7 +1544,7 @@ 

                ('archiveinfo.checksum_type', 'checksum_type'),

                ('archiveinfo.metadata_only', 'metadata_only'),

                ('archiveinfo.extra', 'extra'),

-              ]

+               ]

      tables = ['archiveinfo']

      joins = ['tag_listing ON archiveinfo.build_id = tag_listing.build_id',

               'btype ON archiveinfo.btype_id = btype.id']
@@ -1527,17 +1580,17 @@ 

      tags_seen = {}

      for tagid in taglist:

          if tagid in tags_seen:

-             #certain inheritance trees can (legitimately) have the same tag

-             #appear more than once (perhaps once with a package filter and once

-             #without). The hard part of that was already done by readTaggedBuilds.

-             #We only need consider each tag once. Note how we use build_idx below.

-             #(Without this, we could report the same rpm twice)

+             # certain inheritance trees can (legitimately) have the same tag

+             # appear more than once (perhaps once with a package filter and once

+             # without). The hard part of that was already done by readTaggedBuilds.

+             # We only need consider each tag once. Note how we use build_idx below.

+             # (Without this, we could report the same rpm twice)

              continue

          else:

              tags_seen[tagid] = 1

          query.values = {'tagid': tagid, 'package': package}

          for archiveinfo in query.execute():

-             #note: we're checking against the build list because

+             # note: we're checking against the build list because

              # it has been filtered by the package list. The tag

              # tools should endeavor to keep tag_listing sane w.r.t.

              # the package list, but if there is disagreement the package
@@ -1546,11 +1599,12 @@ 

              if build is None:

                  continue

              elif build['tag_id'] != tagid:

-                 #wrong tag

+                 # wrong tag

                  continue

              archives.append(archiveinfo)

      return [archives, builds]

  

+ 

  def check_tag_access(tag_id, user_id=None):

      """Determine if user has access to tag package with tag.

  
@@ -1576,11 +1630,13 @@ 

              return (False, override, "tag requires %s permission" % needed_perm)

      return (True, override, "")

  

+ 

  def assert_tag_access(tag_id, user_id=None, force=False):

      access, override, reason = check_tag_access(tag_id, user_id)

      if not access and not (override and force):

          raise koji.ActionNotAllowed(reason)

  

+ 

  def _tag_build(tag, build, user_id=None, force=False):

      """Tag a build

  
@@ -1600,7 +1656,7 @@ 

      else:

          # use the user associated with the current session

          user = get_user(context.session.user_id, strict=True)

-     #access check

+     # access check

      assert_tag_access(tag['id'], user_id=user_id, force=force)

      return _direct_tag_build(tag, build, user, force)

  
@@ -1621,21 +1677,21 @@ 

      table = 'tag_listing'

      clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i')

      query = QueryProcessor(columns=['build_id'], tables=[table],

-                            clauses=('active = TRUE',)+clauses,

-                            values=locals(), opts={'rowlock':True})

-     #note: tag_listing is unique on (build_id, tag_id, active)

+                            clauses=('active = TRUE',) + clauses,

+                            values=locals(), opts={'rowlock': True})

+     # note: tag_listing is unique on (build_id, tag_id, active)

      if query.executeOne():

-         #already tagged

+         # already tagged

          if not force:

              raise koji.TagError("build %s already tagged (%s)" % (nvr, tag['name']))

-         #otherwise we retag

+         # otherwise we retag

          retag = True

      if retag:

-         #revoke the old tag first

+         # revoke the old tag first

          update = UpdateProcessor(table, values=locals(), clauses=clauses)

          update.make_revoke(user_id=user_id)

          update.execute()

-     #tag the package

+     # tag the package

      insert = InsertProcessor(table)

      insert.set(tag_id=tag_id, build_id=build_id)

      insert.make_create(user_id=user_id)
@@ -1665,16 +1721,18 @@ 

  

  def _direct_untag_build(tag, build, user, strict=True, force=False):

      """Directly untag a build. No access check or value lookup."""

-     koji.plugin.run_callbacks('preUntag', tag=tag, build=build, user=user, force=force, strict=strict)

+     koji.plugin.run_callbacks(

+         'preUntag', tag=tag, build=build, user=user, force=force, strict=strict)

      values = {'tag_id': tag['id'], 'build_id': build['id']}

      update = UpdateProcessor('tag_listing', values=values,

-                 clauses=['tag_id=%(tag_id)i', 'build_id=%(build_id)i'])

+                              clauses=['tag_id=%(tag_id)i', 'build_id=%(build_id)i'])

      update.make_revoke(user_id=user['id'])

      count = update.execute()

      if count == 0 and strict:

          nvr = "%(name)s-%(version)s-%(release)s" % build

          raise koji.TagError("build %s not in tag %s" % (nvr, tag['name']))

-     koji.plugin.run_callbacks('postUntag', tag=tag, build=build, user=user, force=force, strict=strict)

+     koji.plugin.run_callbacks(

+         'postUntag', tag=tag, build=build, user=user, force=force, strict=strict)

  

  

  # tag-group operations
@@ -1687,7 +1745,7 @@ 

  

  def grplist_add(taginfo, grpinfo, block=False, force=False, **opts):

      """Add to (or update) group list for tag"""

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grplist_add(taginfo, grpinfo, block, force, **opts)

  
@@ -1702,13 +1760,13 @@ 

      previous = groups.get(group['id'], None)

      cfg_fields = ('exported', 'display_name', 'is_default', 'uservisible',

                    'description', 'langonly', 'biarchonly', 'blocked')

-     #prevent user-provided opts from doing anything strange

+     # prevent user-provided opts from doing anything strange

      opts = dslice(opts, cfg_fields, strict=False)

      if previous is not None:

-         #already there (possibly via inheritance)

+         # already there (possibly via inheritance)

          if previous['blocked'] and not force:

              raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name']))

-         #check for duplication and grab old data for defaults

+         # check for duplication and grab old data for defaults

          changed = False

          for field in cfg_fields:

              old = previous[field]
@@ -1718,9 +1776,9 @@ 

              else:

                  opts[field] = old

          if not changed:

-             #no point in adding it again with the same data

+             # no point in adding it again with the same data

              return

-     #provide available defaults and sanity check data

+     # provide available defaults and sanity check data

      opts.setdefault('display_name', group['name'])

      opts.setdefault('biarchonly', False)

      opts.setdefault('exported', True)
@@ -1728,12 +1786,12 @@ 

      # XXX ^^^

      opts['tag_id'] = tag['id']

      opts['group_id'] = group['id']

-     #revoke old entry (if present)

+     # revoke old entry (if present)

      update = UpdateProcessor('group_config', values=opts,

-                 clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s'])

+                              clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s'])

      update.make_revoke()

      update.execute()

-     #add new entry

+     # add new entry

      insert = InsertProcessor('group_config', data=opts)

      insert.make_create()

      insert.execute()
@@ -1755,7 +1813,7 @@ 

      Really this shouldn't be used except in special cases

      Most of the time you really want to use the block or unblock functions

      """

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grplist_remove(taginfo, grpinfo, force)

  
@@ -1806,8 +1864,8 @@ 

      table = 'group_config'

      clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')

      query = QueryProcessor(columns=['blocked'], tables=[table],

-                            clauses=('active = TRUE',)+clauses,

-                            values=locals(), opts={'rowlock':True})

+                            clauses=('active = TRUE',) + clauses,

+                            values=locals(), opts={'rowlock': True})

      blocked = query.singleValue(strict=False)

      if not blocked:

          raise koji.GenericError("group %s is NOT blocked in tag %s" % (group['name'], tag['name']))
@@ -1826,7 +1884,7 @@ 

  

  def grp_pkg_add(taginfo, grpinfo, pkg_name, block=False, force=False, **opts):

      """Add package to group for tag"""

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grp_pkg_add(taginfo, grpinfo, pkg_name, block, force, **opts)

  
@@ -1845,14 +1903,14 @@ 

          raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name']))

      previous = grp_cfg['packagelist'].get(pkg_name, None)

      cfg_fields = ('type', 'basearchonly', 'requires')

-     #prevent user-provided opts from doing anything strange

+     # prevent user-provided opts from doing anything strange

      opts = dslice(opts, cfg_fields, strict=False)

      if previous is not None:

-         #already there (possibly via inheritance)

+         # already there (possibly via inheritance)

          if previous['blocked'] and not force:

-             raise koji.GenericError("package %s blocked in group %s, tag %s" \

-                     % (pkg_name, group['name'], tag['name']))

-         #check for duplication and grab old data for defaults

+             raise koji.GenericError("package %s blocked in group %s, tag %s"

+                                     % (pkg_name, group['name'], tag['name']))

+         # check for duplication and grab old data for defaults

          changed = False

          for field in cfg_fields:

              old = previous[field]
@@ -1862,23 +1920,25 @@ 

              else:

                  opts[field] = old

          if block:

-             #from condition above, either previous is not blocked or force is on,

-             #either way, we should add the entry

+             # from condition above, either previous is not blocked or force is on,

+             # either way, we should add the entry

              changed = True

          if not changed and not force:

-             #no point in adding it again with the same data (unless force is on)

+             # no point in adding it again with the same data (unless force is on)

              return

      opts.setdefault('type', 'mandatory')

      opts['group_id'] = group['id']

      opts['tag_id'] = tag['id']

      opts['package'] = pkg_name

      opts['blocked'] = block

-     #revoke old entry (if present)

+     # revoke old entry (if present)

      update = UpdateProcessor('group_package_listing', values=opts,

-                 clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'package=%(package)s'])

+                              clauses=['group_id=%(group_id)s',

+                                       'tag_id=%(tag_id)s',

+                                       'package=%(package)s'])

      update.make_revoke()

      update.execute()

-     #add new entry

+     # add new entry

      insert = InsertProcessor('group_package_listing', data=opts)

      insert.make_create()

      insert.execute()
@@ -1890,7 +1950,7 @@ 

      Really this shouldn't be used except in special cases

      Most of the time you really want to use the block or unblock functions

      """

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grp_pkg_remove(taginfo, grpinfo, pkg_name, force)

  
@@ -1900,7 +1960,9 @@ 

      tag_id = get_tag_id(taginfo, strict=True)

      grp_id = get_group_id(grpinfo, strict=True)

      update = UpdateProcessor('group_package_listing', values=locals(),

-                 clauses=['package=%(pkg_name)s', 'tag_id=%(tag_id)s', 'group_id = %(grp_id)s'])

+                              clauses=['package=%(pkg_name)s',

+                                       'tag_id=%(tag_id)s',

+                                       'group_id = %(grp_id)s'])

      update.make_revoke()

      update.execute()

  
@@ -1928,12 +1990,12 @@ 

      grp_id = get_group_id(grpinfo, strict=True)

      clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s')

      query = QueryProcessor(columns=['blocked'], tables=[table],

-                            clauses=('active = TRUE',)+clauses,

-                            values=locals(), opts={'rowlock':True})

+                            clauses=('active = TRUE',) + clauses,

+                            values=locals(), opts={'rowlock': True})

      blocked = query.singleValue(strict=False)

      if not blocked:

-         raise koji.GenericError("package %s is NOT blocked in group %s, tag %s" \

-                     % (pkg_name, grp_id, tag_id))

+         raise koji.GenericError("package %s is NOT blocked in group %s, tag %s"

+                                 % (pkg_name, grp_id, tag_id))

      update = UpdateProcessor('group_package_listing', values=locals(), clauses=clauses)

      update.make_revoke()

      update.execute()
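
The whitespace changes here are mechanical flake8 fixes: a space after the colon in dict literals (E231) and spaces around the '+' that concatenates the clause tuples (the E225/E226 operator-spacing family). In isolation:

    clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')
    all_clauses = ('active = TRUE',) + clauses  # spaces around '+'
    opts = {'rowlock': True}                    # space after ':'
    print(all_clauses, opts)
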
@@ -1949,7 +2011,7 @@ 

  

  def grp_req_add(taginfo, grpinfo, reqinfo, block=False, force=False, **opts):

      """Add group requirement to group for tag"""

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grp_req_add(taginfo, grpinfo, reqinfo, block, force, **opts)

  
@@ -1969,14 +2031,14 @@ 

          raise koji.GenericError("group %s is blocked in tag %s" % (group['name'], tag['name']))

      previous = grp_cfg['grouplist'].get(req['id'], None)

      cfg_fields = ('type', 'is_metapkg')

-     #prevent user-provided opts from doing anything strange

+     # prevent user-provided opts from doing anything strange

      opts = dslice(opts, cfg_fields, strict=False)

      if previous is not None:

-         #already there (possibly via inheritance)

+         # already there (possibly via inheritance)

          if previous['blocked'] and not force:

-             raise koji.GenericError("requirement on group %s blocked in group %s, tag %s" \

-                     % (req['name'], group['name'], tag['name']))

-         #check for duplication and grab old data for defaults

+             raise koji.GenericError("requirement on group %s blocked in group %s, tag %s"

+                                     % (req['name'], group['name'], tag['name']))

+         # check for duplication and grab old data for defaults

          changed = False

          for field in cfg_fields:

              old = previous[field]
@@ -1986,23 +2048,25 @@ 

              else:

                  opts[field] = old

          if block:

-             #from condition above, either previous is not blocked or force is on,

-             #either way, we should add the entry

+             # from condition above, either previous is not blocked or force is on,

+             # either way, we should add the entry

              changed = True

          if not changed:

-             #no point in adding it again with the same data

+             # no point in adding it again with the same data

              return

      opts.setdefault('type', 'mandatory')

      opts['group_id'] = group['id']

      opts['tag_id'] = tag['id']

      opts['req_id'] = req['id']

      opts['blocked'] = block

-     #revoke old entry (if present)

+     # revoke old entry (if present)

      update = UpdateProcessor('group_req_listing', values=opts,

-                 clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'req_id=%(req_id)s'])

+                              clauses=['group_id=%(group_id)s',

+                                       'tag_id=%(tag_id)s',

+                                       'req_id=%(req_id)s'])

      update.make_revoke()

      update.execute()

-     #add new entry

+     # add new entry

      insert = InsertProcessor('group_req_listing', data=opts)

      insert.make_create()

      insert.execute()
@@ -2014,7 +2078,7 @@ 

      Really this shouldn't be used except in special cases

      Most of the time you really want to use the block or unblock functions

      """

-     #only admins....

+     # only admins....

      context.session.assertPerm('tag')

      _grp_req_remove(taginfo, grpinfo, reqinfo, force)

  
@@ -2025,7 +2089,9 @@ 

      grp_id = get_group_id(grpinfo, strict=True)

      req_id = get_group_id(reqinfo, strict=True)

      update = UpdateProcessor('group_req_listing', values=locals(),

-                 clauses=['req_id=%(req_id)s', 'tag_id=%(tag_id)s', 'group_id = %(grp_id)s'])

+                              clauses=['req_id=%(req_id)s',

+                                       'tag_id=%(tag_id)s',

+                                       'group_id = %(grp_id)s'])

      update.make_revoke()

      update.execute()

  
@@ -2055,12 +2121,12 @@ 

  

      clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s')

      query = QueryProcessor(columns=['blocked'], tables=[table],

-                            clauses=('active = TRUE',)+clauses,

-                            values=locals(), opts={'rowlock':True})

+                            clauses=('active = TRUE',) + clauses,

+                            values=locals(), opts={'rowlock': True})

      blocked = query.singleValue(strict=False)

      if not blocked:

-         raise koji.GenericError("group req %s is NOT blocked in group %s, tag %s" \

-                     % (req_id, grp_id, tag_id))

+         raise koji.GenericError("group req %s is NOT blocked in group %s, tag %s"

+                                 % (req_id, grp_id, tag_id))

      update = UpdateProcessor('group_req_listing', values=locals(), clauses=clauses)

      update.make_revoke()

      update.execute()
@@ -2111,11 +2177,11 @@ 

              for grp_pkg in _multiRow(q, locals(), fields):

                  grp_id = grp_pkg['group_id']

                  if grp_id not in groups:

-                     #tag does not have this group

+                     # tag does not have this group

                      continue

                  group = groups[grp_id]

                  if group['blocked']:

-                     #ignore blocked groups

+                     # ignore blocked groups

                      continue

                  pkg_name = grp_pkg['package']

                  group['packagelist'].setdefault(pkg_name, grp_pkg)
@@ -2132,24 +2198,26 @@ 

              for grp_req in _multiRow(q, locals(), fields):

                  grp_id = grp_req['group_id']

                  if grp_id not in groups:

-                     #tag does not have this group

+                     # tag does not have this group

                      continue

                  group = groups[grp_id]

                  if group['blocked']:

-                     #ignore blocked groups

+                     # ignore blocked groups

                      continue

                  req_id = grp_req['req_id']

                  if req_id not in groups:

-                     #tag does not have this group

+                     # tag does not have this group

                      continue

                  elif groups[req_id]['blocked']:

-                     #ignore blocked groups

+                     # ignore blocked groups

                      continue

                  group['grouplist'].setdefault(req_id, grp_req)

  

      return groups

  

- def readTagGroups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True, incl_blocked=False):

+ 

+ def readTagGroups(tag, event=None, inherit=True, incl_pkgs=True, incl_reqs=True,

+                   incl_blocked=False):

      """Return group data for the tag with blocked entries removed

  

      Also scrubs data into an xmlrpc-safe format (no integer keys)
@@ -2159,23 +2227,26 @@ 

      groups = get_tag_groups(tag, event, inherit, incl_pkgs, incl_reqs)

      groups = to_list(groups.values())

      for group in groups:

-         #filter blocked entries and collapse to a list

+         # filter blocked entries and collapse to a list

          if 'packagelist' in group:

              if incl_blocked:

                  group['packagelist'] = to_list(group['packagelist'].values())

              else:

-                 group['packagelist'] = [x for x in group['packagelist'].values() if not x['blocked']]

+                 group['packagelist'] = [x for x in group['packagelist'].values()

+                                         if not x['blocked']]

          if 'grouplist' in group:

              if incl_blocked:

                  group['grouplist'] = to_list(group['grouplist'].values())

              else:

-                 group['grouplist'] = [x for x in group['grouplist'].values() if not x['blocked']]

-     #filter blocked entries and collapse to a list

+                 group['grouplist'] = [x for x in group['grouplist'].values()

+                                       if not x['blocked']]

+     # filter blocked entries and collapse to a list

      if incl_blocked:

          return groups

      else:

          return [x for x in groups if not x['blocked']]
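
Long comprehensions are brought under the line-length limit by breaking before the trailing 'if' filter rather than letting the line run over. The same wrap on sample data:

    groups = [{'name': 'build', 'blocked': False},
              {'name': 'srpm-build', 'blocked': True}]
    visible = [g for g in groups
               if not g['blocked']]   # one comprehension, two lines
    assert [g['name'] for g in visible] == ['build']
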

  

+ 

  def set_host_enabled(hostname, enabled=True):

      context.session.assertPerm('host')

      host = get_host(hostname)
@@ -2192,6 +2263,7 @@ 

      insert.make_create()

      insert.execute()

  

+ 

  def add_host_to_channel(hostname, channel_name, create=False):

      """Add the host to the specified channel

  
@@ -2199,29 +2271,31 @@ 

      """

      context.session.assertPerm('host')

      host = get_host(hostname)

-     if host == None:

+     if host is None:

          raise koji.GenericError('host does not exist: %s' % hostname)

      host_id = host['id']

      channel_id = get_channel_id(channel_name, create=create)

-     if channel_id == None:

+     if channel_id is None:

          raise koji.GenericError('channel does not exist: %s' % channel_name)

      channels = list_channels(host_id)

      for channel in channels:

          if channel['id'] == channel_id:

-             raise koji.GenericError('host %s is already subscribed to the %s channel' % (hostname, channel_name))

+             raise koji.GenericError('host %s is already subscribed to the %s channel' %

+                                     (hostname, channel_name))

      insert = InsertProcessor('host_channels')

      insert.set(host_id=host_id, channel_id=channel_id)

      insert.make_create()

      insert.execute()

  

+ 

  def remove_host_from_channel(hostname, channel_name):

      context.session.assertPerm('host')

      host = get_host(hostname)

-     if host == None:

+     if host is None:

          raise koji.GenericError('host does not exist: %s' % hostname)

      host_id = host['id']

      channel_id = get_channel_id(channel_name)

-     if channel_id == None:

+     if channel_id is None:

          raise koji.GenericError('channel does not exist: %s' % channel_name)

      found = False

      channels = list_channels(host_id)
@@ -2230,7 +2304,8 @@ 

              found = True

              break

      if not found:

-         raise koji.GenericError('host %s is not subscribed to the %s channel' % (hostname, channel_name))

+         raise koji.GenericError('host %s is not subscribed to the %s channel' %

+                                 (hostname, channel_name))

  

      values = {'host_id': host_id, 'channel_id': channel_id}

      clauses = ['host_id = %(host_id)i AND channel_id = %(channel_id)i']
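
The '== None' to 'is None' change is E711: None is a singleton, and an identity check cannot be fooled by a custom __eq__ on the other operand. A quick demonstration of the difference:

    class AlwaysEqual(object):
        def __eq__(self, other):
            return True                # pathological but legal __eq__

    obj = AlwaysEqual()
    print(obj == None)                 # True -- misleading (and E711)
    print(obj is None)                 # False -- reliable identity test
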
@@ -2252,6 +2327,7 @@ 

      update.set(name=new)

      update.execute()

  

+ 

  def remove_channel(channel_name, force=False):

      """Remove a channel

  
@@ -2266,12 +2342,12 @@ 

      channel_id = get_channel_id(channel_name, strict=True)

      # check for task references

      query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'],

-                     values=locals(), columns=['id'], opts={'limit':1})

-     #XXX slow query

+                            values=locals(), columns=['id'], opts={'limit': 1})

+     # XXX slow query

      if query.execute():

          raise koji.GenericError('channel %s has task references' % channel_name)

      query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'],

-                     values=locals(), columns=['host_id'], opts={'limit':1})

+                            values=locals(), columns=['host_id'], opts={'limit': 1})

      if query.execute():

          if not force:

              raise koji.GenericError('channel %s has host references' % channel_name)
@@ -2280,6 +2356,7 @@ 

      delete = """DELETE FROM channels WHERE id=%(channel_id)i"""

      _dml(delete, locals())

  

+ 

  def get_ready_hosts():

      """Return information about hosts that are ready to build.

  
@@ -2309,6 +2386,7 @@ 

          host['channels'] = [row[0] for row in c.fetchall()]

      return hosts

  

+ 

  def get_all_arches():

      """Return a list of all (canonical) arches available from hosts"""

      ret = {}
@@ -2316,11 +2394,12 @@ 

          if arches is None:

              continue

          for arch in arches.split():

-             #in a perfect world, this list would only include canonical

-             #arches, but not all admins will undertand that.

+             # in a perfect world, this list would only include canonical

+             # arches, but not all admins will understand that.

              ret[koji.canonArch(arch)] = 1

      return to_list(ret.keys())

  

+ 

  def get_active_tasks(host=None):

      """Return data on tasks that are yet to be run"""

      fields = ['id', 'state', 'channel_id', 'host_id', 'arch', 'method', 'priority', 'create_time']
@@ -2336,13 +2415,14 @@ 

          clauses = [clause]

      else:

          clauses = ['state IN (%(FREE)i,%(ASSIGNED)i)']

-     queryOpts = {'limit' : 100, 'order' : 'priority,create_time'}

+     queryOpts = {'limit': 100, 'order': 'priority,create_time'}

      query = QueryProcessor(columns=fields, tables=['task'], clauses=clauses,

                             values=values, opts=queryOpts)

      return query.execute()

  

+ 

  def get_task_descendents(task, childMap=None, request=False):

-     if childMap == None:

+     if childMap is None:

          childMap = {}

      children = task.getChildren(request=request)

      children.sort(key=lambda x: x['id'])
@@ -2352,6 +2432,7 @@ 

          get_task_descendents(Task(child['id']), childMap, request)

      return childMap

  

+ 

  def maven_tag_archives(tag_id, event_id=None, inherit=True):

      """

      Get Maven artifacts associated with the given tag, following inheritance.
@@ -2400,6 +2481,7 @@ 

      # group_id/artifact_id/version/build_id/archive_id, which is much smaller than

      # the full query

      # ballpark estimate: 20-25% of total, less with heavy duplication of indexed values

+ 

      def _iter_archives():

          for tag_id in taglist:

              taginfo = get_tag(tag_id, strict=True, event=event_id)
@@ -2446,6 +2528,7 @@ 

                      yield archive

      return _iter_archives()

  

+ 

  def repo_init(tag, with_src=False, with_debuginfo=False, event=None, with_separate_src=False):

      """Create a new repo entry in the INIT state, return full repo data

  
@@ -2455,8 +2538,9 @@ 

      logger = logging.getLogger("koji.hub.repo_init")

      state = koji.REPO_INIT

      tinfo = get_tag(tag, strict=True, event=event)

-     koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=with_src, with_debuginfo=with_debuginfo,

-                               event=event, repo_id=None, with_separate_src=with_separate_src)

+     koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=with_src,

+                               with_debuginfo=with_debuginfo, event=event, repo_id=None,

+                               with_separate_src=with_separate_src)

      tag_id = tinfo['id']

      repo_arches = {}

      if with_separate_src:
@@ -2471,7 +2555,7 @@ 

      if event is None:

          event_id = _singleValue("SELECT get_event()")

      else:

-         #make sure event is valid

+         # make sure event is valid

          q = "SELECT time FROM events WHERE id=%(event)s"

          event_time = _singleValue(q, locals(), strict=True)

          event_id = event
@@ -2485,12 +2569,12 @@ 

      #       see https://pagure.io/koji/issue/588 for background

      rpms, builds = readTaggedRPMS(tag_id, event=event_id, inherit=True, latest=latest)

      groups = readTagGroups(tag_id, event=event_id, inherit=True)

-     blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values() \

-                   if pkg['blocked']]

+     blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values()

+               if pkg['blocked']]

      repodir = koji.pathinfo.repo(repo_id, tinfo['name'])

-     os.makedirs(repodir)  #should not already exist

+     os.makedirs(repodir)  # should not already exist

  

-     #generate comps and groups.spec

+     # generate comps and groups.spec

      groupsdir = "%s/groups" % (repodir)

      koji.ensuredir(groupsdir)

      comps = koji.generate_comps(groups, expand_groups=True)
@@ -2499,24 +2583,24 @@ 

  

      # write repo info to disk

      repo_info = {

-             'id': repo_id,

-             'tag': tinfo['name'],

-             'tag_id': tinfo['id'],

-             'event_id': event_id,

-             'with_src': with_src,

-             'with_separate_src': with_separate_src,

-             'with_debuginfo': with_debuginfo,

-             }

+         'id': repo_id,

+         'tag': tinfo['name'],

+         'tag_id': tinfo['id'],

+         'event_id': event_id,

+         'with_src': with_src,

+         'with_separate_src': with_separate_src,

+         'with_debuginfo': with_debuginfo,

+     }

      with open('%s/repo.json' % repodir, 'w') as fp:

          json.dump(repo_info, fp, indent=2)

  

-     #get build dirs

+     # get build dirs

      relpathinfo = koji.PathInfo(topdir='toplink')

      builddirs = {}

      for build in builds:

          relpath = relpathinfo.build(build)

          builddirs[build['id']] = relpath.lstrip('/')

-     #generate pkglist files

+     # generate pkglist files

      pkglist = {}

      for repoarch in repo_arches:

          archdir = joinpath(repodir, repoarch)
@@ -2526,7 +2610,7 @@ 

          top_link = joinpath(archdir, 'toplink')

          os.symlink(top_relpath, top_link)

          pkglist[repoarch] = open(joinpath(archdir, 'pkglist'), 'w')

-     #NOTE - rpms is now an iterator

+     # NOTE - rpms is now an iterator

      for rpminfo in rpms:

          if not with_debuginfo and koji.is_debuginfo(rpminfo['name']):

              continue
@@ -2552,7 +2636,7 @@ 

      for repoarch in repo_arches:

          pkglist[repoarch].close()

  

-     #write blocked package lists

+     # write blocked package lists

      for repoarch in repo_arches:

          blocklist = open(joinpath(repodir, repoarch, 'blocklist'), 'w')

          for pkg in blocks:
@@ -2569,7 +2653,7 @@ 

                           'release': archive['build_release'],

                           'epoch': archive['build_epoch'],

                           'volume_name': archive['volume_name'],

-                         }

+                          }

              srcdir = joinpath(koji.pathinfo.mavenbuild(buildinfo),

                                koji.pathinfo.mavenrepo(archive))

              destlink = joinpath(repodir, 'maven',
@@ -2582,21 +2666,23 @@ 

          created_dirs = set()

          for srcdir, destlink in dir_links:

              dest_parent = os.path.dirname(destlink)

-             if not dest_parent in created_dirs:

+             if dest_parent not in created_dirs:

                  koji.ensuredir(dest_parent)

                  created_dirs.add(dest_parent)

              relpath = os.path.relpath(srcdir, dest_parent)

              try:

                  os.symlink(relpath, destlink)

-             except:

+             except Exception:

                  log_error('Error linking %s to %s' % (destlink, relpath))

          for artifact_dir, artifacts in six.iteritems(artifact_dirs):

              _write_maven_repo_metadata(artifact_dir, artifacts)

  

-     koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=with_src, with_debuginfo=with_debuginfo,

-                               event=event, repo_id=repo_id, with_separate_src=with_separate_src)

+     koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=with_src,

+                               with_debuginfo=with_debuginfo, event=event, repo_id=repo_id,

+                               with_separate_src=with_separate_src)

      return [repo_id, event_id]
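
The bare 'except:' inside the symlink loop becomes 'except Exception:' (E722). A bare handler also catches SystemExit and KeyboardInterrupt, which a long-running daemon should let propagate. A sketch of the same pattern (illustrative helper, not hub code):

    import os

    def safe_symlink(relpath, destlink):
        try:
            os.symlink(relpath, destlink)
        except Exception as exc:
            # Unlike a bare 'except:', this still lets SystemExit and
            # KeyboardInterrupt propagate to the caller.
            print('Error linking %s to %s: %s' % (destlink, relpath, exc))
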

  

+ 

  def _write_maven_repo_metadata(destdir, artifacts):

      # Sort the list so that the highest version number comes last.

      # group_id and artifact_id should be the same for all entries,
@@ -2629,6 +2715,7 @@ 

          mdfile.write(contents)

      _generate_maven_metadata(destdir)

  

+ 

  def dist_repo_init(tag, keys, task_opts):

      """Create a new repo entry in the INIT state, return full repo data"""

      state = koji.REPO_INIT
@@ -2641,15 +2728,15 @@ 

      arches = list(set([koji.canonArch(a) for a in task_opts['arch']]))

      # note: we need to match args from the other preRepoInit callback

      koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=False,

-             with_debuginfo=False, event=event, repo_id=None,

-             dist=True, keys=keys, arches=arches, task_opts=task_opts,

-             with_separate_src=False)

+                               with_debuginfo=False, event=event, repo_id=None,

+                               dist=True, keys=keys, arches=arches, task_opts=task_opts,

+                               with_separate_src=False)

      if not event:

          event = get_event()

      repo_id = nextval('repo_id_seq')

      insert = InsertProcessor('repo')

      insert.set(id=repo_id, create_event=event, tag_id=tag_id,

-         state=state, dist=True)

+                state=state, dist=True)

      insert.execute()

      repodir = koji.pathinfo.distrepo(repo_id, tinfo['name'], volume=volume)

      for arch in arches:
@@ -2665,23 +2752,23 @@ 

          groupsdir = joinpath(repodir, 'groups')

          koji.ensuredir(groupsdir)

          shutil.copyfile(joinpath(koji.pathinfo.work(),

-             task_opts['comps']), groupsdir + '/comps.xml')

+                                  task_opts['comps']), groupsdir + '/comps.xml')

      # write repo info to disk

      repo_info = {

-             'id': repo_id,

-             'tag': tinfo['name'],

-             'tag_id': tinfo['id'],

-             'keys': keys,

-             'volume': volume,

-             'task_opts': task_opts,

-             }

+         'id': repo_id,

+         'tag': tinfo['name'],

+         'tag_id': tinfo['id'],

+         'keys': keys,

+         'volume': volume,

+         'task_opts': task_opts,

+     }

      with open('%s/repo.json' % repodir, 'w') as fp:

          json.dump(repo_info, fp, indent=2)

      # note: we need to match args from the other postRepoInit callback

      koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=False,

-             with_debuginfo=False, event=event, repo_id=repo_id,

-             dist=True, keys=keys, arches=arches, task_opts=task_opts,

-             repodir=repodir, with_reparate_src=False)

+                               with_debuginfo=False, event=event, repo_id=repo_id,

+                               dist=True, keys=keys, arches=arches, task_opts=task_opts,

+                               repodir=repodir, with_separate_src=False)

      return repo_id, event
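
The repo_info literals in both functions are re-indented the same way: the body hangs one level (four spaces) past the start of the assignment and the closing brace returns to the opening line's indent, satisfying the continuation-line checks (E12x). The same shape in isolation, with hypothetical values:

    repo_id, volume = 42, 'DEFAULT'
    repo_info = {
        'id': repo_id,        # body indented one level past 'repo_info'
        'volume': volume,
    }                         # brace back at the opening indent
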

  

  
@@ -2692,17 +2779,18 @@ 

          q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE"""

          oldstate = _singleValue(q, locals())

          if oldstate > state:

-             raise koji.GenericError("Invalid repo state transition %s->%s" \

-                     % (oldstate, state))

+             raise koji.GenericError("Invalid repo state transition %s->%s"

+                                     % (oldstate, state))

      q = """UPDATE repo SET state=%(state)s WHERE id = %(repo_id)s"""

      _dml(q, locals())

  

+ 

  def repo_info(repo_id, strict=False):

      fields = (

          ('repo.id', 'id'),

          ('repo.state', 'state'),

          ('repo.create_event', 'create_event'),

-         ('events.time', 'creation_time'),  #for compatibility with getRepo

+         ('events.time', 'creation_time'),  # for compatibility with getRepo

          ('EXTRACT(EPOCH FROM events.time)', 'create_ts'),

          ('repo.tag_id', 'tag_id'),

          ('tag.name', 'tag_name'),
@@ -2714,23 +2802,27 @@ 

      WHERE repo.id = %%(repo_id)s""" % ','.join([f[0] for f in fields])

      return _singleRow(q, locals(), [f[1] for f in fields], strict=strict)

  

+ 

  def repo_ready(repo_id):

      """Set repo state to ready"""

      repo_set_state(repo_id, koji.REPO_READY)

  

+ 

  def repo_expire(repo_id):

      """Set repo state to expired"""

      repo_set_state(repo_id, koji.REPO_EXPIRED)

  

+ 

  def repo_problem(repo_id):

      """Set repo state to problem"""

      repo_set_state(repo_id, koji.REPO_PROBLEM)

  

+ 

  def repo_delete(repo_id):

      """Attempt to mark repo deleted, return number of references

  

      If the number of references is nonzero, no change is made"""

-     #get a row lock on the repo

+     # get a row lock on the repo

      q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE"""

      _singleValue(q, locals())

      references = repo_references(repo_id)
@@ -2745,9 +2837,9 @@ 

      If dist is not None, then only expire repos with the given dist value

      """

      st_ready = koji.REPO_READY

-     clauses=['tag_id = %(tag_id)s',

-              'create_event < %(event_id)s',

-              'state = %(st_ready)s']

+     clauses = ['tag_id = %(tag_id)s',

+                'create_event < %(event_id)s',

+                'state = %(st_ready)s']

      if dist is not None:

          dist = bool(dist)

          clauses.append('dist = %(dist)s')
@@ -2767,8 +2859,8 @@ 

      values = {'repo_id': repo_id}

      clauses = ['repo_id=%(repo_id)s', 'retire_event IS NULL']

      query = QueryProcessor(columns=fields, aliases=aliases, tables=['standard_buildroot'],

-                 clauses=clauses, values=values)

-     #check results for bad states

+                            clauses=clauses, values=values)

+     # check results for bad states

      ret = []

      for data in query.execute():

          if data['state'] == koji.BR_STATES['EXPIRED']:
@@ -2797,7 +2889,7 @@ 

      joins = ['tag ON repo.tag_id=tag.id', 'events ON repo.create_event = events.id']

      clauses = ['repo.state != %(st_deleted)s']

      query = QueryProcessor(columns=fields, aliases=aliases, tables=['repo'],

-                 joins=joins, clauses=clauses, values=values)

+                            joins=joins, clauses=clauses, values=values)

      return query.execute()

  

  
@@ -2813,14 +2905,14 @@ 

      Returns: True or False

      """

      data = locals().copy()

-     #first check the tag_updates table

+     # first check the tag_updates table

      clauses = ['update_event > %(event)i', 'tag_id IN %(taglist)s']

      query = QueryProcessor(tables=['tag_updates'], columns=['id'],

-                             clauses=clauses, values=data,

-                             opts={'limit': 1})

+                            clauses=clauses, values=data,

+                            opts={'limit': 1})

      if query.execute():

          return True

-     #also check these versioned tables

+     # also check these versioned tables

      tables = (

          'tag_listing',

          'tag_inheritance',
@@ -2835,11 +2927,12 @@ 

                 'tag_id IN %(taglist)s']

      for table in tables:

          query = QueryProcessor(tables=[table], columns=['tag_id'], clauses=clauses,

-                                 values=data, opts={'limit': 1})

+                                values=data, opts={'limit': 1})

          if query.execute():

              return True

      return False

  

+ 

  def set_tag_update(tag_id, utype, event_id=None, user_id=None):

      """Record a non-versioned tag update"""

      utype_id = koji.TAG_UPDATE_TYPES.getnum(utype)
@@ -2855,6 +2948,7 @@ 

      insert = InsertProcessor('tag_updates', data=data)

      insert.execute()

  

+ 

  def _validate_build_target_name(name):

      """ A helper function that validates a build target name. """

      max_name_length = 256
@@ -2890,8 +2984,8 @@ 

          raise koji.GenericError("destination tag '%s' does not exist" % dest_tag)

      dest_tag = dest_tag_object['id']

  

-     #build targets are versioned, so if the target has previously been deleted, it

-     #is possible the name is in the system

+     # build targets are versioned, so if the target has previously been deleted, it

+     # is possible the name is in the system

      id = get_build_target_id(name, create=True)

  

      insert = InsertProcessor('build_target_config')
@@ -2940,7 +3034,7 @@ 

          _dml(rename, locals())

  

      update = UpdateProcessor('build_target_config', values=locals(),

-                 clauses=["build_target_id = %(buildTargetID)i"])

+                              clauses=["build_target_id = %(buildTargetID)i"])

      update.make_revoke()

  

      insert = InsertProcessor('build_target_config')
@@ -2969,7 +3063,7 @@ 

      # build targets are versioned, so we do not delete them from the db

      # instead we revoke the config entry

      update = UpdateProcessor('build_target_config', values=locals(),

-                 clauses=["build_target_id = %(targetID)i"])

+                              clauses=["build_target_id = %(targetID)i"])

      update.make_revoke()

      update.execute()

  
@@ -2998,9 +3092,9 @@ 

              clauses.append('build_target.id = %(info)i')

          else:

              raise koji.GenericError('invalid type for lookup: %s' % type(info))

-     if buildTagID != None:

+     if buildTagID is not None:

          clauses.append('build_tag = %(buildTagID)i')

-     if destTagID != None:

+     if destTagID is not None:

          clauses.append('dest_tag = %(destTagID)i')

  

      query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
@@ -3008,6 +3102,7 @@ 

                             values=locals(), opts=queryOpts)

      return query.execute()

  

+ 

  def get_build_target(info, event=None, strict=False):

      """Return the build target with the given name or ID.

      If there is no matching build target, return None."""
@@ -3019,6 +3114,7 @@ 

      else:

          return None

  

+ 

  def lookup_name(table, info, strict=False, create=False):

      """Find the id and name in the table associated with info.

  
@@ -3041,7 +3137,7 @@ 

          q = """SELECT id,name FROM %s WHERE id=%%(info)d""" % table

      elif isinstance(info, str):

          q = """SELECT id,name FROM %s WHERE name=%%(info)s""" % table

-     elif six.PY2 and isinstance(info, unicode):

+     elif six.PY2 and isinstance(info, unicode):  # noqa: F821

          info = koji.fixEncoding(info)

          q = """SELECT id,name FROM %s WHERE name=%%(info)s""" % table

      else:
@@ -3061,6 +3157,7 @@ 

              return ret

      return ret
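
'unicode' exists only on Python 2, so a flake8 run under Python 3 reports it as an undefined name (F821) even though the six.PY2 guard makes the branch dead there. The inline '# noqa: F821' waives exactly that one check on that one line. The general mechanism, as a sketch:

    import six

    def to_text(value):
        # flake8 analyzes the source statically, so the Python-2-only
        # name needs a targeted waiver even inside a six.PY2 guard.
        if six.PY2 and isinstance(value, unicode):  # noqa: F821
            return value.encode('utf-8')
        return str(value)
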

  

+ 

  def get_id(table, info, strict=False, create=False):

      """Find the id in the table associated with info."""

      data = lookup_name(table, info, strict, create)
@@ -3069,62 +3166,76 @@ 

      else:

          return data['id']

  

+ 

  def get_tag_id(info, strict=False, create=False):

      """Get the id for tag"""

      return get_id('tag', info, strict, create)

  

+ 

  def lookup_tag(info, strict=False, create=False):

      """Get the id,name for tag"""

      return lookup_name('tag', info, strict, create)

  

+ 

  def get_perm_id(info, strict=False, create=False):

      """Get the id for a permission"""

      return get_id('permissions', info, strict, create)

  

+ 

  def lookup_perm(info, strict=False, create=False):

      """Get the id,name for perm"""

      return lookup_name('permissions', info, strict, create)

  

+ 

  def get_package_id(info, strict=False, create=False):

      """Get the id for a package"""

      return get_id('package', info, strict, create)

  

+ 

  def lookup_package(info, strict=False, create=False):

      """Get the id,name for package"""

      return lookup_name('package', info, strict, create)

  

+ 

  def get_channel_id(info, strict=False, create=False):

      """Get the id for a channel"""

      return get_id('channels', info, strict, create)

  

+ 

  def lookup_channel(info, strict=False, create=False):

      """Get the id,name for channel"""

      return lookup_name('channels', info, strict, create)

  

+ 

  def get_group_id(info, strict=False, create=False):

      """Get the id for a group"""

      return get_id('groups', info, strict, create)

  

+ 

  def lookup_group(info, strict=False, create=False):

      """Get the id,name for group"""

      return lookup_name('groups', info, strict, create)

  

+ 

  def get_build_target_id(info, strict=False, create=False):

      """Get the id for a build target"""

      return get_id('build_target', info, strict, create)

  

+ 

  def lookup_build_target(info, strict=False, create=False):

      """Get the id,name for build target"""

      return lookup_name('build_target', info, strict, create)
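
The long run of one-line wrappers above changes only by gaining blank lines: flake8 expects two blank lines between top-level definitions (E302/E305). Shape only, with placeholder bodies:

    def get_thing_id(info):
        return 1                       # placeholder


    def lookup_thing(info):            # two blank lines above: E302-clean
        return {'id': 1, 'name': str(info)}
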

  

  

- def create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_support=False, maven_include_all=False, extra=None):

+ def create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_support=False,

+                maven_include_all=False, extra=None):

      """Create a new tag"""

      context.session.assertPerm('tag')

      return _create_tag(name, parent, arches, perm, locked, maven_support, maven_include_all, extra)

  

  

- def _create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_support=False, maven_include_all=False, extra=None):

+ def _create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_support=False,

+                 maven_include_all=False, extra=None):

      """Create a new tag, without access check"""

  

      max_name_length = 256
@@ -3137,7 +3248,7 @@ 

      if not context.opts.get('EnableMaven') and (maven_support or maven_include_all):

          raise koji.GenericError("Maven support not enabled")

  

-     #see if there is already a tag by this name (active)

+     # see if there is already a tag by this name (active)

      if get_tag(name):

          raise koji.GenericError("A tag with the name '%s' already exists" % name)

  
@@ -3150,7 +3261,7 @@ 

      else:

          parent_id = None

  

-     #there may already be an id for a deleted tag, this will reuse it

+     # there may already be an id for a deleted tag, this will reuse it

      tag_id = get_tag_id(name, create=True)

  

      insert = InsertProcessor('tag_config')
@@ -3182,6 +3293,7 @@ 

  

      return tag_id

  

+ 

  def get_tag(tagInfo, strict=False, event=None):

      """Get tag information based on the tagInfo.  tagInfo may be either

      a string (the tag name) or an int (the tag ID).
@@ -3216,7 +3328,7 @@ 

                'tag_config.locked': 'locked',

                'tag_config.maven_support': 'maven_support',

                'tag_config.maven_include_all': 'maven_include_all'

-              }

+               }

      clauses = [eventCondition(event, table='tag_config')]

      if isinstance(tagInfo, six.integer_types):

          clauses.append("tag.id = %(tagInfo)i")
@@ -3282,7 +3394,7 @@ 

  def _edit_tag(tagInfo, **kwargs):

      """Edit information for an existing tag."""

      if not context.opts.get('EnableMaven') \

-                 and dslice(kwargs, ['maven_support', 'maven_include_all'], strict=False):

+             and dslice(kwargs, ['maven_support', 'maven_include_all'], strict=False):

          raise koji.GenericError("Maven support not enabled")

  

      tag = get_tag(tagInfo, strict=True)
@@ -3296,8 +3408,8 @@ 

  

      name = kwargs.get('name')

      if name and tag['name'] != name:

-         #attempt to update tag name

-         #XXX - I'm not sure we should allow this sort of renaming anyway.

+         # attempt to update tag name

+         # XXX - I'm not sure we should allow this sort of renaming anyway.

          # while I can see the convenience, it is an untracked change (granted

          # a cosmetic one). The more versioning-friendly way would be to create

          # a new tag with duplicate data and revoke the old tag. This is more
@@ -3305,11 +3417,11 @@ 

          values = {

              'name': name,

              'tagID': tag['id']

-             }

+         }

          q = """SELECT id FROM tag WHERE name=%(name)s"""

          id = _singleValue(q, values, strict=False)

          if id is not None:

-             #new name is taken

+             # new name is taken

              raise koji.GenericError("Name %s already taken by tag %s" % (name, id))

          update = """UPDATE tag

  SET name = %(name)s
@@ -3321,7 +3433,7 @@ 

      if arches and tag['arches'] != arches:

          kwargs['arches'] = koji.parse_arches(arches, strict=True, allow_none=True)

  

-     #check for changes

+     # check for changes

      data = tag.copy()

      changed = False

      for key in ('perm_id', 'arches', 'locked', 'maven_support', 'maven_include_all'):
@@ -3345,7 +3457,8 @@ 

          if 'remove_extra' in kwargs:

              for removed in kwargs['remove_extra']:

                  if removed in kwargs['extra']:

-                     raise koji.GenericError("Can not both add/update and remove tag-extra: '%s'" % removed)

+                     raise koji.GenericError("Can not both add/update and remove tag-extra: '%s'" %

+                                             removed)

          for key in kwargs['extra']:

              value = kwargs['extra'][key]

              if key not in tag['extra'] or tag['extra'][key] != value:
@@ -3355,7 +3468,8 @@ 

                      'value': json.dumps(kwargs['extra'][key]),

                  }

                  # revoke old entry, if any

-                 update = UpdateProcessor('tag_extra', values=data, clauses=['tag_id = %(tag_id)i', 'key=%(key)s'])

+                 update = UpdateProcessor('tag_extra', values=data, clauses=['tag_id = %(tag_id)i',

+                                                                             'key=%(key)s'])

                  update.make_revoke()

                  update.execute()

                  # add new entry
@@ -3367,14 +3481,16 @@ 

      if 'remove_extra' in kwargs:

          ne = [e for e in kwargs['remove_extra'] if e not in tag['extra']]

          if ne:

-             raise koji.GenericError("Tag: %s doesn't have extra: %s" % (tag['name'], ', '.join(ne)))

+             raise koji.GenericError("Tag: %s doesn't have extra: %s" %

+                                     (tag['name'], ', '.join(ne)))

          for key in kwargs['remove_extra']:

              data = {

                  'tag_id': tag['id'],

                  'key': key,

              }

              # revoke old entry

-             update = UpdateProcessor('tag_extra', values=data, clauses=['tag_id = %(tag_id)i', 'key=%(key)s'])

+             update = UpdateProcessor('tag_extra', values=data, clauses=['tag_id = %(tag_id)i',

+                                                                         'key=%(key)s'])

              update.make_revoke()

              update.execute()

  
@@ -3394,11 +3510,11 @@ 

  def _delete_tag(tagInfo):

      """Delete the specified tag."""

  

-     #We do not ever DELETE tag data. It is versioned -- we revoke it instead.

+     # We do not ever DELETE tag data. It is versioned -- we revoke it instead.

  

      def _tagDelete(tableName, value, columnName='tag_id'):

          update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName],

-                     values={'value':value})

+                                  values={'value': value})

          update.make_revoke()

          update.execute()

  
@@ -3406,8 +3522,8 @@ 

      tagID = tag['id']

  

      _tagDelete('tag_config', tagID)

-     #technically, to 'delete' the tag we only have to revoke the tag_config entry

-     #these remaining revocations are more for cleanup.

+     # technically, to 'delete' the tag we only have to revoke the tag_config entry

+     # these remaining revocations are more for cleanup.

      _tagDelete('tag_extra', tagID)

      _tagDelete('tag_inheritance', tagID)

      _tagDelete('tag_inheritance', tagID, 'parent_id')
@@ -3424,10 +3540,12 @@ 

      # is still referenced by the revoked rows).

      # note: there is no need to do anything with the repo entries that reference tagID

  

+ 

  def get_external_repo_id(info, strict=False, create=False):

      """Get the id for a build target"""

      return get_id('external_repo', info, strict, create)

  

+ 

  def create_external_repo(name, url):

      """Create a new external repo with the given name and url.

      Return a map containing the id, name, and url
@@ -3449,6 +3567,7 @@ 

      insert.execute()

      return values

  

+ 

  def get_external_repos(info=None, url=None, event=None, queryOpts=None):

      """Get a list of external repos.  If info is not None it may be a

      string (name) or an integer (id).
@@ -3473,6 +3592,7 @@ 

                             values=locals(), opts=queryOpts)

      return query.execute()

  

+ 

  def get_external_repo(info, strict=False, event=None):

      """Get information about a single external repo.

      info can either be a string (name) or an integer (id).
@@ -3488,6 +3608,7 @@ 

          else:

              return None

  

+ 

  def edit_external_repo(info, name=None, url=None):

      """Edit an existing external repo"""

  
@@ -3500,7 +3621,8 @@ 

          existing_id = _singleValue("""SELECT id FROM external_repo WHERE name = %(name)s""",

                                     locals(), strict=False)

          if existing_id is not None:

-             raise koji.GenericError('name "%s" is already taken by external repo %i' % (name, existing_id))

+             raise koji.GenericError('name "%s" is already taken by external repo %i' %

+                                     (name, existing_id))

  

          rename = """UPDATE external_repo SET name = %(name)s WHERE id = %(repo_id)i"""

          _dml(rename, locals())
@@ -3511,7 +3633,7 @@ 

              url += '/'

  

          update = UpdateProcessor('external_repo_config', values=locals(),

-                     clauses=['external_repo_id = %(repo_id)i'])

+                                  clauses=['external_repo_id = %(repo_id)i'])

          update.make_revoke()

  

          insert = InsertProcessor('external_repo_config')
@@ -3521,6 +3643,7 @@ 

          update.execute()

          insert.execute()

  

+ 

  def delete_external_repo(info):

      """Delete an external repo"""

  
@@ -3534,10 +3657,11 @@ 

                                        repo_info=repo_id)

  

      update = UpdateProcessor('external_repo_config', values=locals(),

-                     clauses=['external_repo_id = %(repo_id)i'])

+                              clauses=['external_repo_id = %(repo_id)i'])

      update.make_revoke()

      update.execute()

  

+ 

  def add_external_repo_to_tag(tag_info, repo_info, priority, merge_mode='koji'):

      """Add an external repo to a tag"""

  
@@ -3553,11 +3677,11 @@ 

  

      tag_repos = get_tag_external_repos(tag_info=tag_id)

      if [tr for tr in tag_repos if tr['external_repo_id'] == repo_id]:

-         raise koji.GenericError('tag %s already associated with external repo %s' % \

-             (tag['name'], repo['name']))

+         raise koji.GenericError('tag %s already associated with external repo %s' %

+                                 (tag['name'], repo['name']))

      if [tr for tr in tag_repos if tr['priority'] == priority]:

-         raise koji.GenericError('tag %s already associated with an external repo at priority %i' % \

-             (tag['name'], priority))

+         raise koji.GenericError('tag %s already associated with an external repo at priority %i' %

+                                 (tag['name'], priority))

  

      insert = InsertProcessor('tag_external_repos')

      insert.set(tag_id=tag_id, external_repo_id=repo_id, priority=priority,
@@ -3565,6 +3689,7 @@ 

      insert.make_create()

      insert.execute()

  

+ 

  def remove_external_repo_from_tag(tag_info, repo_info):

      """Remove an external repo from a tag"""

  
@@ -3576,14 +3701,15 @@ 

      repo_id = repo['id']

  

      if not get_tag_external_repos(tag_info=tag_id, repo_info=repo_id):

-         raise koji.GenericError('external repo %s not associated with tag %s' % \

-             (repo['name'], tag['name']))

+         raise koji.GenericError('external repo %s not associated with tag %s' %

+                                 (repo['name'], tag['name']))

  

      update = UpdateProcessor('tag_external_repos', values=locals(),

-                 clauses=["tag_id = %(tag_id)i", "external_repo_id = %(repo_id)i"])

+                              clauses=["tag_id = %(tag_id)i", "external_repo_id = %(repo_id)i"])

      update.make_revoke()

      update.execute()
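
When the long error messages are wrapped, the '%' operator stays at the end of the first line. W503 and W504 are complementary checks (line break before vs. after a binary operator) and only one of the two can sensibly be enforced; this style presumes W504 is the ignored one, so breaking after the operator is the accepted form:

    repo_name, tag_name = 'epel-external', 'f30-build'   # hypothetical
    message = ('external repo %s not associated with tag %s' %
               (repo_name, tag_name))  # '%' trails the line: W504 style
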

  

+ 

  def edit_tag_external_repo(tag_info, repo_info, priority):

      """Edit a tag<->external repo association

      This allows you to update the priority without removing/adding the repo."""
@@ -3597,14 +3723,15 @@ 

  

      tag_repos = get_tag_external_repos(tag_info=tag_id, repo_info=repo_id)

      if not tag_repos:

-         raise koji.GenericError('external repo %s not associated with tag %s' % \

-             (repo['name'], tag['name']))

+         raise koji.GenericError('external repo %s not associated with tag %s' %

+                                 (repo['name'], tag['name']))

      tag_repo = tag_repos[0]

  

      if priority != tag_repo['priority']:

          remove_external_repo_from_tag(tag_id, repo_id)

          add_external_repo_to_tag(tag_id, repo_id, priority)

  

+ 

  def get_tag_external_repos(tag_info=None, repo_info=None, event=None):

      """

      Get a list of tag<->external repo associations.
@@ -3623,17 +3750,18 @@ 

               'external_repo ON tag_external_repos.external_repo_id = external_repo.id',

               'external_repo_config ON external_repo.id = external_repo_config.external_repo_id']

      fields = {

-             'external_repo.id': 'external_repo_id',

-             'external_repo.name': 'external_repo_name',

-             'priority': 'priority',

-             'tag.id': 'tag_id',

-             'tag.name': 'tag_name',

-             'url': 'url',

-             'merge_mode': 'merge_mode',

-             }

+         'external_repo.id': 'external_repo_id',

+         'external_repo.name': 'external_repo_name',

+         'priority': 'priority',

+         'tag.id': 'tag_id',

+         'tag.name': 'tag_name',

+         'url': 'url',

+         'merge_mode': 'merge_mode',

+     }

      columns, aliases = zip(*fields.items())

  

-     clauses = [eventCondition(event, table='tag_external_repos'), eventCondition(event, table='external_repo_config')]

+     clauses = [eventCondition(event, table='tag_external_repos'),

+                eventCondition(event, table='external_repo_config')]

      if tag_info:

          tag = get_tag(tag_info, strict=True, event=event)

          tag_id = tag['id']
@@ -3651,6 +3779,7 @@ 

                             opts=opts)

      return query.execute()

  

+ 

  def get_external_repo_list(tag_info, event=None):

      """

      Get an ordered list of all external repos associated with the tags in the
@@ -3749,6 +3878,7 @@ 

          user['krb_principals'] = list_user_krb_principals(user['id'])

      return user

  

+ 

  def edit_user(userInfo, name=None, krb_principal_mappings=None):

      """Edit information for an existing user.

  
@@ -3776,7 +3906,7 @@ 

          values = {

              'name': name,

              'userID': user['id']

-             }

+         }

          q = """SELECT id FROM users WHERE name=%(name)s"""

          id = _singleValue(q, values, strict=False)

          if id is not None:
@@ -3890,10 +4020,10 @@ 

      AND build.release=%(release)s

      """

      # constraints should ensure this is unique

-     #log_error(koji.db._quoteparams(q,data))

+     # log_error(koji.db._quoteparams(q,data))

      c.execute(q, data)

      r = c.fetchone()

-     #log_error("%r" % r )

+     # log_error("%r" % r )

      if not r:

          if strict:

              raise koji.GenericError('No matching build found: %r' % X)
@@ -3947,15 +4077,18 @@ 

          associated task ids, and not all import methods provide source info.

      """

      buildID = find_build_id(buildInfo, strict=strict)

-     if buildID == None:

+     if buildID is None:

          return None

  

      fields = (('build.id', 'id'), ('build.version', 'version'), ('build.release', 'release'),

                ('build.id', 'build_id'),

-               ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),

+               ('build.epoch', 'epoch'), ('build.state', 'state'),

+               ('build.completion_time', 'completion_time'),

                ('build.start_time', 'start_time'),

-               ('build.task_id', 'task_id'), ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),

-               ('package.id', 'package_id'), ('package.name', 'package_name'), ('package.name', 'name'),

+               ('build.task_id', 'task_id'),

+               ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),

+               ('package.id', 'package_id'), ('package.name', 'package_name'),

+               ('package.name', 'name'),

                ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),

                ("package.name || '-' || build.version || '-' || build.release", 'nvr'),

                ('EXTRACT(EPOCH FROM events.time)', 'creation_ts'),
@@ -3970,7 +4103,7 @@ 

               'package on build.pkg_id = package.id',

               'volume on build.volume_id = volume.id',

               'users on build.owner = users.id',

-             ]

+              ]

      clauses = ['build.id = %(buildID)i']

      query = QueryProcessor(columns=fields, aliases=aliases, values=locals(),

                             transform=_fix_extra_field,
@@ -4013,7 +4146,7 @@ 

                  'name': fn,

                  'dir': subdir,

                  'path': "%s/%s/%s" % (logreldir, subdir, fn)

-                 }

+             }

              logs.append(loginfo)

      return logs

  
@@ -4022,14 +4155,14 @@ 

      """find the last successful or deleted build of this N-V. If building is

      specified, also skip builds in progress"""

      values = {

-                   'name': build_info['name'],

-                   'version': build_info['version'],

-                   'states': (

-                       koji.BUILD_STATES['COMPLETE'],

-                       koji.BUILD_STATES['DELETED'],

-                       koji.BUILD_STATES['BUILDING']

-                   )

-              }

+         'name': build_info['name'],

+         'version': build_info['version'],

+         'states': (

+             koji.BUILD_STATES['COMPLETE'],

+             koji.BUILD_STATES['DELETED'],

+             koji.BUILD_STATES['BUILDING']

+         )

+     }

      query = QueryProcessor(tables=['build'], joins=['package ON build.pkg_id = package.id'],

                             columns=['build.id', 'release'],

                             clauses=['name = %(name)s', 'version = %(version)s',
@@ -4060,7 +4193,8 @@ 

          row['extra'] = parse_json(row['extra'], desc='rpm extra')

      return row

  

- #alias for now, may change in the future

+ 

+ # alias for now, may change in the future

  _fix_archive_row = _fix_rpm_row

  

  
@@ -4116,7 +4250,7 @@ 

          ('buildtime', 'buildtime'),

          ('metadata_only', 'metadata_only'),

          ('extra', 'extra'),

-         )

+     )

      # we can look up by id or NVRA

      data = None

      if isinstance(rpminfo, six.integer_types):
@@ -4138,9 +4272,9 @@ 

          data['external_repo_id'] = get_external_repo_id(data['location'], strict=True)

          clauses.append("""external_repo_id = %(external_repo_id)i""")

      elif not multi:

-         #try to match internal first, otherwise first matching external

-         retry = True  #if no internal match

-         orig_clauses = list(clauses)  #copy

+         # try to match internal first, otherwise first matching external

+         retry = True  # if no internal match

+         orig_clauses = list(clauses)  # copy

          clauses.append("""external_repo_id = 0""")

  

      joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id']
@@ -4154,7 +4288,7 @@ 

      if ret:

          return ret

      if retry:

-         #at this point we have just an NVRA with no internal match. Open it up to externals

+         # at this point we have just an NVRA with no internal match. Open it up to externals

          query.clauses = orig_clauses

          ret = query.executeOne()

      if not ret:
@@ -4164,7 +4298,8 @@ 

      return ret

  

  

- def list_rpms(buildID=None, buildrootID=None, imageID=None, componentBuildrootID=None, hostID=None, arches=None, queryOpts=None):

+ def list_rpms(buildID=None, buildrootID=None, imageID=None, componentBuildrootID=None, hostID=None,

+               arches=None, queryOpts=None):

      """List RPMS.  If buildID, imageID and/or buildrootID are specified,

      restrict the list of RPMs to only those RPMs that are part of that

      build, or were built in that buildroot.  If componentBuildrootID is specified,
@@ -4206,15 +4341,15 @@ 

                ('external_repo.name', 'external_repo_name'),

                ('rpminfo.metadata_only', 'metadata_only'),

                ('rpminfo.extra', 'extra'),

-              ]

+               ]

      joins = ['LEFT JOIN external_repo ON rpminfo.external_repo_id = external_repo.id']

      clauses = []

  

-     if buildID != None:

+     if buildID is not None:

          clauses.append('rpminfo.build_id = %(buildID)i')

-     if buildrootID != None:

+     if buildrootID is not None:

          clauses.append('rpminfo.buildroot_id = %(buildrootID)i')

-     if componentBuildrootID != None:

+     if componentBuildrootID is not None:

          fields.append(('buildroot_listing.buildroot_id as component_buildroot_id',

                         'component_buildroot_id'))

          fields.append(('buildroot_listing.is_update', 'is_update'))
@@ -4222,14 +4357,15 @@ 

          clauses.append('buildroot_listing.buildroot_id = %(componentBuildrootID)i')

  

      # image specific constraints

-     if imageID != None:

+     if imageID is not None:

          clauses.append('archive_rpm_components.archive_id = %(imageID)i')

          joins.append('archive_rpm_components ON rpminfo.id = archive_rpm_components.rpm_id')

  

-     if hostID != None:

-         joins.append('standard_buildroot ON rpminfo.buildroot_id = standard_buildroot.buildroot_id')

+     if hostID is not None:

+         joins.append(

+             'standard_buildroot ON rpminfo.buildroot_id = standard_buildroot.buildroot_id')

          clauses.append('standard_buildroot.host_id = %(hostID)i')

-     if arches != None:

+     if arches is not None:

          if isinstance(arches, (list, tuple)):

              clauses.append('rpminfo.arch IN %(arches)s')

          elif isinstance(arches, str):
@@ -4267,6 +4403,7 @@ 

      WHERE build_id = %%(build_id)i""" % ', '.join(fields)

      return _singleRow(query, locals(), fields, strict)

  

+ 

  def get_win_build(buildInfo, strict=False):

      """

      Retrieve Windows-specific information about a build.
@@ -4290,6 +4427,7 @@ 

          raise koji.GenericError('no such Windows build: %s' % buildInfo)

      return result

  

+ 

  def get_image_build(buildInfo, strict=False):

      """

      Retrieve image-specific information about a build.
@@ -4326,20 +4464,20 @@ 

          return None

  

      query = QueryProcessor(

-                 tables=['btype'],

-                 columns=['name'],

-                 joins=['build_types ON btype_id=btype.id'],

-                 clauses=['build_id = %(id)i'],

-                 values=binfo,

-                 opts={'asList':True},

-             )

+         tables=['btype'],

+         columns=['name'],

+         joins=['build_types ON btype_id=btype.id'],

+         clauses=['build_id = %(id)i'],

+         values=binfo,

+         opts={'asList': True},

+     )

  

      ret = {}

      extra = binfo['extra'] or {}

      for (btype,) in query.execute():

          ret[btype] = extra.get('typeinfo', {}).get(btype)

  

-     #deal with legacy types

+     # deal with legacy types

      l_funcs = [['maven', get_maven_build], ['win', get_win_build],

                 ['image', get_image_build]]

      for ltype, func in l_funcs:
@@ -4388,17 +4526,19 @@ 

      insert.execute()

  

  

- def list_archives(buildID=None, buildrootID=None, componentBuildrootID=None, hostID=None, type=None,

-                   filename=None, size=None, checksum=None, typeInfo=None, queryOpts=None, imageID=None,

-                   archiveID=None, strict=False):

+ def list_archives(buildID=None, buildrootID=None, componentBuildrootID=None, hostID=None,

+                   type=None, filename=None, size=None, checksum=None, typeInfo=None,

+                   queryOpts=None, imageID=None, archiveID=None, strict=False):

      """

      Retrieve information about archives.

      If buildID is not null it will restrict the list to archives built by the build with that ID.

-     If buildrootID is not null it will restrict the list to archives built in the buildroot with that ID.

-     If componentBuildrootID is not null it will restrict the list to archives that were present in the

-       buildroot with that ID.

+     If buildrootID is not null it will restrict the list to archives built in the buildroot with

+     that ID.

+     If componentBuildrootID is not null it will restrict the list to archives that were present in

+     the buildroot with that ID.

      If hostID is not null it will restrict the list to archives built on the host with that ID.

-     If filename, size, and/or checksum are not null it will filter the results to entries matching the provided values.

+     If filename, size, and/or checksum are not null it will filter the results to entries matching

+     the provided values.

  

      Returns a list of maps containing the following keys:

  
@@ -4471,7 +4611,7 @@ 

                ('archivetypes.name', 'type_name'),

                ('archivetypes.description', 'type_description'),

                ('archivetypes.extensions', 'type_extensions'),

-             ]

+               ]

      clauses = []

  

      if buildID is not None:
@@ -4486,13 +4626,14 @@ 

          values['component_buildroot_id'] = componentBuildrootID

          fields.append(['buildroot_archives.buildroot_id', 'component_buildroot_id'])

          fields.append(['buildroot_archives.project_dep', 'project'])

-     if imageID != None:

+     if imageID is not None:

          # TODO: arg name is now a misnomer, could be any archive

-        clauses.append('archive_components.archive_id = %(imageID)i')

-        values['imageID'] = imageID

-        joins.append('archive_components ON archiveinfo.id = archive_components.component_id')

+         clauses.append('archive_components.archive_id = %(imageID)i')

+         values['imageID'] = imageID

+         joins.append('archive_components ON archiveinfo.id = archive_components.component_id')

      if hostID is not None:

-         joins.append('standard_buildroot on archiveinfo.buildroot_id = standard_buildroot.buildroot_id')

+         joins.append(

+             'standard_buildroot on archiveinfo.buildroot_id = standard_buildroot.buildroot_id')

          clauses.append('standard_buildroot.host_id = %(host_id)i')

          values['host_id'] = hostID

          fields.append(['standard_buildroot.host_id', 'host_id'])
@@ -4514,9 +4655,9 @@ 

      elif type == 'maven':

          joins.append('maven_archives ON archiveinfo.id = maven_archives.archive_id')

          fields.extend([

-                 ('maven_archives.group_id', 'group_id'),

-                 ('maven_archives.artifact_id', 'artifact_id'),

-                 ('maven_archives.version', 'version'),

+             ('maven_archives.group_id', 'group_id'),

+             ('maven_archives.artifact_id', 'artifact_id'),

+             ('maven_archives.version', 'version'),

          ])

  

          if typeInfo:
@@ -4527,9 +4668,9 @@ 

      elif type == 'win':

          joins.append('win_archives ON archiveinfo.id = win_archives.archive_id')

          fields.extend([

-                 ('win_archives.relpath', 'relpath'),

-                 ('win_archives.platforms', 'platforms'),

-                 ('win_archives.flags', 'flags'),

+             ('win_archives.relpath', 'relpath'),

+             ('win_archives.platforms', 'platforms'),

+             ('win_archives.flags', 'flags'),

          ])

  

          if typeInfo:
@@ -4558,14 +4699,14 @@ 

              raise koji.GenericError('unsupported archive type: %s' % type)

          if typeInfo:

              raise koji.GenericError('typeInfo queries not supported for type '

-                     '%(name)s' % btype)

+                                     '%(name)s' % btype)

          clauses.append('archiveinfo.btype_id = %(btype_id)s')

          values['btype_id'] = btype['id']

  

      columns, aliases = zip(*fields)

      ret = QueryProcessor(tables=tables, columns=columns, aliases=aliases, joins=joins,

-                           transform=_fix_archive_row,

-                           clauses=clauses, values=values, opts=queryOpts).execute()

+                          transform=_fix_archive_row,

+                          clauses=clauses, values=values, opts=queryOpts).execute()

      if strict and not ret:

          raise koji.GenericError('No archives found.')

      return ret
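For orientation, this function backs the hub call exported as `listArchives`; a hypothetical client-side invocation (the hub URL and build ID are invented) might look like:

    import koji

    session = koji.ClientSession('https://koji.example.com/kojihub')  # invented hub URL
    for archive in session.listArchives(buildID=1234, type='maven'):
        print(archive['filename'], archive['checksum'])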
@@ -4621,6 +4762,7 @@ 

          archive.update(image_info)

      return archive

  

+ 

  def get_maven_archive(archive_id, strict=False):

      """

      Retrieve Maven-specific information about an archive.
@@ -4636,6 +4778,7 @@ 

      WHERE archive_id = %%(archive_id)i""" % ', '.join(fields)

      return _singleRow(select, locals(), fields, strict=strict)

  

+ 

  def get_win_archive(archive_id, strict=False):

      """

      Retrieve Windows-specific information about an archive.
@@ -4651,6 +4794,7 @@ 

      WHERE archive_id = %%(archive_id)i""" % ', '.join(fields)

      return _singleRow(select, locals(), fields, strict=strict)

  

+ 

  def get_image_archive(archive_id, strict=False):

      """

      Retrieve image-specific information about an archive.
@@ -4675,6 +4819,7 @@ 

          results['rootid'] = True

      return results

  

+ 

  def _get_zipfile_list(archive_id, zippath):

      """

      Get a list of the entries in the zipfile located at zippath.
@@ -4696,6 +4841,7 @@ 

                             'mtime': int(time.mktime(entry.date_time + (0, 0, -1)))})

      return result

  

+ 

  def _get_tarball_list(archive_id, tarpath):

      """

      Get a list of the entries in the tarball located at tarpath.
@@ -4843,7 +4989,7 @@ 

          # raise error if task doesn't exist

          try:

              Task(taskID).getInfo(strict=True)

-         except:

+         except Exception:

              raise koji.GenericError("Task doesn't exist")

  

      if stat or all_volumes:
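Narrowing the bare `except:` to `except Exception` above (flake8 E722) matters for long-running services: a bare clause also traps `BaseException` subclasses such as `SystemExit` and `KeyboardInterrupt`. A small sketch of the difference:

    def swallow_everything():
        try:
            raise KeyboardInterrupt
        except:  # bare: also traps SystemExit and KeyboardInterrupt
            pass


    def swallow_errors_only():
        try:
            raise KeyboardInterrupt
        except Exception:  # lets KeyboardInterrupt propagate
            pass


    swallow_everything()    # returns quietly
    swallow_errors_only()   # raises KeyboardInterrupt to the caller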
@@ -4883,6 +5029,7 @@ 

                          result.append(relfilename)

      return result

  

+ 

  def _fetchMulti(query, values):

      """Run the query and return all rows"""

      c = context.cnx.cursor()
@@ -4891,6 +5038,7 @@ 

      c.close()

      return results

  

+ 

  def _fetchSingle(query, values, strict=False):

      """Run the query and return a single row

  
@@ -4908,6 +5056,7 @@ 

      else:

          return results[0]

  

+ 

  def _multiRow(query, values, fields):

      """Return all rows from "query".  Named query parameters

      can be specified using the "values" map.  Results will be returned
@@ -4916,6 +5065,7 @@ 

      list will be returned."""

      return [dict(zip(fields, row)) for row in _fetchMulti(query, values)]

  

+ 

  def _singleRow(query, values, fields, strict=False):

      """Return a single row from "query".  Named parameters can be

      specified using the "values" map.  The result will be returned as
@@ -4928,9 +5078,10 @@ 

      if row:

          return dict(zip(fields, row))

      else:

-         #strict enforced by _fetchSingle

+         # strict enforced by _fetchSingle

          return None

  

+ 

  def _singleValue(query, values=None, strict=True):

      """Perform a query that returns a single value.

  
@@ -4947,6 +5098,7 @@ 

          # don't need to check strict here, since that was already handled by _singleRow()

          return None

  

+ 

  def _dml(operation, values):

      """Run an insert, update, or delete. Return number of rows affected"""

      c = context.cnx.cursor()
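The blank-line churn through these DB helpers is pycodestyle E302/E305, which want two blank lines around top-level definitions. In miniature:

    def first_helper():
        pass


    def second_helper():  # the two blank lines above keep E302 quiet
        pass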
@@ -4957,6 +5109,7 @@ 

      context.commit_pending = True

      return ret

  

+ 

  def get_host(hostInfo, strict=False, event=None):

      """Get information about the given host.  hostInfo may be

      either a string (hostname) or int (host id).  A map will be returned
@@ -5007,6 +5160,7 @@ 

          return None

      return result

  

+ 

  def edit_host(hostInfo, **kw):

      """Edit information for an existing host.

      hostInfo specifies the host to edit, either as an integer (id)
@@ -5036,7 +5190,10 @@ 

      update.make_revoke()

      update.execute()

  

-     insert = InsertProcessor('host_config', data=dslice(host, ('arches', 'capacity', 'description', 'comment', 'enabled')))

+     insert = InsertProcessor('host_config',

+                              data=dslice(host,

+                                          ('arches', 'capacity', 'description', 'comment',

+                                           'enabled')))

      insert.set(host_id=host['id'])

      for change in changes:

          insert.set(**{change: kw[change]})
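For readers unfamiliar with `dslice` (from koji.util), used in the rewrapped insert above: it copies just the named keys out of a dict. A quick sketch with invented host data:

    from koji.util import dslice

    host = {'id': 42, 'arches': 'x86_64', 'capacity': 2.0,
            'description': '', 'comment': '', 'enabled': True}
    print(dslice(host, ('arches', 'capacity', 'description', 'comment', 'enabled')))
    # {'arches': 'x86_64', 'capacity': 2.0, 'description': '', 'comment': '', 'enabled': True}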
@@ -5045,6 +5202,7 @@ 

  

      return True

  

+ 

  def get_channel(channelInfo, strict=False):

      """

      Look up the ID number and name for a channel.
@@ -5071,7 +5229,8 @@ 

      return _singleRow(query, locals(), fields, strict)

  

  

- def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, archiveID=None, taskID=None, buildrootID=None, queryOpts=None):

+ def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, archiveID=None, taskID=None,

+                      buildrootID=None, queryOpts=None):

      """Return a list of matching buildroots

  

      Optional args:
@@ -5090,7 +5249,7 @@ 

                ('content_generator.name', 'cg_name'),

                ('buildroot.cg_version', 'cg_version'),

                ('buildroot.container_arch', 'container_arch'),

-               ('buildroot.container_arch', 'arch'), #alias for back compat

+               ('buildroot.container_arch', 'arch'),  # alias for back compat

                ('buildroot.container_type', 'container_type'),

                ('buildroot.host_os', 'host_os'),

                ('buildroot.host_arch', 'host_arch'),
@@ -5104,41 +5263,46 @@ 

                ('EXTRACT(EPOCH FROM create_events.time)', 'create_ts'),

                ('retire_events.id', 'retire_event_id'), ('retire_events.time', 'retire_event_time'),

                ('EXTRACT(EPOCH FROM retire_events.time)', 'retire_ts'),

-               ('repo_create.id', 'repo_create_event_id'), ('repo_create.time', 'repo_create_event_time')]

+               ('repo_create.id', 'repo_create_event_id'),

+               ('repo_create.time', 'repo_create_event_time')]

  

      tables = ['buildroot']

-     joins = ['LEFT OUTER JOIN standard_buildroot ON standard_buildroot.buildroot_id = buildroot.id',

-            'LEFT OUTER JOIN content_generator ON buildroot.cg_id = content_generator.id',

-            'LEFT OUTER JOIN host ON host.id = standard_buildroot.host_id',

-            'LEFT OUTER JOIN repo ON repo.id = standard_buildroot.repo_id',

-            'LEFT OUTER JOIN tag ON tag.id = repo.tag_id',

-            'LEFT OUTER JOIN events AS create_events ON create_events.id = standard_buildroot.create_event',

-            'LEFT OUTER JOIN events AS retire_events ON standard_buildroot.retire_event = retire_events.id',

-            'LEFT OUTER JOIN events AS repo_create ON repo_create.id = repo.create_event']

+     joins = ['LEFT OUTER JOIN standard_buildroot '

+              'ON standard_buildroot.buildroot_id = buildroot.id',

+              'LEFT OUTER JOIN content_generator '

+              'ON buildroot.cg_id = content_generator.id',

+              'LEFT OUTER JOIN host ON host.id = standard_buildroot.host_id',

+              'LEFT OUTER JOIN repo ON repo.id = standard_buildroot.repo_id',

+              'LEFT OUTER JOIN tag ON tag.id = repo.tag_id',

+              'LEFT OUTER JOIN events AS create_events ON '

+              'create_events.id = standard_buildroot.create_event',

+              'LEFT OUTER JOIN events AS retire_events ON '

+              'standard_buildroot.retire_event = retire_events.id',

+              'LEFT OUTER JOIN events AS repo_create ON repo_create.id = repo.create_event']

  

      clauses = []

-     if buildrootID != None:

+     if buildrootID is not None:

          if isinstance(buildrootID, (list, tuple)):

              clauses.append('buildroot.id IN %(buildrootID)s')

          else:

              clauses.append('buildroot.id = %(buildrootID)i')

-     if hostID != None:

+     if hostID is not None:

          clauses.append('host.id = %(hostID)i')

-     if tagID != None:

+     if tagID is not None:

          clauses.append('tag.id = %(tagID)i')

-     if state != None:

+     if state is not None:

          if isinstance(state, (list, tuple)):

              clauses.append('standard_buildroot.state IN %(state)s')

          else:

              clauses.append('standard_buildroot.state = %(state)i')

-     if rpmID != None:

+     if rpmID is not None:

          joins.insert(0, 'buildroot_listing ON buildroot.id = buildroot_listing.buildroot_id')

          fields.append(('buildroot_listing.is_update', 'is_update'))

          clauses.append('buildroot_listing.rpm_id = %(rpmID)i')

-     if archiveID != None:

+     if archiveID is not None:

          joins.append('buildroot_archives ON buildroot.id = buildroot_archives.buildroot_id')

          clauses.append('buildroot_archives.archive_id = %(archiveID)i')

-     if taskID != None:

+     if taskID is not None:

          clauses.append('standard_buildroot.task_id = %(taskID)i')

  

      query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
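The long JOIN strings above are wrapped with implicit string-literal concatenation, which stays under the 99-character line limit without backslashes or `+`. A minimal sketch:

    # adjacent literals are joined at compile time into a single string
    join = ('LEFT OUTER JOIN events AS create_events ON '
            'create_events.id = standard_buildroot.create_event')
    print(join)  # one line: ... AS create_events ON create_events.id = ...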
@@ -5147,6 +5311,7 @@ 

                             opts=queryOpts)

      return query.execute()

  

+ 

  def get_buildroot(buildrootID, strict=False):

      """Return information about a buildroot.  buildrootID must be an int ID."""

  
@@ -5157,10 +5322,11 @@ 

          else:

              return None

      if len(result) > 1:

-         #this should be impossible

+         # this should be impossible

          raise koji.GenericError("More that one buildroot with id: %i" % buildrootID)

      return result[0]

  

+ 

  def list_channels(hostID=None, event=None):

      """List channels.  If hostID is specified, only list

      channels associated with the host with that ID."""
@@ -5170,8 +5336,8 @@ 

          tables = ['host_channels']

          joins = ['channels ON channels.id = host_channels.channel_id']

          clauses = [

-                 eventCondition(event, table='host_channels'),

-                 'host_channels.host_id = %(host_id)s']

+             eventCondition(event, table='host_channels'),

+             'host_channels.host_id = %(host_id)s']

          values = {'host_id': hostID}

          query = QueryProcessor(tables=tables, aliases=aliases,

                                 columns=columns, joins=joins,
@@ -5184,6 +5350,7 @@ 

                                 columns=columns)

      return query.execute()

  

+ 

  def new_package(name, strict=True):

      c = context.cnx.cursor()

      # TODO - table lock?
@@ -5218,21 +5385,24 @@ 

      volinfo = lookup_name('volume', name, strict=False, create=True)

      return volinfo

  

+ 

  def remove_volume(volume):

      """Remove unused storage volume from the database"""

      context.session.assertPerm('admin')

      volinfo = lookup_name('volume', volume, strict=True)

      query = QueryProcessor(tables=['build'], clauses=['volume_id=%(id)i'],

-                     values=volinfo, columns=['id'], opts={'limit':1})

+                            values=volinfo, columns=['id'], opts={'limit': 1})

      if query.execute():

          raise koji.GenericError('volume %(name)s has build references' % volinfo)

      delete = """DELETE FROM volume WHERE id=%(id)i"""

      _dml(delete, volinfo)

  

+ 

  def list_volumes():

      """List storage volumes"""

      return QueryProcessor(tables=['volume'], columns=['id', 'name']).execute()

  

+ 

  def change_build_volume(build, volume, strict=True):

      """Move a build to a different storage volume"""

      context.session.assertPerm('admin')
@@ -5247,7 +5417,7 @@ 

          if strict:

              raise koji.GenericError("Build %(nvr)s already on volume %(volume_name)s" % binfo)

          else:

-             #nothing to do

+             # nothing to do

              return

      state = koji.BUILD_STATES[binfo['state']]

      if state not in ['COMPLETE', 'DELETED']:
@@ -5256,7 +5426,7 @@ 

      if not os.path.isdir(voldir):

          raise koji.GenericError("Directory entry missing for volume %(name)s" % volinfo)

  

-     #more sanity checks

+     # more sanity checks

      for check_vol in list_volumes():

          check_binfo = binfo.copy()

          check_binfo['volume_id'] = check_vol['id']
@@ -5293,7 +5463,8 @@ 

          shutil.copytree(olddir, newdir, symlinks=True)

  

      # Second, update the db

-     koji.plugin.run_callbacks('preBuildStateChange', attribute='volume_id', old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)

+     koji.plugin.run_callbacks('preBuildStateChange', attribute='volume_id',

+                               old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)

      update = UpdateProcessor('build', clauses=['id=%(id)i'], values=binfo)

      update.set(volume_id=volinfo['id'])

      update.execute()
@@ -5304,7 +5475,7 @@ 

      for olddir, newdir in dir_moves:

          koji.util.rmtree(olddir)

  

-     #Fourth, maintain a symlink if appropriate

+     # Fourth, maintain a symlink if appropriate

      if volinfo['name'] and volinfo['name'] != 'DEFAULT':

          base_vol = lookup_name('volume', 'DEFAULT', strict=True)

          base_binfo = binfo.copy()
@@ -5316,7 +5487,8 @@ 

          relpath = os.path.relpath(newdir, os.path.dirname(basedir))

          os.symlink(relpath, basedir)

  

-     koji.plugin.run_callbacks('postBuildStateChange', attribute='volume_id', old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)

+     koji.plugin.run_callbacks('postBuildStateChange', attribute='volume_id',

+                               old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)

  

  

  def ensure_volume_symlink(binfo):
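On the symlink bookkeeping just above: the link is stored relative to the default volume's location. A runnable sketch with throwaway paths (the real layout comes from `koji.pathinfo`):

    import os

    newdir = '/mnt/koji/vol/other/packages/foo/1.0/1'  # invented volume path
    basedir = '/mnt/koji/packages/foo/1.0/1'           # invented default path
    relpath = os.path.relpath(newdir, os.path.dirname(basedir))
    print(relpath)  # ../../../vol/other/packages/foo/1.0/1
    # os.symlink(relpath, basedir) would then create the relative link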
@@ -5431,7 +5603,7 @@ 

      if 'pkg_id' in data:

          data['name'] = lookup_package(data['pkg_id'], strict=True)['name']

      else:

-         #see if there's a package name

+         # see if there's a package name

          name = data.get('name')

          if not name:

              raise koji.GenericError("No name or package id provided for build")
@@ -5450,7 +5622,7 @@ 

      else:

          data['extra'] = None

  

-     #provide a few default values

+     # provide a few default values

      data.setdefault('state', koji.BUILD_STATES['COMPLETE'])

      data.setdefault('start_time', 'NOW')

      data.setdefault('completion_time', 'NOW')
@@ -5459,7 +5631,7 @@ 

      data.setdefault('task_id', None)

      data.setdefault('volume_id', 0)

  

-     #check for existing build

+     # check for existing build

      old_binfo = get_build(data)

      if old_binfo:

          if strict:
@@ -5467,19 +5639,22 @@ 

          recycle_build(old_binfo, data)

          # Raises exception if there is a problem

          return old_binfo['id']

-     koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=None, new=data['state'], info=data)

+     koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=None,

+                               new=data['state'], info=data)

  

-     #insert the new data

+     # insert the new data

      insert_data = dslice(data, ['pkg_id', 'version', 'release', 'epoch', 'state', 'volume_id',

-                          'task_id', 'owner', 'start_time', 'completion_time', 'source', 'extra'])

+                                 'task_id', 'owner', 'start_time', 'completion_time', 'source',

+                                 'extra'])

      if 'cg_id' in data:

          insert_data['cg_id'] = data['cg_id']

      data['id'] = insert_data['id'] = _singleValue("SELECT nextval('build_id_seq')")

      insert = InsertProcessor('build', data=insert_data)

      insert.execute()

      new_binfo = get_build(data['id'], strict=True)

-     koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=None, new=data['state'], info=new_binfo)

-     #return build_id

+     koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=None,

+                               new=data['state'], info=new_binfo)

+     # return build_id

      return data['id']

  

  
@@ -5490,39 +5665,39 @@ 

      if st_desc == 'BUILDING':

          # check to see if this is the controlling task

          if data['state'] == old['state'] and data.get('task_id', '') == old['task_id']:

-             #the controlling task must have restarted (and called initBuild again)

+             # the controlling task must have restarted (and called initBuild again)

              return

          raise koji.GenericError("Build already in progress (task %(task_id)d)"

-                                     % old)

+                                 % old)

          # TODO? - reclaim 'stale' builds (state=BUILDING and task_id inactive)

  

      if st_desc not in ('FAILED', 'CANCELED'):

          raise koji.GenericError("Build already exists (id=%d, state=%s): %r"

-                 % (old['id'], st_desc, data))

+                                 % (old['id'], st_desc, data))

  

      # check for evidence of tag activity

      query = QueryProcessor(columns=['tag_id'], tables=['tag_listing'],

-                 clauses = ['build_id = %(id)s'], values=old)

+                            clauses=['build_id = %(id)s'], values=old)

      if query.execute():

          raise koji.GenericError("Build already exists. Unable to recycle, "

-                 "has tag history")

+                                 "has tag history")

  

      # check for rpms or archives

      query = QueryProcessor(columns=['id'], tables=['rpminfo'],

-                 clauses = ['build_id = %(id)s'], values=old)

+                            clauses=['build_id = %(id)s'], values=old)

      if query.execute():

          raise koji.GenericError("Build already exists. Unable to recycle, "

-                 "has rpm data")

+                                 "has rpm data")

      query = QueryProcessor(columns=['id'], tables=['archiveinfo'],

-                 clauses = ['build_id = %(id)s'], values=old)

+                            clauses=['build_id = %(id)s'], values=old)

      if query.execute():

          raise koji.GenericError("Build already exists. Unable to recycle, "

-                 "has archive data")

+                                 "has archive data")

  

-    # If we reach here, should be ok to replace

+     # If we reach here, should be ok to replace

  

      koji.plugin.run_callbacks('preBuildStateChange', attribute='state',

-                 old=old['state'], new=data['state'], info=data)

+                               old=old['state'], new=data['state'], info=data)

  

      # If there is any old build type info, clear it

      delete = """DELETE FROM maven_builds WHERE build_id = %(id)i"""
@@ -5537,8 +5712,8 @@ 

      data['id'] = old['id']

      update = UpdateProcessor('build', clauses=['id=%(id)s'], values=data)

      update.set(**dslice(data,

-         ['state', 'task_id', 'owner', 'start_time', 'completion_time',

-          'epoch', 'source', 'extra', 'volume_id']))

+                         ['state', 'task_id', 'owner', 'start_time', 'completion_time',

+                          'epoch', 'source', 'extra', 'volume_id']))

      if 'cg_id' in data:

          update.set(cg_id=data['cg_id'])

      update.rawset(create_event='get_event()')
@@ -5548,7 +5723,7 @@ 

          koji.util.rmtree(builddir)

      buildinfo = get_build(data['id'], strict=True)

      koji.plugin.run_callbacks('postBuildStateChange', attribute='state',

-                 old=old['state'], new=data['state'], info=buildinfo)

+                               old=old['state'], new=data['state'], info=buildinfo)

  

  

  def check_noarch_rpms(basepath, rpms, logs=None):
@@ -5590,6 +5765,7 @@ 

  

      return result

  

+ 

  def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None):

      """Import a build into the database (single transaction)

  
@@ -5606,7 +5782,7 @@ 

      koji.plugin.run_callbacks('preImport', type='build', srpm=srpm, rpms=rpms, brmap=brmap,

                                task_id=task_id, build_id=build_id, build=None, logs=logs)

      uploadpath = koji.pathinfo.work()

-     #verify files exist

+     # verify files exist

      for relpath in [srpm] + rpms:

          fn = "%s/%s" % (uploadpath, relpath)

          if not os.path.exists(fn):
@@ -5614,13 +5790,13 @@ 

  

      rpms = check_noarch_rpms(uploadpath, rpms, logs=logs)

  

-     #verify buildroot ids from brmap

+     # verify buildroot ids from brmap

      found = {}

      for br_id in brmap.values():

          if br_id in found:

              continue

          found[br_id] = 1

-         #this will raise an exception if the buildroot id is invalid

+         # this will raise an exception if the buildroot id is invalid

          BuildRoot(br_id)

  

      # get build information
@@ -5632,13 +5808,13 @@ 

      build['task_id'] = task_id

  

      policy_data = {

-             'package': build['name'],

-             'version': build['version'],

-             'release': build['release'],

-             'buildroots': to_list(brmap.values()),

-             'import': True,

-             'import_type': 'rpm',

-             }

+         'package': build['name'],

+         'version': build['version'],

+         'release': build['release'],

+         'buildroots': to_list(brmap.values()),

+         'import': True,

+         'import_type': 'rpm',

+     }

      vol = check_volume_policy(policy_data, strict=False, default='DEFAULT')

      build['volume_id'] = vol['id']

      build['volume_name'] = vol['name']
@@ -5648,25 +5824,29 @@ 

          binfo = get_build(build_id, strict=True)

          new_typed_build(binfo, 'rpm')

      else:

-         #build_id was passed in - sanity check

+         # build_id was passed in - sanity check

          binfo = get_build(build_id, strict=True)

          st_complete = koji.BUILD_STATES['COMPLETE']

          st_old = binfo['state']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_complete, info=binfo)

+         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old,

+                                   new=st_complete, info=binfo)

          for key in ('name', 'version', 'release', 'epoch', 'task_id'):

              if build[key] != binfo[key]:

-                 raise koji.GenericError("Unable to complete build: %s mismatch (build: %s, rpm: %s)" % (key, binfo[key], build[key]))

+                 raise koji.GenericError(

+                     "Unable to complete build: %s mismatch (build: %s, rpm: %s)" %

+                     (key, binfo[key], build[key]))

          if binfo['state'] != koji.BUILD_STATES['BUILDING']:

-             raise koji.GenericError("Unable to complete build: state is %s" \

-                     % koji.BUILD_STATES[binfo['state']])

-         #update build state

+             raise koji.GenericError("Unable to complete build: state is %s"

+                                     % koji.BUILD_STATES[binfo['state']])

+         # update build state

          update = UpdateProcessor('build', clauses=['id=%(id)s'], values=binfo)

          update.set(state=st_complete)

          update.rawset(completion_time='NOW()')

          update.set(volume_id=build['volume_id'])

          update.execute()

          binfo = get_build(build_id, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_complete, info=binfo)

+         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old,

+                                   new=st_complete, info=binfo)

  

      # now to handle the individual rpms

      for relpath in [srpm] + rpms:
@@ -5697,21 +5877,21 @@ 

      if not os.path.exists(fn):

          raise koji.GenericError("no such file: %s" % fn)

  

-     #read rpm info

+     # read rpm info

      hdr = koji.get_rpm_header(fn)

      rpminfo = koji.get_header_fields(hdr, ['name', 'version', 'release', 'epoch',

-                     'sourcepackage', 'arch', 'buildtime', 'sourcerpm'])

+                                            'sourcepackage', 'arch', 'buildtime', 'sourcerpm'])

      if rpminfo['sourcepackage'] == 1:

          rpminfo['arch'] = "src"

  

-     #sanity check basename

+     # sanity check basename

      basename = os.path.basename(fn)

      expected = "%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo

      if basename != expected:

          raise koji.GenericError("bad filename: %s (expected %s)" % (basename, expected))

  

      if buildinfo is None:

-         #figure it out for ourselves

+         # figure it out for ourselves

          if rpminfo['sourcepackage'] == 1:

              buildinfo = get_build(rpminfo, strict=False)

              if not buildinfo:
@@ -5720,10 +5900,10 @@ 

                  # we add the rpm build type below

                  buildinfo = get_build(build_id, strict=True)

          else:

-             #figure it out from sourcerpm string

+             # figure it out from sourcerpm string

              buildinfo = get_build(koji.parse_NVRA(rpminfo['sourcerpm']))

              if buildinfo is None:

-                 #XXX - handle case where package is not a source rpm

+                 # XXX - handle case where package is not a source rpm

                  #      and we still need to create a new build

                  raise koji.GenericError('No matching build')

              state = koji.BUILD_STATES[buildinfo['state']]
@@ -5733,21 +5913,21 @@ 

      elif not wrapper:

          # only enforce the srpm name matching the build for non-wrapper rpms

          srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % buildinfo

-         #either the sourcerpm field should match the build, or the filename

-         #itself (for the srpm)

+         # either the sourcerpm field should match the build, or the filename

+         # itself (for the srpm)

          if rpminfo['sourcepackage'] != 1:

              if rpminfo['sourcerpm'] != srpmname:

-                 raise koji.GenericError("srpm mismatch for %s: %s (expected %s)" \

-                         % (fn, rpminfo['sourcerpm'], srpmname))

+                 raise koji.GenericError("srpm mismatch for %s: %s (expected %s)"

+                                         % (fn, rpminfo['sourcerpm'], srpmname))

          elif basename != srpmname:

-             raise koji.GenericError("srpm mismatch for %s: %s (expected %s)" \

-                     % (fn, basename, srpmname))

+             raise koji.GenericError("srpm mismatch for %s: %s (expected %s)"

+                                     % (fn, basename, srpmname))

  

      # if we're adding an rpm to it, then this build is of rpm type

      # harmless if build already has this type

      new_typed_build(buildinfo, 'rpm')

  

-     #add rpminfo entry

+     # add rpminfo entry

      rpminfo['id'] = _singleValue("""SELECT nextval('rpminfo_id_seq')""")

      rpminfo['build_id'] = buildinfo['id']

      rpminfo['size'] = os.path.getsize(fn)
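The dropped `\` continuations in this hunk were redundant: inside an open parenthesis a statement continues implicitly, so only the operator placement moved. A sketch with made-up values:

    fn = 'bash-5.0.7-1.fc30.src.rpm'  # invented filename
    msg = ("srpm mismatch for %s: %s (expected %s)"
           % (fn, 'wrong.src.rpm', fn))  # no backslash needed inside parentheses
    print(msg)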
@@ -5773,7 +5953,7 @@ 

      koji.plugin.run_callbacks('postImport', type='rpm', rpm=rpminfo, build=buildinfo,

                                filepath=fn, fileinfo=fileinfo)

  

-     #extra fields for return

+     # extra fields for return

      rpminfo['build'] = buildinfo

      rpminfo['brootid'] = brootid

      return rpminfo
@@ -5937,7 +6117,7 @@ 

          self.check_build_dir()

  

          koji.plugin.run_callbacks('preImport', type='cg', metadata=metadata,

-                 directory=directory)

+                                   directory=directory)

  

          # finalize import

          self.get_build(token)
@@ -5954,7 +6134,6 @@ 

  

          return self.buildinfo

  

- 

      def get_metadata(self, metadata, directory):

          """Get the metadata from the args"""

  
@@ -5967,7 +6146,7 @@ 

                  raise koji.GenericError("Invalid metadata, cannot encode: %r" % metadata)

              return metadata

          if metadata is None:

-             #default to looking for uploaded file

+             # default to looking for uploaded file

              metadata = 'metadata.json'

          if not isinstance(metadata, six.string_types):

              raise koji.GenericError("Invalid metadata value: %r" % metadata)
@@ -5983,7 +6162,6 @@ 

          self.metadata = parse_json(metadata, desc='metadata')

          return self.metadata

  

- 

      def assert_cg_access(self):

          """Check that user has access for all referenced content generators"""

  
@@ -5997,7 +6175,6 @@ 

              assert_cg(cg_id)

          self.cgs = cgs

  

- 

      def assert_policy(self):

          policy_data = {

              'package': self.buildinfo['name'],
@@ -6008,25 +6185,23 @@ 

          }

          assert_policy('cg_import', policy_data)

  

- 

      def set_volume(self):

          """Use policy to determine what the volume should be"""

          # we have to be careful and provide sufficient data

          policy_data = {

-                 'package': self.buildinfo['name'],

-                 'version': self.buildinfo['version'],

-                 'release': self.buildinfo['release'],

-                 'source': self.buildinfo['source'],

-                 'cg_list': list(self.cgs),

-                 'import': True,

-                 'import_type': 'cg',

-                 }

+             'package': self.buildinfo['name'],

+             'version': self.buildinfo['version'],

+             'release': self.buildinfo['release'],

+             'source': self.buildinfo['source'],

+             'cg_list': list(self.cgs),

+             'import': True,

+             'import_type': 'cg',

+         }

          vol = check_volume_policy(policy_data, strict=False)

          if vol:

              self.buildinfo['volume_id'] = vol['id']

              self.buildinfo['volume_name'] = vol['name']

  

- 

      def check_build_dir(self, delete=False):

          """Check that the import directory does not already exist"""

          path = koji.pathinfo.build(self.buildinfo)
@@ -6037,13 +6212,12 @@ 

              else:

                  raise koji.GenericError("Destination directory already exists: %s" % path)

  

- 

- 

      def prep_build(self, token=None):

          metadata = self.metadata

          if metadata['build'].get('build_id'):

              if len(self.cgs) != 1:

-                 raise koji.GenericError("Reserved builds can handle only single content generator.")

+                 raise koji.GenericError(

+                     "Reserved builds can handle only single content generator.")

              cg_id = list(self.cgs)[0]

              build_id = metadata['build']['build_id']

              buildinfo = get_build(build_id, strict=True)
@@ -6112,7 +6286,6 @@ 

          self.typeinfo = typeinfo

          return buildinfo

  

- 

      def get_build(self, token=None):

          if token:

              # token and reservation were already checked in prep_build
@@ -6145,7 +6318,6 @@ 

          self.buildinfo = buildinfo

          return buildinfo

  

- 

      def update_build(self):

          """Update a reserved build"""

          # sanity checks performed by prep_build
@@ -6159,7 +6331,8 @@ 

          source = self.buildinfo.get('source')

          st_complete = koji.BUILD_STATES['COMPLETE']

          st_old = old_info['state']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_complete, info=old_info)

+         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old,

+                                   new=st_complete, info=old_info)

          update = UpdateProcessor('build', clauses=['id=%(build_id)s'], values=self.buildinfo)

          update.set(state=st_complete, extra=extra, owner=owner, source=source)

          if self.buildinfo.get('volume_id'):
@@ -6169,11 +6342,11 @@ 

          update.execute()

          buildinfo = get_build(build_id, strict=True)

          clear_reservation(build_id)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_complete, info=buildinfo)

+         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old,

+                                   new=st_complete, info=buildinfo)

  

          return buildinfo

  

- 

      def import_metadata(self):

          """Import the raw metadata"""

  
@@ -6187,7 +6360,6 @@ 

          with open(path, 'w') as fo:

              fo.write(self.raw_metadata)

  

- 

      def prep_brs(self):

          metadata = self.metadata

          br_used = set([f['buildroot_id'] for f in metadata['output']])
@@ -6201,7 +6373,6 @@ 

              br_idx[brfakeid] = self.prep_buildroot(brdata)

          self.br_prep = br_idx

  

- 

      def import_brs(self):

          brmap = {}

          for brfakeid in self.br_prep:
@@ -6209,7 +6380,6 @@ 

              brmap[brfakeid] = self.import_buildroot(entry)

          self.brmap = brmap

  

- 

      def prep_buildroot(self, brdata):

          ret = {}

          brinfo = {
@@ -6230,7 +6400,6 @@ 

          }

          return ret

  

- 

      def import_buildroot(self, entry):

          """Import the prepared buildroot data"""

  
@@ -6238,7 +6407,7 @@ 

          br = BuildRoot()

          br.cg_new(entry['brinfo'])

  

-         #buildroot components

+         # buildroot components

          br.setList(entry['rpmlist'])

          br.updateArchiveList(entry['archives'])

  
@@ -6247,7 +6416,6 @@ 

  

          return br

  

- 

      def match_components(self, components):

          rpms = []

          files = []
@@ -6268,7 +6436,6 @@ 

                  raise koji.GenericError("Unknown component type: %(type)s" % comp)

          return rpms, files

  

- 

      def match_rpm(self, comp):

          # TODO: do we allow inclusion of external rpms?

          if 'location' in comp:
@@ -6284,13 +6451,12 @@ 

          if rinfo['payloadhash'] != comp['sigmd5']:

              # XXX - this is a temporary workaround until we can better track external refs

              logger.warning("IGNORING rpm component (md5 mismatch): %r", comp)

-             #nvr = "%(name)s-%(version)s-%(release)s" % rinfo

-             #raise koji.GenericError("md5sum mismatch for %s: %s != %s"

+             # nvr = "%(name)s-%(version)s-%(release)s" % rinfo

+             # raise koji.GenericError("md5sum mismatch for %s: %s != %s"

              #            % (nvr, comp['sigmd5'], rinfo['payloadhash']))

          # TODO - should we check the signature field?

          return rinfo

  

- 

      def match_file(self, comp):

          # hmm, how do we look up archives?

          # updateMavenBuildRootList does seriously wild stuff
@@ -6305,15 +6471,17 @@ 

                  continue

              if archive['checksum'] == comp['checksum']:

                  return archive

-         #else

-         logger.error("Failed to match archive %(filename)s (size %(filesize)s, sum %(checksum)s", comp)

+         # else

+         logger.error("Failed to match archive %(filename)s (size %(filesize)s, sum %(checksum)s",

+                      comp)

          if type_mismatches:

              logger.error("Match failed with %i type mismatches", type_mismatches)

          # TODO: allow external archives

          # XXX - this is a temporary workaround until we can better track external refs

          logger.warning("IGNORING unmatched archive: %r", comp)

          return None

-         #raise koji.GenericError("No match: %(filename)s (size %(filesize)s, sum %(checksum)s" % comp)

+         # raise koji.GenericError("No match: %(filename)s (size %(filesize)s, sum %(checksum)s" %

+         #                         comp)

  

      def match_kojifile(self, comp):

          """Look up the file by archive id and sanity check the other data"""
@@ -6324,19 +6492,19 @@ 

          for key in ['nvr', 'filename']:

              if key not in comp:

                  raise koji.GenericError('%s field missing for component, '

-                         'archive_id=%s' % (key, archive['id']))

+                                         'archive_id=%s' % (key, archive['id']))

          expected = {

-                 'nvr': build['nvr'],

-                 'filename': archive['filename'],

-                 'filesize': int(archive['size']),

-                 'checksum': archive['checksum'],

-                 'checksum_type': koji.CHECKSUM_TYPES[archive['checksum_type']],

-                 }

+             'nvr': build['nvr'],

+             'filename': archive['filename'],

+             'filesize': int(archive['size']),

+             'checksum': archive['checksum'],

+             'checksum_type': koji.CHECKSUM_TYPES[archive['checksum_type']],

+         }

          for key in expected:

              if key in comp and expected[key] != comp[key]:

                  raise koji.GenericError('Component field %s does not match for '

-                     'archive_id=%s: %s != %s' % (key, archive['id'],

-                         expected[key], comp[key]))

+                                         'archive_id=%s: %s != %s' % (key, archive['id'],

+                                                                      expected[key], comp[key]))

          return archive

  

      def prep_outputs(self):
@@ -6347,13 +6515,15 @@ 

              if fileinfo.get('metadata_only', False):

                  self.metadata_only = True

              workdir = koji.pathinfo.work()

-             path = joinpath(workdir, self.directory, fileinfo.get('relpath', ''), fileinfo['filename'])

+             path = joinpath(workdir, self.directory, fileinfo.get('relpath', ''),

+                             fileinfo['filename'])

              fileinfo['hub.path'] = path

  

              filesize = os.path.getsize(path)

              if filesize != fileinfo['filesize']:

-                 raise koji.GenericError("File size %s for %s (expected %s) doesn't match. Corrupted upload?" %

-                         (filesize, fileinfo['filename'], fileinfo['filesize']))

+                 raise koji.GenericError(

+                     "File size %s for %s (expected %s) doesn't match. Corrupted upload?" %

+                     (filesize, fileinfo['filename'], fileinfo['filesize']))

  

              # checksum

              if fileinfo['checksum_type'] != 'md5':
@@ -6369,11 +6539,13 @@ 

                      m.update(contents)

                  if fileinfo['checksum'] != m.hexdigest():

                      raise koji.GenericError("File checksum mismatch for %s: %s != %s" %

-                             (fileinfo['filename'], fileinfo['checksum'], m.hexdigest()))

+                                             (fileinfo['filename'], fileinfo['checksum'],

+                                              m.hexdigest()))

              fileinfo['hub.checked_md5'] = True

  

              if fileinfo['buildroot_id'] not in self.br_prep:

-                 raise koji.GenericError("Missing buildroot metadata for id %(buildroot_id)r" % fileinfo)

+                 raise koji.GenericError("Missing buildroot metadata for id %(buildroot_id)r" %

+                                         fileinfo)

              if fileinfo['type'] not in ['rpm', 'log']:

                  self.prep_archive(fileinfo)

              if fileinfo['type'] == 'rpm':
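The size and checksum validation above reads the upload in chunks; the md5 part amounts to this standalone sketch (the chunk size is arbitrary):

    import hashlib

    def file_md5(path, chunk_size=8192):
        """Chunked md5, mirroring the verification loop above."""
        m = hashlib.md5()
        with open(path, 'rb') as fp:
            for contents in iter(lambda: fp.read(chunk_size), b''):
                m.update(contents)
        return m.hexdigest()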
@@ -6381,7 +6553,6 @@ 

              outputs.append(fileinfo)

          self.prepped_outputs = outputs

  

- 

      def import_outputs(self):

          for fileinfo in self.prepped_outputs:

              brinfo = self.brmap.get(fileinfo['buildroot_id'])
@@ -6397,7 +6568,6 @@ 

                  self.import_archive(self.buildinfo, brinfo, fileinfo)

          ensure_volume_symlink(self.buildinfo)

  

- 

      def prep_archive(self, fileinfo):

          # determine archive import type

          extra = fileinfo.get('extra', {})
@@ -6409,7 +6579,7 @@ 

                  continue

              if btype is not None:

                  raise koji.GenericError("Output file has multiple types: "

-                     "%(filename)s" % fileinfo)

+                                         "%(filename)s" % fileinfo)

              btype = key

              type_info = extra[key]

          for key in extra.get('typeinfo', {}):
@@ -6417,7 +6587,7 @@ 

                  raise koji.GenericError("Duplicate typeinfo for: %r" % btype)

              elif btype is not None:

                  raise koji.GenericError("Output file has multiple types: "

-                     "%(filename)s" % fileinfo)

+                                         "%(filename)s" % fileinfo)

              btype = key

              type_info = extra['typeinfo'][key]

  
@@ -6426,7 +6596,7 @@ 

  

          if btype not in self.typeinfo:

              raise koji.GenericError('Output type %s not listed in build '

-                         'types' % btype)

+                                     'types' % btype)

  

          fileinfo['hub.btype'] = btype

          fileinfo['hub.type_info'] = type_info
@@ -6434,7 +6604,7 @@ 

          if 'components' in fileinfo:

              if btype in ('maven', 'win'):

                  raise koji.GenericError("Component list not allowed for "

-                         "archives of type %s" % btype)

+                                         "archives of type %s" % btype)

              # for new types, we trust the metadata

              components = fileinfo['components']

              rpmlist, archives = self.match_components(components)
@@ -6442,7 +6612,6 @@ 

              fileinfo['hub.rpmlist'] = rpmlist

              fileinfo['hub.archives'] = archives

  

- 

      def import_rpm(self, buildinfo, brinfo, fileinfo):

          if fileinfo.get('metadata_only', False):

              raise koji.GenericError('Metadata-only imports are not supported for rpms')
@@ -6452,7 +6621,6 @@ 

          import_rpm_file(fn, buildinfo, rpminfo)

          add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))

  

- 

      def import_log(self, buildinfo, fileinfo):

          if fileinfo.get('metadata_only', False):

              # logs are not currently tracked, so this is a no op
@@ -6461,7 +6629,6 @@ 

          fn = fileinfo['hub.path']

          import_build_log(fn, buildinfo, subdir=None)

  

- 

      def import_archive(self, buildinfo, brinfo, fileinfo):

          fn = fileinfo['hub.path']

          btype = fileinfo['hub.btype']
@@ -6472,7 +6639,6 @@ 

          if 'components' in fileinfo:

              self.import_components(archiveinfo['id'], fileinfo)

  

- 

      def import_components(self, archive_id, fileinfo):

          rpmlist = fileinfo['hub.rpmlist']

          archives = fileinfo['hub.archives']
@@ -6520,7 +6686,7 @@ 

          if field not in rpminfo:

              raise koji.GenericError("%s field missing: %r" % (field, rpminfo))

          if not isinstance(rpminfo[field], allowed):

-             #this will catch unwanted NULLs

+             # this will catch unwanted NULLs

              raise koji.GenericError("Invalid value for %s: %r" % (field, rpminfo[field]))

      # strip extra fields

      rpminfo = dslice(rpminfo, [x[0] for x in dtypes])
@@ -6536,8 +6702,8 @@ 

              if strict:

                  raise koji.GenericError("external rpm already exists: %s" % disp)

              elif data['payloadhash'] != previous['payloadhash']:

-                 raise koji.GenericError("hash changed for external rpm: %s (%s -> %s)" \

-                         % (disp, previous['payloadhash'], data['payloadhash']))

+                 raise koji.GenericError("hash changed for external rpm: %s (%s -> %s)"

+                                         % (disp, previous['payloadhash'], data['payloadhash']))

              else:

                  return previous

  
@@ -6566,6 +6732,7 @@ 

  

      return get_rpm(data['id'])

  

+ 

  def import_build_log(fn, buildinfo, subdir=None):

      """Move a logfile related to a build to the right place"""

      logdir = koji.pathinfo.build_logs(buildinfo)
@@ -6579,6 +6746,7 @@ 

          raise koji.GenericError("Error importing build log. %s is not a regular file." % fn)

      move_and_symlink(fn, final_path)

  

+ 

  def import_rpm_file(fn, buildinfo, rpminfo):

      """Move the rpm file into the proper place

  
@@ -6587,6 +6755,7 @@ 

      final_path = "%s/%s" % (koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(rpminfo))

      _import_archive_file(fn, os.path.dirname(final_path))

  

+ 

  def _import_wrapper(task_id, build_info, rpm_results):

      """Helper function to import wrapper rpms for a Maven build"""

      rpm_buildroot_id = rpm_results['buildroot_id']
@@ -6603,6 +6772,7 @@ 

          import_build_log(joinpath(rpm_task_dir, log),

                           build_info, subdir='noarch')

  

+ 

  def merge_scratch(task_id):

      """Import rpms from a scratch build into an existing build, retaining

      buildroot metadata and build logs."""
@@ -6633,8 +6803,8 @@ 

                      srpm = srpm_name

                  else:

                      if srpm != srpm_name:

-                         raise koji.ImportError('task srpm names do not match: %s, %s' % \

-                               (srpm, srpm_name))

+                         raise koji.ImportError('task srpm names do not match: %s, %s' %

+                                                (srpm, srpm_name))

              elif output.endswith('.noarch.rpm'):

                  continue

              elif output.endswith('.rpm'):
@@ -6642,8 +6812,8 @@ 

                  if 'arch' not in info:

                      info['arch'] = rpminfo['arch']

                  elif info['arch'] != rpminfo['arch']:

-                     raise koji.ImportError('multiple arches generated by task %s: %s, %s' % \

-                           (child['id'], info['arch'], rpminfo['arch']))

+                     raise koji.ImportError('multiple arches generated by task %s: %s, %s' %

+                                            (child['id'], info['arch'], rpminfo['arch']))

                  info['rpms'].append(output)

              elif output.endswith('.log'):

                  info['logs'].append(output)
@@ -6664,8 +6834,8 @@ 

      build_nvr = koji.parse_NVRA(srpm)

      build = get_build(build_nvr)

      if not build:

-         raise koji.ImportError('no such build: %(name)s-%(version)s-%(release)s' % \

-               build_nvr)

+         raise koji.ImportError('no such build: %(name)s-%(version)s-%(release)s' %

+                                build_nvr)

      if build['state'] != koji.BUILD_STATES['COMPLETE']:

          raise koji.ImportError('%s did not complete successfully' % build['nvr'])

      if not build['task_id']:
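`koji.parse_NVRA`, used above to map the srpm name to a build, splits a standard RPM filename into its fields, for example:

    import koji

    info = koji.parse_NVRA('bash-5.0.7-1.fc30.src.rpm')  # invented filename
    print(info['name'], info['version'], info['release'], info['arch'])
    # bash 5.0.7 1.fc30 src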
@@ -6677,15 +6847,15 @@ 

  

      # compare the task and build and make sure they are compatible with importing

      if task_info['request'][0] != build_task_info['request'][0]:

-         raise koji.ImportError('SCM URLs for the task and build do not match: %s, %s' % \

-               (task_info['request'][0], build_task_info['request'][0]))

+         raise koji.ImportError('SCM URLs for the task and build do not match: %s, %s' %

+                                (task_info['request'][0], build_task_info['request'][0]))

      build_arches = set()

      for rpminfo in list_rpms(buildID=build['id']):

          if rpminfo['arch'] == 'src':

              build_srpm = '%s.src.rpm' % rpminfo['nvr']

              if srpm != build_srpm:

-                 raise koji.ImportError('task and build srpm names do not match: %s, %s' % \

-                       (srpm, build_srpm))

+                 raise koji.ImportError('task and build srpm names do not match: %s, %s' %

+                                        (srpm, build_srpm))

          elif rpminfo['arch'] == 'noarch':

              continue

          else:
@@ -6695,8 +6865,8 @@ 

      task_arches = set([t['arch'] for t in tasks.values()])

      overlapping_arches = task_arches.intersection(build_arches)

      if overlapping_arches:

-         raise koji.ImportError('task %s and %s produce rpms with the same arches: %s' % \

-               (task_info['id'], build['nvr'], ', '.join(overlapping_arches)))

+         raise koji.ImportError('task %s and %s produce rpms with the same arches: %s' %

+                                (task_info['id'], build['nvr'], ', '.join(overlapping_arches)))

  

      # everything looks good, do the import

      for task_id, info in tasks.items():
@@ -6716,22 +6886,26 @@ 

  

      return build['id']

  

+ 

  def get_archive_types():

      """Return a list of all supported archive types."""

      select = """SELECT id, name, description, extensions FROM archivetypes

      ORDER BY id"""

      return _multiRow(select, {}, ('id', 'name', 'description', 'extensions'))

  

+ 

  def _get_archive_type_by_name(name, strict=True):

      select = """SELECT id, name, description, extensions FROM archivetypes

      WHERE name = %(name)s"""

      return _singleRow(select, locals(), ('id', 'name', 'description', 'extensions'), strict)

  

+ 

  def _get_archive_type_by_id(type_id, strict=False):

      select = """SELECT id, name, description, extensions FROM archivetypes

      WHERE id = %(type_id)i"""

      return _singleRow(select, locals(), ('id', 'name', 'description', 'extensions'), strict)

  

+ 

  def get_archive_type(filename=None, type_name=None, type_id=None, strict=False):

      """

      Get the archive type for the given filename, type_name, or type_id.
@@ -6748,11 +6922,11 @@ 

  

      parts = filename.split('.')

      query = QueryProcessor(

-             tables=['archivetypes'],

-             columns=['id', 'name', 'description', 'extensions'],

-             clauses=['extensions ~* %(pattern)s'],

-             )

-     for start in range(len(parts)-1, -1, -1):

+         tables=['archivetypes'],

+         columns=['id', 'name', 'description', 'extensions'],

+         clauses=['extensions ~* %(pattern)s'],

+     )

+     for start in range(len(parts) - 1, -1, -1):

          ext = '.'.join(parts[start:])

          query.values['pattern'] = r'(\s|^)%s(\s|$)' % ext

          results = query.execute()
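The rewritten loop probes candidate extensions from the shortest suffix outward, matching each against the `extensions` column. The suffix generation on its own:

    parts = 'foo-1.0.tar.gz'.split('.')
    for start in range(len(parts) - 1, -1, -1):
        print('.'.join(parts[start:]))
    # gz
    # tar.gz
    # 0.tar.gz
    # foo-1.0.tar.gz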
@@ -6762,12 +6936,13 @@ 

          elif len(results) > 1:

              # this should never happen, and is a misconfiguration in the database

              raise koji.GenericError('multiple matches for file extension: %s' % ext)

-     #otherwise

+     # otherwise

      if strict:

          raise koji.GenericError('unsupported file extension: %s' % ext)

      else:

          return None

  

+ 

  def add_archive_type(name, description, extensions):

      """

      Add new archive type.
@@ -6783,7 +6958,7 @@ 

      data = {'name': name,

              'description': description,

              'extensions': extensions,

-     }

+             }

      if get_archive_type(type_name=name):

          raise koji.GenericError("archivetype %s already exists" % name)

      # No invalid or duplicate extensions
@@ -6812,8 +6987,8 @@ 

          # already exists, verify that it matches

          for field in ('group_id', 'artifact_id', 'version'):

              if current_maven_info[field] != maven_info[field]:

-                 raise koji.BuildError('%s mismatch (current: %s, new: %s)' % \

-                     (field, current_maven_info[field], maven_info[field]))

+                 raise koji.BuildError('%s mismatch (current: %s, new: %s)' %

+                                       (field, current_maven_info[field], maven_info[field]))

      else:

          maven_info['build_id'] = build['id']

          data = dslice(maven_info, ['build_id', 'group_id', 'artifact_id', 'version'])
@@ -6822,6 +6997,7 @@ 

          # also add build_types entry

          new_typed_build(build, 'maven')

  

+ 

  def new_win_build(build_info, win_info):

      """

      Add Windows metadata to an existing build.
@@ -6843,6 +7019,7 @@ 

          # also add build_types entry

          new_typed_build(build_info, 'win')

  

+ 

  def new_image_build(build_info):

      """

      Add Image metadata to an existing build. This is just the buildid so that
@@ -6866,7 +7043,7 @@ 

  def new_typed_build(build_info, btype):

      """Mark build as a given btype"""

  

-     btype_id=lookup_name('btype', btype, strict=True)['id']

+     btype_id = lookup_name('btype', btype, strict=True)['id']

      query = QueryProcessor(tables=('build_types',), columns=('build_id',),

                             clauses=('build_id = %(build_id)i',

                                      'btype_id = %(btype_id)i',),
@@ -6897,7 +7074,8 @@ 

      be any non-rpm filetype supported by Koji.

  

      filepath: full path to the archive file

-     buildinfo: dict of information about the build to associate the archive with (as returned by getBuild())

+     buildinfo: dict of information about the build to associate the archive with

+                (as returned by getBuild())

      type: type of the archive being imported.  Currently supported archive types: maven, win, image

      typeInfo: dict of type-specific information

      buildroot_id: the id of the buildroot the archive was built in (may be None)
@@ -6947,14 +7125,14 @@ 

              # check against metadata

              if archiveinfo['size'] != fileinfo['filesize']:

                  raise koji.GenericError("File size mismatch for %s: %s != %s" %

-                         (filename, archiveinfo['size'], fileinfo['filesize']))

+                                         (filename, archiveinfo['size'], fileinfo['filesize']))

              if fileinfo['checksum_type'] != 'md5':

                  # XXX

                  # until we change the way we handle checksums, we have to limit this to md5

                  raise koji.GenericError("Unsupported checksum type: %(checksum_type)s" % fileinfo)

              if archiveinfo['checksum'] != fileinfo['checksum']:

                  raise koji.GenericError("File checksum mismatch for %s: %s != %s" %

-                         (filename, archiveinfo['checksum'], fileinfo['checksum']))

+                                         (filename, archiveinfo['checksum'], fileinfo['checksum']))
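Note (illustrative, not part of the patch): the rewrapped checks above compare the computed archive info against a caller-supplied fileinfo mapping; schematically it looks something like this (all values invented):

    # hypothetical fileinfo, as consulted by the size/checksum checks above
    fileinfo = {
        'filename': 'demo.jar',
        'filesize': 4096,
        'checksum_type': 'md5',  # only md5 is accepted here (see the XXX note)
        'checksum': '0f343b0931126a20f133d67c2b018a3b',
    }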

      archivetype = get_archive_type(filename, strict=True)

      archiveinfo['type_id'] = archivetype['id']

      btype = lookup_name('btype', type, strict=False)
@@ -6986,14 +7164,16 @@ 

              pom_maveninfo = koji.pom_to_maven_info(pom_info)

              # sanity check: Maven info from pom must match the user-supplied typeInfo

              if koji.mavenLabel(pom_maveninfo) != koji.mavenLabel(typeInfo):

-                 raise koji.BuildError('Maven info from .pom file (%s) does not match user-supplied typeInfo (%s)' % \

+                 raise koji.BuildError(

+                     'Maven info from .pom file (%s) does not match user-supplied typeInfo (%s)' %

                      (koji.mavenLabel(pom_maveninfo), koji.mavenLabel(typeInfo)))

              # sanity check: the filename of the pom file must match <artifactId>-<version>.pom

              if filename != '%(artifact_id)s-%(version)s.pom' % typeInfo:

-                 raise koji.BuildError('Maven info (%s) is not consistent with pom filename (%s)' % \

-                       (koji.mavenLabel(typeInfo), filename))

+                 raise koji.BuildError('Maven info (%s) is not consistent with pom filename (%s)' %

+                                       (koji.mavenLabel(typeInfo), filename))

  

-         insert = InsertProcessor('maven_archives', data=dslice(typeInfo, ('group_id', 'artifact_id', 'version')))

+         insert = InsertProcessor('maven_archives',

+                                  data=dslice(typeInfo, ('group_id', 'artifact_id', 'version')))

          insert.set(archive_id=archive_id)

          insert.execute()
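Note (illustrative, not part of the patch): the two rewrapped sanity checks enforce standard Maven naming; a compact sketch of the rule with invented coordinates:

    # illustration: the .pom filename must be <artifact_id>-<version>.pom
    typeInfo = {'group_id': 'org.example', 'artifact_id': 'demo', 'version': '1.0'}
    expected = '%(artifact_id)s-%(version)s.pom' % typeInfo  # 'demo-1.0.pom'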

  
@@ -7056,9 +7236,11 @@ 

      if os.path.exists(final_path):

          raise koji.GenericError("Error importing archive file, %s already exists" % final_path)

      if os.path.islink(filepath) or not os.path.isfile(filepath):

-         raise koji.GenericError("Error importing archive file, %s is not a regular file" % filepath)

+         raise koji.GenericError("Error importing archive file, %s is not a regular file" %

+                                 filepath)

      move_and_symlink(filepath, final_path, create_dir=True)

  

+ 

  def _generate_maven_metadata(mavendir):

      """

      Generate md5 and sha1 sums for every file in mavendir, if they don't already exist.
@@ -7083,13 +7265,14 @@ 

                  with open('%s/%s' % (mavendir, sumfile), 'w') as sumobj:

                      sumobj.write(sum.hexdigest())

  

+ 

  def add_rpm_sig(an_rpm, sighdr):

      """Store a signature header for an rpm"""

-     #calling function should perform permission checks, if applicable

+     # calling function should perform permission checks, if applicable

      rinfo = get_rpm(an_rpm, strict=True)

      if rinfo['external_repo_id']:

-         raise koji.GenericError("Not an internal rpm: %s (from %s)" \

-                 % (an_rpm, rinfo['external_repo_name']))

+         raise koji.GenericError("Not an internal rpm: %s (from %s)"

+                                 % (an_rpm, rinfo['external_repo_name']))

      binfo = get_build(rinfo['build_id'])

      builddir = koji.pathinfo.build(binfo)

      if not os.path.isdir(builddir):
@@ -7116,7 +7299,7 @@ 

              raise koji.GenericError("wrong md5 for %s: %s" % (nvra, sigmd5))

      if not sigkey:

          sigkey = ''

-         #we use the sigkey='' to represent unsigned in the db (so that uniqueness works)

+         # we use the sigkey='' to represent unsigned in the db (so that uniqueness works)

      else:

          sigkey = koji.get_sigpacket_key_id(sigkey)

      sighash = hashlib.md5(sighdr).hexdigest()
@@ -7125,7 +7308,7 @@ 

      q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""

      rows = _fetchMulti(q, locals())

      if rows:

-         #TODO[?] - if sighash is the same, handle more gracefully

+         # TODO[?] - if sighash is the same, handle more gracefully

          nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo

          raise koji.GenericError("Signature already exists for package %s, key %s" % (nvra, sigkey))

      koji.plugin.run_callbacks('preRPMSign', sigkey=sigkey, sighash=sighash, build=binfo, rpm=rinfo)
@@ -7137,7 +7320,9 @@ 

      koji.ensuredir(os.path.dirname(sigpath))

      with open(sigpath, 'wb') as fo:

          fo.write(sighdr)

-     koji.plugin.run_callbacks('postRPMSign', sigkey=sigkey, sighash=sighash, build=binfo, rpm=rinfo)

+     koji.plugin.run_callbacks('postRPMSign',

+                               sigkey=sigkey, sighash=sighash, build=binfo, rpm=rinfo)

+ 

  

  def _scan_sighdr(sighdr, fn):

      """Splices sighdr with other headers from fn and queries (no payload)"""
@@ -7146,24 +7331,24 @@ 

          raise koji.GenericError("No such path: %s" % fn)

      if not os.path.isfile(fn):

          raise koji.GenericError("Not a regular file: %s" % fn)

-     #XXX should probably add an option to splice_rpm_sighdr to handle this instead

+     # XXX should probably add an option to splice_rpm_sighdr to handle this instead

      sig_start, sigsize = koji.find_rpm_sighdr(fn)

      hdr_start = sig_start + sigsize

      hdrsize = koji.rpm_hdr_size(fn, hdr_start)

      inp = open(fn, 'rb')

      outp = tempfile.TemporaryFile(mode='w+b')

-     #before signature

+     # before signature

      outp.write(inp.read(sig_start))

-     #signature

+     # signature

      outp.write(sighdr)

      inp.seek(sigsize, 1)

-     #main header

+     # main header

      outp.write(inp.read(hdrsize))

      inp.close()

      outp.seek(0, 0)

      ts = rpm.TransactionSet()

-     ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)

-     #(we have no payload, so verifies would fail otherwise)

+     ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)

+     # (we have no payload, so verifies would fail otherwise)

      hdr = ts.hdrFromFdno(outp.fileno())

      outp.close()

      sig = koji.get_header_field(hdr, 'siggpg')
@@ -7171,8 +7356,9 @@ 

          sig = koji.get_header_field(hdr, 'sigpgp')

      return koji.get_header_field(hdr, 'sigmd5'), sig

  

+ 

  def check_rpm_sig(an_rpm, sigkey, sighdr):

-     #verify that the provided signature header matches the key and rpm

+     # verify that the provided signature header matches the key and rpm

      rinfo = get_rpm(an_rpm, strict=True)

      binfo = get_build(rinfo['build_id'])

      builddir = koji.pathinfo.build(binfo)
@@ -7186,13 +7372,13 @@ 

      try:

          koji.splice_rpm_sighdr(sighdr, rpm_path, temp)

          ts = rpm.TransactionSet()

-         ts.setVSFlags(0)  #full verify

+         ts.setVSFlags(0)  # full verify

          with open(temp, 'rb') as fo:

              hdr = ts.hdrFromFdno(fo.fileno())

-     except:

+     except Exception:

          try:

              os.unlink(temp)

-         except:

+         except Exception:

              pass

          raise

      raw_key = koji.get_header_field(hdr, 'siggpg')
@@ -7203,12 +7389,11 @@ 

      else:

          found_key = koji.get_sigpacket_key_id(raw_key)

      if sigkey != found_key:

-         raise koji.GenericError("Signature key mismatch: got %s, expected %s" \

-                               % (found_key, sigkey))

+         raise koji.GenericError("Signature key mismatch: got %s, expected %s"

+                                 % (found_key, sigkey))

      os.unlink(temp)

  

  

- 

  def query_rpm_sigs(rpm_id=None, sigkey=None, queryOpts=None):

      fields = ('rpm_id', 'sigkey', 'sighash')

      clauses = []
@@ -7220,12 +7405,13 @@ 

                             values=locals(), opts=queryOpts)

      return query.execute()

  

+ 

  def write_signed_rpm(an_rpm, sigkey, force=False):

      """Write a signed copy of the rpm"""

      rinfo = get_rpm(an_rpm, strict=True)

      if rinfo['external_repo_id']:

-         raise koji.GenericError("Not an internal rpm: %s (from %s)" \

-                 % (an_rpm, rinfo['external_repo_name']))

+         raise koji.GenericError("Not an internal rpm: %s (from %s)"

+                                 % (an_rpm, rinfo['external_repo_name']))

      binfo = get_build(rinfo['build_id'])

      nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo

      builddir = koji.pathinfo.build(binfo)
@@ -7234,7 +7420,7 @@ 

          raise koji.GenericError("No such path: %s" % rpm_path)

      if not os.path.isfile(rpm_path):

          raise koji.GenericError("Not a regular file: %s" % rpm_path)

-     #make sure we have it in the db

+     # make sure we have it in the db

      rpm_id = rinfo['id']

      q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""

      row = _fetchSingle(q, locals())
@@ -7244,7 +7430,7 @@ 

      signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey))

      if os.path.exists(signedpath):

          if not force:

-             #already present

+             # already present

              return

          else:

              os.unlink(signedpath)
@@ -7289,31 +7475,33 @@ 

      cg: only relating to a content generator

      """

      common_fields = {

-         #fields:aliases common to all versioned tables

+         # fields:aliases common to all versioned tables

          'active': 'active',

          'create_event': 'create_event',

          'revoke_event': 'revoke_event',

          'creator_id': 'creator_id',

          'revoker_id': 'revoker_id',

-         }

+     }

      common_joins = [

          "events AS ev1 ON ev1.id = create_event",

          "LEFT OUTER JOIN events AS ev2 ON ev2.id = revoke_event",

          "users AS creator ON creator.id = creator_id",

          "LEFT OUTER JOIN users AS revoker ON revoker.id = revoker_id",

-         ]

+     ]

      common_joined_fields = {

          'creator.name': 'creator_name',

          'revoker.name': 'revoker_name',

          'EXTRACT(EPOCH FROM ev1.time) AS create_ts': 'create_ts',

          'EXTRACT(EPOCH FROM ev2.time) AS revoke_ts': 'revoke_ts',

-         }

+     }

      table_fields = {

          'user_perms': ['user_id', 'perm_id'],

          'user_groups': ['user_id', 'group_id'],

          'cg_users': ['user_id', 'cg_id'],

-         'tag_inheritance': ['tag_id', 'parent_id', 'priority', 'maxdepth', 'intransitive', 'noconfig', 'pkg_filter'],

-         'tag_config': ['tag_id', 'arches', 'perm_id', 'locked', 'maven_support', 'maven_include_all'],

+         'tag_inheritance': ['tag_id', 'parent_id', 'priority', 'maxdepth', 'intransitive',

+                             'noconfig', 'pkg_filter'],

+         'tag_config': ['tag_id', 'arches', 'perm_id', 'locked', 'maven_support',

+                        'maven_include_all'],

          'tag_extra': ['tag_id', 'key', 'value'],

          'build_target_config': ['build_target_id', 'build_tag', 'dest_tag'],

          'external_repo_config': ['external_repo_id', 'url'],
@@ -7323,18 +7511,19 @@ 

          'tag_listing': ['build_id', 'tag_id'],

          'tag_packages': ['package_id', 'tag_id', 'blocked', 'extra_arches'],

          'tag_package_owners': ['package_id', 'tag_id', 'owner'],

-         'group_config': ['group_id', 'tag_id', 'blocked', 'exported', 'display_name', 'is_default', 'uservisible',

-                             'description', 'langonly', 'biarchonly'],

+         'group_config': ['group_id', 'tag_id', 'blocked', 'exported', 'display_name', 'is_default',

+                          'uservisible', 'description', 'langonly', 'biarchonly'],

          'group_req_listing': ['group_id', 'tag_id', 'req_id', 'blocked', 'type', 'is_metapkg'],

-         'group_package_listing': ['group_id', 'tag_id', 'package', 'blocked', 'type', 'basearchonly', 'requires'],

-         }

+         'group_package_listing': ['group_id', 'tag_id', 'package', 'blocked', 'type',

+                                   'basearchonly', 'requires'],

+     }

      name_joins = {

-         #joins triggered by table fields for name lookup

-         #field : [table, join-alias, alias]

+         # joins triggered by table fields for name lookup

+         # field : [table, join-alias, alias]

          'user_id': ['users', 'users', 'user'],

          'perm_id': ['permissions', 'permission'],

          'cg_id': ['content_generator'],

-         #group_id is overloaded (special case below)

+         # group_id is overloaded (special case below)

          'tag_id': ['tag'],

          'host_id': ['host'],

          'channel_id': ['channels'],
@@ -7347,7 +7536,7 @@ 

          'package_id': ['package'],

          'owner': ['users', 'owner'],

          'req_id': ['groups', 'req'],

-         }

+     }

      if tables is None:

          tables = sorted(table_fields.keys())

      else:
@@ -7374,18 +7563,19 @@ 

                  joined[tbl] = join_as

                  fullname = "%s.name" % join_as

                  if len(name_join) > 2:

-                     #apply alias

+                     # apply alias

                      fields[fullname] = "%s.name" % name_join[2]

                  else:

                      fields[fullname] = fullname

                  if join_as == tbl:

                      joins.append('LEFT OUTER JOIN %s ON %s = %s.id' % (tbl, field, tbl))

                  else:

-                     joins.append('LEFT OUTER JOIN %s AS %s ON %s = %s.id' % (tbl, join_as, field, join_as))

+                     joins.append('LEFT OUTER JOIN %s AS %s ON %s = %s.id' %

+                                  (tbl, join_as, field, join_as))

              elif field == 'build_id':

-                 #special case

+                 # special case

                  fields.update({

-                     'package.name': 'name', #XXX?

+                     'package.name': 'name',  # XXX?

                      'build.version': 'version',

                      'build.release': 'release',

                      'build.epoch': 'epoch',
@@ -7417,7 +7607,7 @@ 

                      break

                  data['tag_id'] = get_tag_id(value, strict=True)

                  if table == 'tag_inheritance':

-                     #special cased because there are two tag columns

+                     # special cased because there are two tag columns

                      clauses.append("tag_id = %(tag_id)i OR parent_id = %(tag_id)i")

                  else:

                      clauses.append("%s.id = %%(tag_id)i" % joined['tag'])
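Note (illustrative, not part of the patch): these clauses feed the history API; a hedged sketch of how a client might exercise them (the tag name is made up, and session stands in for an authenticated ClientSession):

    # hypothetical: list currently-active tag_listing history for one tag
    session.queryHistory(tables=['tag_listing'], tag='f30-build', active=True)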
@@ -7504,7 +7694,8 @@ 

                  clauses.append('ev1.time > %(after)s OR ev2.time > %(after)s')

                  fields['ev1.time > %(after)s'] = '_created_after'

                  fields['ev2.time > %(after)s'] = '_revoked_after'

-                 #clauses.append('EXTRACT(EPOCH FROM ev1.time) > %(after)s OR EXTRACT(EPOCH FROM ev2.time) > %(after)s')

+                 # clauses.append('EXTRACT(EPOCH FROM ev1.time) > %(after)s OR '

+                 #                'EXTRACT(EPOCH FROM ev2.time) > %(after)s')

              elif arg == 'afterEvent':

                  data['afterEvent'] = value

                  c_test = '%s.create_event > %%(afterEvent)i' % table
@@ -7517,7 +7708,8 @@ 

                      value = datetime.datetime.fromtimestamp(value).isoformat(' ')

                  data['before'] = value

                  clauses.append('ev1.time < %(before)s OR ev2.time < %(before)s')

-                 #clauses.append('EXTRACT(EPOCH FROM ev1.time) < %(before)s OR EXTRACT(EPOCH FROM ev2.time) < %(before)s')

+                 # clauses.append('EXTRACT(EPOCH FROM ev1.time) < %(before)s OR '

+                 #                'EXTRACT(EPOCH FROM ev2.time) < %(before)s')

                  fields['ev1.time < %(before)s'] = '_created_before'

                  fields['ev2.time < %(before)s'] = '_revoked_before'

              elif arg == 'beforeEvent':
@@ -7555,12 +7747,12 @@ 

                'EXTRACT(EPOCH FROM ev1.time)', 'EXTRACT(EPOCH FROM ev2.time)',

                'maven_builds.build_id', 'win_builds.build_id')

      aliases = ('build_id', 'name', 'version', 'release',

-               'tag_id', 'tag_name', 'active',

-               'create_event', 'revoke_event',

-               'creator_id', 'revoker_id',

-               'creator_name', 'revoker_name',

-               'create_ts', 'revoke_ts',

-               'maven_build_id', 'win_build_id')

+                'tag_id', 'tag_name', 'active',

+                'create_event', 'revoke_event',

+                'creator_id', 'revoker_id',

+                'creator_name', 'revoker_name',

+                'create_ts', 'revoke_ts',

+                'maven_build_id', 'win_build_id')

      st_complete = koji.BUILD_STATES['COMPLETE']

      tables = ['tag_listing']

      joins = ["tag ON tag.id = tag_listing.tag_id",
@@ -7591,6 +7783,7 @@ 

                             opts=queryOpts)

      return query.iterate()

  

+ 

  def untagged_builds(name=None, queryOpts=None):

      """Returns the list of untagged builds"""

      fields = ('build.id', 'package.name', 'build.version', 'build.release')
@@ -7605,13 +7798,13 @@ 

      joins.append("""LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id

                      AND tag_listing.active = TRUE""")

      clauses = ["tag_listing.tag_id IS NULL", "build.state = %(st_complete)i"]

-     #q = """SELECT build.id, package.name, build.version, build.release

-     #FROM build

+     # q = """SELECT build.id, package.name, build.version, build.release

+     # FROM build

      #    JOIN package on package.id = build.pkg_id

      #    LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id

      #        AND tag_listing.active IS TRUE

-     #WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i"""

-     #return _multiRow(q, locals(), aliases)

+     # WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i"""

+     # return _multiRow(q, locals(), aliases)

      query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,

                             joins=joins, clauses=clauses, values=locals(),

                             opts=queryOpts)
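Note (illustrative, not part of the patch): since the legacy SQL now survives only as a comment, it may help reviewers that a QueryProcessor can always be rendered for comparison; roughly:

    # str() renders the SELECT built above, approximately matching the
    # commented-out legacy query (sketch, not verbatim output)
    print(str(query))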
@@ -7640,7 +7833,7 @@ 

      if lazy and ret['tags']:

          return ret

  

-     #we'll need the component rpm and archive ids for the rest

+     # we'll need the component rpm and archive ids for the rest

      q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i"""

      build_rpm_ids = _fetchMulti(q, locals())

      q = """SELECT id FROM archiveinfo WHERE build_id=%(build_id)i"""
@@ -7650,7 +7843,8 @@ 

      st_complete = koji.BUILD_STATES['COMPLETE']

      fields = ('id', 'name', 'version', 'release', 'arch', 'build_id')

      idx = {}

-     q = """SELECT rpminfo.id, rpminfo.name, rpminfo.version, rpminfo.release, rpminfo.arch, rpminfo.build_id

+     q = """SELECT

+         rpminfo.id, rpminfo.name, rpminfo.version, rpminfo.release, rpminfo.arch, rpminfo.build_id

      FROM rpminfo, build

      WHERE

          rpminfo.buildroot_id IN (
@@ -7683,7 +7877,7 @@ 

      if limit:

          qopts['limit'] = limit

      query = QueryProcessor(columns=fields, tables=['archive_rpm_components'],

-             clauses=clauses, joins=joins, values=values, opts=qopts)

+                            clauses=clauses, joins=joins, values=values, opts=qopts)

      for (rpm_id,) in build_rpm_ids:

          query.values['rpm_id'] = rpm_id

          archive_ids = [i[0] for i in query.execute()]
@@ -7695,7 +7889,8 @@ 

      # find archives whose buildroots we were in

      fields = ('id', 'type_id', 'type_name', 'build_id', 'filename')

      idx = {}

-     q = """SELECT archiveinfo.id, archiveinfo.type_id, archivetypes.name, archiveinfo.build_id, archiveinfo.filename

+     q = """SELECT archiveinfo.id, archiveinfo.type_id, archivetypes.name, archiveinfo.build_id,

+         archiveinfo.filename

      FROM buildroot_archives

          JOIN archiveinfo ON archiveinfo.buildroot_id = buildroot_archives.buildroot_id

          JOIN build ON archiveinfo.build_id = build.id
@@ -7725,7 +7920,7 @@ 

      if limit:

          qopts['limit'] = limit

      query = QueryProcessor(columns=fields, tables=['archive_components'],

-             clauses=clauses, joins=joins, values=values, opts=qopts)

+                            clauses=clauses, joins=joins, values=values, opts=qopts)

      for (archive_id,) in build_archive_ids:

          query.values['archive_id'] = archive_id

          archive_ids = [i[0] for i in query.execute()]
@@ -7797,11 +7992,13 @@ 

          return False

      if refs.get('archives'):

          if strict:

-             raise koji.GenericError("Cannot delete build, used in archive buildroots: %s" % refs['archives'])

+             raise koji.GenericError("Cannot delete build, used in archive buildroots: %s" %

+                                     refs['archives'])

          return False

      if refs.get('component_of'):

          if strict:

-             raise koji.GenericError("Cannot delete build, used as component of: %r" % refs['component_of'])

+             raise koji.GenericError("Cannot delete build, used as component of: %r" %

+                                     refs['component_of'])

          return False

      if refs.get('last_used'):

          age = time.time() - refs['last_used']
@@ -7835,7 +8032,8 @@ 

      #   files on disk: DELETE

      st_deleted = koji.BUILD_STATES['DELETED']

      st_old = binfo['state']

-     koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_deleted, info=binfo)

+     koji.plugin.run_callbacks('preBuildStateChange',

+                               attribute='state', old=st_old, new=st_deleted, info=binfo)

      build_id = binfo['id']

      q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i"""

      rpm_ids = _fetchMulti(q, locals())
@@ -7852,7 +8050,9 @@ 

      if os.path.exists(builddir):

          koji.util.rmtree(builddir)

      binfo = get_build(build_id, strict=True)

-     koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_deleted, info=binfo)

+     koji.plugin.run_callbacks('postBuildStateChange',

+                               attribute='state', old=st_old, new=st_deleted, info=binfo)

+ 

  

  def reset_build(build):

      """Reset a build so that it can be reimported
@@ -7869,10 +8069,12 @@ 

      context.session.assertPerm('admin')

      binfo = get_build(build)

      if not binfo:

-         #nothing to do

+         # nothing to do

          return

      st_old = binfo['state']

-     koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=koji.BUILD_STATES['CANCELED'], info=binfo)

+     koji.plugin.run_callbacks('preBuildStateChange',

+                               attribute='state', old=st_old, new=koji.BUILD_STATES['CANCELED'],

+                               info=binfo)

      q = """SELECT id FROM rpminfo WHERE build_id=%(id)i"""

      ids = _fetchMulti(q, binfo)

      for (rpm_id,) in ids:
@@ -7921,7 +8123,10 @@ 

      if os.path.exists(builddir):

          koji.util.rmtree(builddir)

      binfo = get_build(build, strict=True)

-     koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=koji.BUILD_STATES['CANCELED'], info=binfo)

+     koji.plugin.run_callbacks('postBuildStateChange',

+                               attribute='state', old=st_old, new=koji.BUILD_STATES['CANCELED'],

+                               info=binfo)

+ 

  

  def cancel_build(build_id, cancel_task=True):

      """Cancel a build
@@ -7941,7 +8146,8 @@ 

      if build['state'] != st_building:

          return False

      st_old = build['state']

-     koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_canceled, info=build)

+     koji.plugin.run_callbacks('preBuildStateChange',

+                               attribute='state', old=st_old, new=st_canceled, info=build)

      update = """UPDATE build

      SET state = %(st_canceled)i, completion_time = NOW()

      WHERE id = %(build_id)i AND state = %(st_building)i"""
@@ -7950,7 +8156,7 @@ 

      if build['state'] != st_canceled:

          return False

      task_id = build['task_id']

-     if task_id != None:

+     if task_id is not None:

          build_notification(task_id, build_id)

          if cancel_task:

              Task(task_id).cancelFull(strict=False)
@@ -7960,9 +8166,11 @@ 

      _dml(delete, {'build_id': build_id})

  

      build = get_build(build_id, strict=True)

-     koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_canceled, info=build)

+     koji.plugin.run_callbacks('postBuildStateChange',

+                               attribute='state', old=st_old, new=st_canceled, info=build)

      return True
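Note (illustrative, not part of the patch): the pre/postBuildStateChange callbacks rewrapped in this function are the hook points for hub plugins. A minimal hypothetical plugin sketch; the logger name and the 'nvr' key are assumptions:

    import logging
    import koji
    from koji.plugin import callback

    @callback('postBuildStateChange')
    def log_cancel(cbtype, attribute=None, old=None, new=None, info=None):
        # invoked by run_callbacks() with the keyword args shown above
        if new == koji.BUILD_STATES['CANCELED']:
            logging.getLogger('koji.plugin').info(
                'build canceled: %s', info.get('nvr'))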

  

+ 

  def _get_build_target(task_id):

      # XXX Should we be storing a reference to the build target

      # in the build table for reproducibility?
@@ -7979,6 +8187,7 @@ 

              return get_build_target(request[2])

      return None

  

+ 

  def get_notification_recipients(build, tag_id, state):

      """

      Return the list of email addresses that should be notified about events
@@ -8029,7 +8238,7 @@ 

          recipients.append({

              'user_id': build['owner_id'],

              'email': '%s@%s' % (build['owner_name'], email_domain)

-             })

+         })

  

          if tag_id:

              packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
@@ -8044,8 +8253,8 @@ 

                      recipients.append({

                          'user_id': owner['id'],

                          'email': '%s@%s' % (owner['name'], email_domain)

-                         })

-         #FIXME - if tag_id is None, we don't have a good way to get the package owner.

+                     })

+         # FIXME - if tag_id is None, we don't have a good way to get the package owner.

          #   using all package owners from all tags would be way overkill.

  

      if not recipients:
@@ -8065,7 +8274,7 @@ 

          else:

              clauses.append('tag_id IS NULL')

          query = QueryProcessor(columns=['user_id'], clauses=clauses,

-                 tables=['build_notifications_block'], values=locals())

+                                tables=['build_notifications_block'], values=locals())

          optouts = [r['user_id'] for r in query.execute()]

          optouts = set(optouts)

      else:
@@ -8075,7 +8284,8 @@ 

      return list(set(emails))

  

  

- def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):

+ def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success=False,

+                      failure_msg=''):

      if context.opts.get('DisableNotifications'):

          return

      if is_successful:
@@ -8099,10 +8309,13 @@ 

                  recipients[email] = 1

      recipients_uniq = to_list(recipients.keys())

      if len(recipients_uniq) > 0 and not (is_successful and ignore_success):

-         task_id = make_task('tagNotification', [recipients_uniq, is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg])

+         task_id = make_task('tagNotification',

+                             [recipients_uniq, is_successful, tag_id, from_id, build_id, user_id,

+                              ignore_success, failure_msg])

          return task_id

      return None

  

+ 

  def build_notification(task_id, build_id):

      if context.opts.get('DisableNotifications'):

          return
@@ -8122,6 +8335,7 @@ 

      if len(recipients) > 0:

          make_task('buildNotification', [recipients, build, target, web_url])

  

+ 

  def get_build_notifications(user_id):

      query = QueryProcessor(tables=['build_notifications'],

                             columns=('id', 'user_id', 'package_id', 'tag_id',
@@ -8130,6 +8344,7 @@ 

                             values=locals())

      return query.execute()

  

+ 

  def get_build_notification_blocks(user_id):

      query = QueryProcessor(tables=['build_notifications_block'],

                             columns=['id', 'user_id', 'package_id', 'tag_id'],
@@ -8145,6 +8360,7 @@ 

          raise koji.GenericError('user/group already exists: %s' % name)

      return context.session.createUser(name, usertype=koji.USERTYPES['GROUP'])

  

+ 

  def add_group_member(group, user, strict=True):

      """Add user to group"""

      context.session.assertPerm('admin')
@@ -8156,13 +8372,13 @@ 

          raise koji.GenericError("Not a user: %s" % user)

      if uinfo['usertype'] == koji.USERTYPES['GROUP']:

          raise koji.GenericError("Groups cannot be members of other groups")

-     #check to see if user is already a member

-     data = {'user_id' : uinfo['id'], 'group_id' : ginfo['id']}

+     # check to see if user is already a member

+     data = {'user_id': uinfo['id'], 'group_id': ginfo['id']}

      table = 'user_groups'

      clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s')

      query = QueryProcessor(columns=['user_id'], tables=[table],

-                            clauses=('active = TRUE',)+clauses,

-                            values=data, opts={'rowlock':True})

+                            clauses=('active = TRUE',) + clauses,

+                            values=data, opts={'rowlock': True})

      row = query.executeOne()

      if row:

          if not strict:
@@ -8172,6 +8388,7 @@ 

      insert.make_create()

      insert.execute()

  

+ 

  def drop_group_member(group, user):

      """Drop user from group"""

      context.session.assertPerm('admin')
@@ -8181,12 +8398,13 @@ 

          raise koji.GenericError("No such group: %s" % group)

      if user['id'] not in [u['id'] for u in get_group_members(group)]:

          raise koji.GenericError("No such user in group: %s" % group)

-     data = {'user_id' : user['id'], 'group_id' : ginfo['id']}

+     data = {'user_id': user['id'], 'group_id': ginfo['id']}

      clauses = ["user_id = %(user_id)i", "group_id = %(group_id)i"]

      update = UpdateProcessor('user_groups', values=data, clauses=clauses)

      update.make_revoke()

      update.execute()
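Note (illustrative, not part of the patch): the make_create()/make_revoke() pair used in these two functions is koji's versioned-row convention; membership rows are never deleted, only revoked at the current event. Condensed, assuming the semantics visible here, with invented ids:

    # sketch: grant then revoke a membership row (versioned-table pattern)
    data = {'user_id': 42, 'group_id': 7}
    insert = InsertProcessor('user_groups', data=data)
    insert.make_create()   # stamps create_event/creator_id, sets active
    insert.execute()

    update = UpdateProcessor('user_groups', values=data,
                             clauses=['user_id = %(user_id)i',
                                      'group_id = %(group_id)i'])
    update.make_revoke()   # stamps revoke_event/revoker_id, clears active
    update.execute()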

  

+ 

  def get_group_members(group):

      """Get the members of a group"""

      context.session.assertPerm('admin')
@@ -8212,6 +8430,7 @@ 

                             transform=xform_user_krb)

      return query.iterate()

  

+ 

  def set_user_status(user, status):

      context.session.assertPerm('admin')

      if not koji.USER_STATUS.get(status):
@@ -8297,9 +8516,9 @@ 

      context.session.assertPerm('admin')

      user = get_user(user, strict=True)

      cg = lookup_name('content_generator', cg, strict=True)

-     data = {'user_id': user['id'], 'cg_id' : cg['id']}

+     data = {'user_id': user['id'], 'cg_id': cg['id']}

      update = UpdateProcessor('cg_users', values=data,

-                 clauses=["user_id = %(user_id)i", "cg_id = %(cg_id)i"])

+                              clauses=["user_id = %(user_id)i", "cg_id = %(cg_id)i"])

      update.make_revoke()

      update.execute()

  
@@ -8312,7 +8531,7 @@ 

          user = context.session.user_id

      user = get_user(user, strict=True)

      clauses = ['active = TRUE', 'user_id = %(user_id)s', 'cg_id = %(cg_id)s']

-     data = {'user_id' : user['id'], 'cg_id' : cg['id']}

+     data = {'user_id': user['id'], 'cg_id': cg['id']}

      query = QueryProcessor(tables=['cg_users'], columns=['cg_id'], clauses=clauses, values=data)

      if not query.execute():

          raise koji.AuthError("Content generator access required (%s)" % cg['name'])
@@ -8453,7 +8672,7 @@ 

              self._one_insert(self.data)

          else:

              for i in range(0, len(self.data), self.batch):

-                 data = self.data[i:i+self.batch]

+                 data = self.data[i:i + self.batch]

                  self._one_insert(data)
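Note (illustrative, not part of the patch): the whitespace fix above touches the batching loop; the slice arithmetic simply walks the row list in fixed-size windows:

    # standalone illustration of the batching slice used above
    data = list(range(10))
    batch = 3
    chunks = [data[i:i + batch] for i in range(0, len(data), batch)]
    # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]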

  

      def _one_insert(self, data):
@@ -8584,7 +8803,7 @@ 

          ret = {}

          ret.update(self.values)

          for key in self.data:

-             ret["data."+key] = self.data[key]

+             ret["data." + key] = self.data[key]

          return ret

  

      def set(self, **kwargs):
@@ -8683,7 +8902,7 @@ 

  

      def __str__(self):

          query = \

- """

+             """

  SELECT %(col_str)s

    FROM %(table_str)s

  %(join_str)s
@@ -8731,8 +8950,10 @@ 

          return query

  

      def __repr__(self):

-         return '<QueryProcessor: columns=%r, aliases=%r, tables=%r, joins=%r, clauses=%r, values=%r, opts=%r>' % \

-                (self.columns, self.aliases, self.tables, self.joins, self.clauses, self.values, self.opts)

+         return '<QueryProcessor: ' \

+                'columns=%r, aliases=%r, tables=%r, joins=%r, clauses=%r, values=%r, opts=%r>' % \

+                (self.columns, self.aliases, self.tables, self.joins, self.clauses, self.values,

+                 self.opts)
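Note (illustrative, not part of the patch): between __str__ and __repr__ above, a QueryProcessor is easy to inspect. A hedged construction example reusing the archivetypes query from earlier in this patch; 'zip' is an example value:

    query = QueryProcessor(tables=['archivetypes'],
                           columns=['id', 'name', 'description', 'extensions'],
                           clauses=['extensions ~* %(pattern)s'],
                           values={'pattern': r'(\s|^)zip(\s|$)'})
    print(str(query))    # renders the SELECT/FROM/WHERE template shown above
    rows = query.execute()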

  

      def _seqtostr(self, seq, sep=', ', sort=False):

          if seq:
@@ -8805,7 +9026,6 @@ 

          # self.transform not applied here

          return _singleValue(str(self), self.values, strict=strict)

  

- 

      def execute(self):

          query = str(self)

          if self.opts.get('countOnly'):
@@ -8826,7 +9046,6 @@ 

                  data = [self.transform(row) for row in data]

              return data

  

- 

      def iterate(self):

          if self.opts.get('countOnly'):

              return self.execute()
@@ -8885,6 +9104,7 @@ 

                  return None

          return results

  

+ 

  def _applyQueryOpts(results, queryOpts):

      """

      Apply queryOpts to results in the same way QueryProcessor would.
@@ -8928,6 +9148,7 @@ 

      name = 'operation'

      field = 'operation'

  

+ 

  def policy_get_user(data):

      """Determine user from policy data (default to logged-in user)"""

      if 'user_id' in data:
@@ -8936,6 +9157,7 @@ 

          return get_user(context.session.user_id)

      return None

  

+ 

  def policy_get_pkg(data):

      """Determine package from policy data (default to logged-in user)

  
@@ -8945,17 +9167,17 @@ 

      if 'package' in data:

          pkginfo = lookup_package(data['package'], strict=False)

          if not pkginfo:

-             #for some operations (e.g. adding a new package), the package

-             #entry may not exist yet

+             # for some operations (e.g. adding a new package), the package

+             # entry may not exist yet

              if isinstance(data['package'], six.string_types):

-                 return {'id' : None, 'name' : data['package']}

+                 return {'id': None, 'name': data['package']}

              else:

                  raise koji.GenericError("Invalid package: %s" % data['package'])

          return pkginfo

      if 'build' in data:

          binfo = get_build(data['build'], strict=True)

-         return {'id' : binfo['package_id'], 'name' : binfo['name']}

-     #else

+         return {'id': binfo['package_id'], 'name': binfo['name']}

+     # else

      raise koji.GenericError("policy requires package data")

  

  
@@ -8968,7 +9190,7 @@ 

          return data['version']

      if 'build' in data:

          return get_build(data['build'], strict=True)['version']

-     #else

+     # else

      raise koji.GenericError("policy requires version data")

  

  
@@ -8981,7 +9203,7 @@ 

          return data['release']

      if 'build' in data:

          return get_build(data['build'], strict=True)['release']

-     #else

+     # else

      raise koji.GenericError("policy requires release data")

  

  
@@ -9004,7 +9226,7 @@ 

      # note that br_id will be None if a component had no buildroot

      if 'cg_list' in data:

          cgs = [lookup_name('content_generator', cg, strict=True)

-                 for cg in data['cg_list']]

+                for cg in data['cg_list']]

          return set(cgs)

      # otherwise try buildroot data

      cgs = set()
@@ -9042,18 +9264,22 @@ 

          return set(get_build_type(binfo).keys())

      return set()

  

+ 

  class NewPackageTest(koji.policy.BaseSimpleTest):

      """Checks to see if a package exists yet"""

      name = 'is_new_package'

+ 

      def run(self, data):

          return (policy_get_pkg(data)['id'] is None)

  

+ 

  class PackageTest(koji.policy.MatchTest):

      """Checks package against glob patterns"""

      name = 'package'

      field = '_package'

+ 

      def run(self, data):

-         #we need to find the package name from the base data

+         # we need to find the package name from the base data

          data[self.field] = policy_get_pkg(data)['name']

          return super(PackageTest, self).run(data)

  
@@ -9062,6 +9288,7 @@ 

      """Checks version against glob patterns"""

      name = 'version'

      field = '_version'

+ 

      def run(self, data):

          data[self.field] = policy_get_version(data)

          return super(VersionTest, self).run(data)
@@ -9071,8 +9298,9 @@ 

      """Checks release against glob patterns"""

      name = 'release'

      field = '_release'

+ 

      def run(self, data):

-         #we need to find the build NVR from the base data

+         # we need to find the build NVR from the base data

          data[self.field] = policy_get_release(data)

          return super(ReleaseTest, self).run(data)

  
@@ -9081,8 +9309,9 @@ 

      """Checks storage volume against glob patterns"""

      name = 'volume'

      field = '_volume'

+ 

      def run(self, data):

-         #we need to find the volume name from the base data

+         # we need to find the volume name from the base data

          volinfo = None

          if 'volume' in data:

              volinfo = lookup_name('volume', data['volume'], strict=False)
@@ -9105,7 +9334,7 @@ 

      name = 'cg_match_any'

  

      def run(self, data):

-         #we need to find the volume name from the base data

+         # we need to find the volume name from the base data

          cgs = policy_get_cgs(data)

          patterns = self.str.split()[1:]

          for cg_name in cgs:
@@ -9128,7 +9357,7 @@ 

      name = 'cg_match_all'

  

      def run(self, data):

-         #we need to find the volume name from the base data

+         # we need to find the volume name from the base data

          cgs = policy_get_cgs(data)

          if not cgs:

              return False
@@ -9157,43 +9386,49 @@ 

          return get_tag(tag, strict=False)

  

      def run(self, data):

-         #we need to find the tag name from the base data

+         # we need to find the tag name from the base data

          tinfo = self.get_tag(data)

          if tinfo is None:

              return False

          data[self.field] = tinfo['name']

          return super(TagTest, self).run(data)

  

+ 

  class FromTagTest(TagTest):

      name = 'fromtag'

+ 

      def get_tag(self, data):

          tag = data.get('fromtag')

          if tag is None:

              return None

          return get_tag(tag, strict=False)

  

+ 

  class HasTagTest(koji.policy.BaseSimpleTest):

      """Check to see if build (currently) has a given tag"""

      name = 'hastag'

+ 

      def run(self, data):

          if 'build' not in data:

              return False

          tags = list_tags(build=data['build'])

-         #True if any of these tags match any of the patterns

+         # True if any of these tags match any of the patterns

          args = self.str.split()[1:]

          for tag in tags:

              for pattern in args:

                  if fnmatch.fnmatch(tag['name'], pattern):

                      return True

-         #otherwise...

+         # otherwise...

          return False

  

+ 

  class SkipTagTest(koji.policy.BaseSimpleTest):

      """Check for the skip_tag option

  

      For policies regarding build tasks (e.g. build_from_srpm)

      """

      name = 'skip_tag'

+ 

      def run(self, data):

          return bool(data.get('skip_tag'))
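Note (illustrative, not part of the patch): these test classes are referenced by name from the hub's policy configuration. An illustrative, not prescriptive, hub.conf stanza combining tests defined in this file; the package pattern is invented:

    [policy]
    build_from_srpm =
        has_perm admin :: allow
        package kernel* :: deny
        all :: allow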

  
@@ -9205,6 +9440,7 @@ 

      buildroots of the component rpms

      """

      name = 'buildtag'

+ 

      def run(self, data):

          args = self.str.split()[1:]

          for tagname in policy_get_build_tags(data):
@@ -9213,7 +9449,7 @@ 

                  continue

              if multi_fnmatch(tagname, args):

                  return True

-         #otherwise...

+         # otherwise...

          return False

  

  
@@ -9221,6 +9457,7 @@ 

      """Check the build type(s) of the build"""

  

      name = 'buildtype'

+ 

      def run(self, data):

          args = self.str.split()[1:]

          for btype in policy_get_build_types(data):
@@ -9235,6 +9472,7 @@ 

      This is determined by checking the buildroots of the rpms and archives

      True if any of them lack a buildroot (strict)"""

      name = 'imported'

+ 

      def run(self, data):

          build_info = data.get('build')

          if not build_info:
@@ -9250,18 +9488,22 @@ 

          # otherwise...

          return False

  

+ 

  class ChildTaskTest(koji.policy.BoolTest):

      name = 'is_child_task'

      field = 'parent'

  

+ 

  class MethodTest(koji.policy.MatchTest):

      name = 'method'

      field = 'method'

  

+ 

  class UserTest(koji.policy.MatchTest):

      """Checks username against glob patterns"""

      name = 'user'

      field = '_username'

+ 

      def run(self, data):

          user = policy_get_user(data)

          if not user:
@@ -9269,14 +9511,17 @@ 

          data[self.field] = user['name']

          return super(UserTest, self).run(data)

  

+ 

  class VMTest(koji.policy.MatchTest):

      """Checks a VM name against glob patterns"""

      name = 'vm_name'

      field = 'vm_name'

  

+ 

  class IsBuildOwnerTest(koji.policy.BaseSimpleTest):

      """Check if user owns the build"""

      name = "is_build_owner"

+ 

      def run(self, data):

          build = get_build(data['build'])

          owner = get_user(build['owner_id'])
@@ -9289,9 +9534,10 @@ 

              # owner is a group, check to see if user is a member

              if owner['id'] in koji.auth.get_user_groups(user['id']):

                  return True

-         #otherwise...

+         # otherwise...

          return False

  

+ 

  class UserInGroupTest(koji.policy.BaseSimpleTest):

      """Check if user is in group(s)

  
@@ -9299,6 +9545,7 @@ 

      true if user is in /any/ matching group

      """

      name = "user_in_group"

+ 

      def run(self, data):

          user = policy_get_user(data)

          if not user:
@@ -9309,9 +9556,10 @@ 

              for pattern in args:

                  if fnmatch.fnmatch(group, pattern):

                      return True

-         #otherwise...

+         # otherwise...

          return False

  

+ 

  class HasPermTest(koji.policy.BaseSimpleTest):

      """Check if user has permission(s)

  
@@ -9319,6 +9567,7 @@ 

      true if user has /any/ matching permission

      """

      name = "has_perm"

+ 

      def run(self, data):

          user = policy_get_user(data)

          if not user:
@@ -9329,9 +9578,10 @@ 

              for pattern in args:

                  if fnmatch.fnmatch(perm, pattern):

                      return True

-         #otherwise...

+         # otherwise...

          return False

  

+ 

  class SourceTest(koji.policy.MatchTest):

      """Match build source

  
@@ -9340,6 +9590,7 @@ 

      """

      name = "source"

      field = '_source'

+ 

      def run(self, data):

          if 'source' in data:

              data[self.field] = data['source']
@@ -9351,11 +9602,11 @@ 

                  # no source to match against

                  return False

              else:

-                 #crack open the build task

+                 # crack open the build task

                  task = Task(build['task_id'])

                  info = task.getInfo()

                  params = task.getRequest()

-                 #signatures:

+                 # signatures:

                  # build - (src, target, opts=None)

                  # maven - (url, target, opts=None)

                  # winbuild - (name, source_url, target, opts=None)
@@ -9369,6 +9620,7 @@ 

              return False

          return super(SourceTest, self).run(data)

  

+ 

  class PolicyTest(koji.policy.BaseSimpleTest):

      """Test named policy

  
@@ -9391,7 +9643,7 @@ 

      def run(self, data):

          args = self.str.split()[1:]

          if self.depth != 0:

-             #LOOP!

+             # LOOP!

              raise koji.GenericError("encountered policy loop at %s" % self.str)

          ruleset = context.policy.get(args[0])

          if not ruleset:
@@ -9438,7 +9690,9 @@ 

              reason = reason.lower()

          lastrule = ruleset.last_rule()

      if context.opts.get('KojiDebug', False):

-         logger.error("policy %(name)s gave %(result)s, reason: %(reason)s, last rule: %(lastrule)s", locals())

+         logger.error(

+             "policy %(name)s gave %(result)s, reason: %(reason)s, last rule: %(lastrule)s",

+             locals())

      if result == 'allow':

          return True, reason

      if result != 'deny':
@@ -9453,6 +9707,7 @@ 

          err_str += " [rule: %s]" % lastrule

      raise koji.ActionNotAllowed(err_str)

  

+ 

  def assert_policy(name, data, default='deny'):

      """Enforce the named policy

  
@@ -9463,6 +9718,7 @@ 

      """

      check_policy(name, data, default=default, strict=True)
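Note (illustrative, not part of the patch): a hedged sketch of how hub code calls these helpers; the policy name and data keys follow the conventions visible in this file, and user_id is a stand-in:

    # raises koji.ActionNotAllowed unless the policy result is 'allow'
    assert_policy('build_from_srpm', {'user_id': user_id, 'package': 'foo'})
    # or probe without raising:
    allowed, reason = check_policy('build_from_srpm',
                                   {'user_id': user_id, 'package': 'foo'},
                                   strict=False)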

  

+ 

  def rpmdiff(basepath, rpmlist, hashes):

      "Diff the first rpm in the list against the rest of the rpms."

      if len(rpmlist) < 2:
@@ -9481,7 +9737,7 @@ 

          # (files may have been generated at build time and contain

          #  embedded dates or other insignificant differences)

          d = koji.rpmdiff.Rpmdiff(joinpath(basepath, first_rpm),

-             joinpath(basepath, other_rpm), ignore='S5TN')

+                                  joinpath(basepath, other_rpm), ignore='S5TN')

          if d.differs():

              raise koji.BuildError(

                  'The following noarch package built differently on different architectures: %s\n'
@@ -9540,7 +9796,8 @@ 

          if os.path.exists(final_path):

              raise koji.GenericError("Error importing build log. %s already exists." % final_path)

          if os.path.islink(logsrc) or not os.path.isfile(logsrc):

-             raise koji.GenericError("Error importing build log. %s is not a regular file." % logsrc)

+             raise koji.GenericError("Error importing build log. %s is not a regular file." %

+                                     logsrc)

          move_and_symlink(logsrc, final_path, create_dir=True)

  

      # record all of the RPMs installed in the image(s)
@@ -9572,6 +9829,8 @@ 

  #

  # XMLRPC Methods

  #

+ 

+ 

  class RootExports(object):

      '''Contains functions that are made available via XMLRPC'''

  
@@ -9666,8 +9925,9 @@ 

          build: The build to generate wrapper rpms for.  Must be in the COMPLETE state and have no

                 rpms already associated with it.

          url: SCM URL to a specfile fragment

-         target: The build target to use when building the wrapper rpm.  The build_tag of the target will

-                 be used to populate the buildroot in which the rpms are built.

+         target: The build target to use when building the wrapper rpm.

+                 The build_tag of the target will be used to populate the buildroot in which the

+                 rpms are built.

          priority: the amount to increase (or decrease) the task priority, relative

                    to the default priority; higher values mean lower priority; only

                    admins have the right to specify a negative priority here
@@ -9684,7 +9944,8 @@ 

  

          build = self.getBuild(build, strict=True)

          if list_rpms(build['id']) and not (opts.get('scratch') or opts.get('create_build')):

-             raise koji.PreBuildError('wrapper rpms for %s have already been built' % koji.buildLabel(build))

+             raise koji.PreBuildError('wrapper rpms for %s have already been built' %

+                                      koji.buildLabel(build))

          build_target = self.getBuildTarget(target)

          if not build_target:

              raise koji.PreBuildError('no such build target: %s' % target)
@@ -9783,7 +10044,7 @@ 

              if priority < 0:

                  if not context.session.hasPerm('admin'):

                      raise koji.ActionNotAllowed(

-                                'only admins may create high-priority tasks')

+                         'only admins may create high-priority tasks')

  

              taskOpts['priority'] = koji.PRIO_DEFAULT + priority

  
@@ -9801,11 +10062,12 @@ 

              if priority < 0:

                  if not context.session.hasPerm('admin'):

                      raise koji.ActionNotAllowed(

-                                'only admins may create high-priority tasks')

+                         'only admins may create high-priority tasks')

  

              taskOpts['priority'] = koji.PRIO_DEFAULT + priority

          if 'scratch' not in opts and 'indirection_template_url' not in opts:

-             raise koji.ActionNotAllowed('Non-scratch builds must provide url for the indirection template')

+             raise koji.ActionNotAllowed(

+                 'Non-scratch builds must provide url for the indirection template')

  

          if 'arch' in opts:

              taskOpts['arch'] = opts['arch']
@@ -9823,7 +10085,7 @@ 

              if priority < 0:

                  if not context.session.hasPerm('admin'):

                      raise koji.ActionNotAllowed(

-                                'only admins may create high-priority tasks')

+                         'only admins may create high-priority tasks')

  

              taskOpts['priority'] = koji.PRIO_DEFAULT + priority

          if 'scratch' not in opts and 'ksurl' not in opts:
@@ -9914,18 +10176,18 @@ 

          return _singleRow(q, values, fields, strict=True)

  

      def makeTask(self, *args, **opts):

-         #this is mainly for debugging

-         #only an admin can make arbitrary tasks

+         # this is mainly for debugging

+         # only an admin can make arbitrary tasks

          context.session.assertPerm('admin')

          return make_task(*args, **opts)

  

      def uploadFile(self, path, name, size, md5sum, offset, data, volume=None):

-         #path: the relative path to upload to

-         #name: the name of the file

-         #size: size of contents (bytes)

-         #md5: md5sum (hex digest) of contents

-         #data: base64 encoded file contents

-         #offset: the offset of the chunk

+         # path: the relative path to upload to

+         # name: the name of the file

+         # size: size of contents (bytes)

+         # md5: md5sum (hex digest) of contents

+         # data: base64 encoded file contents

+         # offset: the offset of the chunk

          # files can be uploaded in chunks, if so the md5 and size describe

          # the chunk rather than the whole file. the offset indicates where

          # the chunk belongs
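Note (illustrative, not part of the patch): to make the chunk contract above concrete, a hypothetical client-side upload in two chunks; the path, name, and content are invented, and session stands in for an authenticated ClientSession:

    import base64
    import hashlib

    data = b'hello koji'
    for offset, chunk in ((0, data[:5]), (5, data[5:])):
        # size and md5sum describe the chunk, offset says where it lands
        session.uploadFile('work/tmp', 'demo.txt', len(chunk),
                           hashlib.md5(chunk).hexdigest(), offset,
                           base64.b64encode(chunk).decode('ascii'))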
@@ -9947,7 +10209,8 @@ 

          sum_cls = get_verify_class(verify)

          if offset != -1:

              if size is not None:

-                 if size != len(contents): return False

+                 if size != len(contents):

+                     return False

              if verify is not None:

                  if digest != sum_cls(contents).hexdigest():

                      return False
@@ -9963,7 +10226,7 @@ 

              if not stat.S_ISREG(st.st_mode):

                  raise koji.GenericError("destination not a file: %s" % fn)

              elif offset == 0:

-                 #first chunk, so file should not exist yet

+                 # first chunk, so file should not exist yet

                  if not fn.endswith('.log'):

                      # but we allow .log files to be uploaded multiple times to support

                      # realtime log-file viewing
@@ -9972,8 +10235,8 @@ 

          # log_error("fd=%r" %fd)

          try:

              if offset == 0 or (offset == -1 and size == len(contents)):

-                 #truncate file

-                 fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)

+                 # truncate file

+                 fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

                  try:

                      os.ftruncate(fd, 0)

                      # log_error("truncating fd %r to 0" %fd)
@@ -9983,8 +10246,8 @@ 

                  os.lseek(fd, 0, 2)

              else:

                  os.lseek(fd, offset, 0)

-             #write contents

-             fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2)

+             # write contents

+             fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, len(contents), 0, 2)

              try:

                  os.write(fd, contents)

                  # log_error("wrote contents")
@@ -9992,22 +10255,23 @@ 

                  fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2)

              if offset == -1:

                  if size is not None:

-                     #truncate file

-                     fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)

+                     # truncate file

+                     fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

                      try:

                          os.ftruncate(fd, size)

                          # log_error("truncating fd %r to size %r" % (fd,size))

                      finally:

                          fcntl.lockf(fd, fcntl.LOCK_UN)

                  if verify is not None:

-                     #check final digest

+                     # check final digest

                      chksum = sum_cls()

-                     fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)

+                     fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)

                      try:

                          os.lseek(fd, 0, 0)

                          while True:

                              block = os.read(fd, 819200)

-                             if not block: break

+                             if not block:

+                                 break

                              chksum.update(block)

                          if digest != chksum.hexdigest():

                              return False
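The locking pattern used throughout this method stands alone as well. A minimal sketch of the fail-fast, non-blocking variant (the example path is illustrative):

import fcntl
import os

fd = os.open('/tmp/example.dat', os.O_RDWR | os.O_CREAT, 0o644)
try:
    # LOCK_NB makes lockf raise IOError/OSError immediately if another
    # process holds the lock, instead of blocking
    fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    try:
        os.ftruncate(fd, 0)
        os.write(fd, b'payload')
    finally:
        fcntl.lockf(fd, fcntl.LOCK_UN)
finally:
    os.close(fd)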
@@ -10030,7 +10294,7 @@ 

                  raise

          try:

              try:

-                 fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)

+                 fcntl.lockf(fd, fcntl.LOCK_SH | fcntl.LOCK_NB)

              except IOError as e:

                  raise koji.LockError(e)

              st = os.fstat(fd)
@@ -10059,13 +10323,14 @@ 

              # this will also free our lock

              os.close(fd)

  

- 

      def downloadTaskOutput(self, taskID, fileName, offset=0, size=-1, volume=None):

          """Download the file with the given name, generated by the task with the

          given ID."""

          if '..' in fileName:

              raise koji.GenericError('Invalid file name: %s' % fileName)

-         filePath = '%s/%s/%s' % (koji.pathinfo.work(volume), koji.pathinfo.taskrelpath(taskID), fileName)

+         filePath = '%s/%s/%s' % (koji.pathinfo.work(volume),

+                                  koji.pathinfo.taskrelpath(taskID),

+                                  fileName)

          filePath = os.path.normpath(filePath)

          if not os.path.isfile(filePath):

              raise koji.GenericError('no file "%s" output by task %i' % (fileName, taskID))
@@ -10073,14 +10338,13 @@ 

          with open(filePath, 'rb') as f:

              if isinstance(offset, str):

                  offset = int(offset)

-             if offset != None and offset > 0:

+             if offset is not None and offset > 0:

                  f.seek(offset, 0)

-             elif offset != None and offset < 0:

+             elif offset is not None and offset < 0:

                  f.seek(offset, 2)

              contents = f.read(size)

          return base64encode(contents)
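The offset handling above mirrors plain file seeking. A standalone sketch of the same semantics (`read_slice` is a hypothetical name):

import base64

def read_slice(path, offset=0, size=-1):
    with open(path, 'rb') as f:
        if offset > 0:
            f.seek(offset, 0)   # from the start of the file
        elif offset < 0:
            f.seek(offset, 2)   # from the end, like tail
        return base64.b64encode(f.read(size))  # size=-1 reads to EOF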

  

- 

      listTaskOutput = staticmethod(list_task_output)

  

      createTag = staticmethod(create_tag)
@@ -10095,7 +10359,7 @@ 

      deleteExternalRepo = staticmethod(delete_external_repo)

  

      def addExternalRepoToTag(self, tag_info, repo_info, priority,

-                 merge_mode='koji'):

+                              merge_mode='koji'):

          """Add an external repo to a tag"""

          # wrap the local method so we don't expose the event parameter

          add_external_repo_to_tag(tag_info, repo_info, priority, merge_mode)
@@ -10118,7 +10382,8 @@ 

  

          filepath: path to the archive file (relative to the Koji workdir)

          buildinfo: information about the build to associate the archive with

-                    May be a string (NVR), integer (buildID), or dict (containing keys: name, version, release)

+                    May be a string (NVR), integer (buildID), or dict (containing keys: name,

+                    version, release)

          type: type of the archive being imported.  Currently supported archive types: maven, win

          typeInfo: dict of type-specific information

          """
@@ -10147,6 +10412,7 @@ 

      queryHistory = staticmethod(query_history)

  

      deleteBuild = staticmethod(delete_build)

+ 

      def buildReferences(self, build, limit=None, lazy=False):

          return build_references(get_build(build, strict=True)['id'], limit, lazy)

  
@@ -10180,8 +10446,8 @@ 

  

      def createEmptyBuild(self, name, version, release, epoch, owner=None):

          context.session.assertPerm('admin')

-         data = {'name' : name, 'version' : version, 'release' : release,

-                 'epoch' : epoch}

+         data = {'name': name, 'version': version, 'release': release,

+                 'epoch': epoch}

          if owner is not None:

              data['owner'] = owner

          return new_build(data)
@@ -10235,7 +10501,7 @@ 

          """

          context.session.assertPerm('admin')

          uploadpath = koji.pathinfo.work()

-         fn = "%s/%s/%s" %(uploadpath, path, basename)

+         fn = "%s/%s/%s" % (uploadpath, path, basename)

          if not os.path.exists(fn):

              raise koji.GenericError("No such file: %s" % fn)

          rpminfo = import_rpm(fn)
@@ -10298,7 +10564,7 @@ 

              context.session.assertPerm('tag')

              tag_id = get_tag(tag, strict=True)['id']

              build_id = get_build(build, strict=True)['id']

-             policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : None, 'operation' : 'tag'}

+             policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': None, 'operation': 'tag'}

              assert_policy('tag', policy_data)

          _tag_build(tag, build, force=force)

          if notify:
@@ -10323,7 +10589,7 @@ 

          The return value is the task id

          """

          context.session.assertLogin()

-         #first some lookups and basic sanity checks

+         # first some lookups and basic sanity checks

          build = get_build(build, strict=True)

          tag = get_tag(tag, strict=True)

          if fromtag:
@@ -10354,16 +10620,16 @@ 

              else:

                  raise koji.TagError(pkg_error)

          # tag policy check

-         policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : fromtag_id}

+         policy_data = {'tag': tag_id, 'build': build_id, 'fromtag': fromtag_id}

          if fromtag is None:

              policy_data['operation'] = 'tag'

          else:

              policy_data['operation'] = 'move'

-         #don't check policy for admins using force

+         # don't check policy for admins using force

          if not (force and context.session.hasPerm('admin')):

              assert_policy('tag', policy_data)

-             #XXX - we're running this check twice, here and in host.tagBuild (called by the task)

-         #spawn the tagging task

+             # XXX - we're running this check twice, here and in host.tagBuild (called by the task)

+         # spawn the tagging task

          return make_task('tagBuild', [tag_id, build_id, force, fromtag_id], priority=10)
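Taken together with untagBuild below, the policy_data shapes assembled for the three operations are:

# tag:   {'tag': tag_id, 'build': build_id, 'fromtag': None,       'operation': 'tag'}
# move:  {'tag': tag_id, 'build': build_id, 'fromtag': fromtag_id, 'operation': 'move'}
# untag: {'tag': None,   'build': build_id, 'fromtag': tag_id,     'operation': 'untag'}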

  

      def untagBuild(self, tag, build, strict=True, force=False):
@@ -10371,15 +10637,15 @@ 

  

          Unlike tagBuild, this does not create a task

          No return value"""

-         #we can't staticmethod this one -- we're limiting the options

+         # we can't staticmethod this one -- we're limiting the options

          context.session.assertLogin()

          user_id = context.session.user_id

          tag_id = get_tag(tag, strict=True)['id']

          build_id = get_build(build, strict=True)['id']

-         policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id}

+         policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id}

          policy_data['operation'] = 'untag'

          try:

-             #don't check policy for admins using force

+             # don't check policy for admins using force

              if not (force and context.session.hasPerm('admin')):

                  assert_policy('tag', policy_data)

              _untag_build(tag, build, strict=strict, force=force)
@@ -10402,7 +10668,7 @@ 

              context.session.assertPerm('tag')

              tag_id = get_tag(tag, strict=True)['id']

              build_id = get_build(build, strict=True)['id']

-             policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id, 'operation' : 'untag'}

+             policy_data = {'tag': None, 'build': build_id, 'fromtag': tag_id, 'operation': 'untag'}

              assert_policy('tag', policy_data)

          _untag_build(tag, build, strict=strict, force=force)

          if notify:
@@ -10420,7 +10686,7 @@ 

          Returns the task id of the task performing the move"""

  

          context.session.assertLogin()

-         #lookups and basic sanity checks

+         # lookups and basic sanity checks

          pkg_id = get_package_id(package, strict=True)

          tag1_id = get_tag_id(tag1, strict=True)

          tag2_id = get_tag_id(tag2, strict=True)
@@ -10440,7 +10706,7 @@ 

              else:

                  raise koji.TagError(pkg_error)

  

-         #access check

+         # access check

          assert_tag_access(tag1_id, user_id=None, force=force)

          assert_tag_access(tag2_id, user_id=None, force=force)

  
@@ -10448,25 +10714,28 @@ 

          # we want 'ORDER BY tag_listing.create_event ASC' not DESC so reverse

          build_list.reverse()

  

-         #policy check

-         policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'}

-         #don't check policy for admins using force

+         # policy check

+         policy_data = {'tag': tag2, 'fromtag': tag1, 'operation': 'move'}

+         # don't check policy for admins using force

          if not (force and context.session.hasPerm('admin')):

              for build in build_list:

                  policy_data['build'] = build['id']

                  assert_policy('tag', policy_data)

-                 #XXX - we're running this check twice, here and in host.tagBuild (called by the task)

+                 # XXX - we're running this check twice, here and in host.tagBuild (called by the

+                 # task)

  

          wait_on = []

          tasklist = []

          for build in build_list:

-             task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority':15}]]])

+             task_id = make_task('dependantTask',

+                                 [wait_on, [['tagBuild',

+                                             [tag2_id, build['id'], force, tag1_id],

+                                             {'priority': 15}]]])

              wait_on = [task_id]

              log_error("\nMade Task: %s\n" % task_id)

              tasklist.append(task_id)

          return tasklist

  

- 

      listTags = staticmethod(list_tags)

  

      getBuild = staticmethod(get_build)
@@ -10498,11 +10767,11 @@ 

  

          - author: only return changelogs with a matching author

          - before: only return changelogs from before the given date (in UTC)

-                   (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS format, or integer seconds

-                    since the epoch)

+                   (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS' format, or integer

+                   seconds since the epoch)

          - after: only return changelogs from after the given date (in UTC)

-                  (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS format, or integer seconds

-                   since the epoch)

+                  (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS' format, or integer

+                  seconds since the epoch)

          - queryOpts: query options used by the QueryProcessor

          - strict: if srpm doesn't exist raise an error, otherwise return empty list
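A hedged helper showing how the three accepted date forms can be normalized to epoch seconds (`normalize_ts` is illustrative and not part of the hub API; it assumes Python 3's datetime.timestamp()):

import datetime
import time

def normalize_ts(value):
    if isinstance(value, datetime.datetime):
        return value.timestamp()
    if isinstance(value, str):
        return time.mktime(time.strptime(value, '%Y-%m-%d %H:%M:%S'))
    return float(value)  # already seconds since the epoch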

  
@@ -10570,7 +10839,8 @@ 

  

          results = []

  

-         fields = koji.get_header_fields(srpm_path, ['changelogtime', 'changelogname', 'changelogtext'])

+         fields = koji.get_header_fields(srpm_path,

+                                         ['changelogtime', 'changelogname', 'changelogtext'])

          for (cltime, clname, cltext) in zip(fields['changelogtime'], fields['changelogname'],

                                              fields['changelogtext']):

              cldate = datetime.datetime.fromtimestamp(cltime).isoformat(' ')
@@ -10587,12 +10857,14 @@ 

              if queryOpts.get('asList'):

                  results.append([cldate, clname, cltext])

              else:

-                 results.append({'date': cldate, 'date_ts': cltime, 'author': clname, 'text': cltext})

+                 results.append({'date': cldate,

+                                 'date_ts': cltime,

+                                 'author': clname,

+                                 'text': cltext})

  

          results = _applyQueryOpts(results, queryOpts)

          return koji.fixEncodingRecurse(results, remove_nonprintable=True)

  

- 

      def cancelBuild(self, buildID):

          """Cancel the build with the given buildID

  
@@ -10600,7 +10872,7 @@ 

          Return True if the build was successfully canceled, False if not."""

          context.session.assertLogin()

          build = get_build(buildID)

-         if build == None:

+         if build is None:

              return False

          if build['owner_id'] != context.session.user_id:

              if not context.session.hasPerm('admin'):
@@ -10629,13 +10901,13 @@ 

          if not task.verifyOwner() and not task.verifyHost():

              if not context.session.hasPerm('admin'):

                  raise koji.ActionNotAllowed('Cannot cancel task, not owner')

-         #non-admins can also use cancelBuild

+         # non-admins can also use cancelBuild

          task.cancel(recurse=recurse)

  

      def cancelTaskFull(self, task_id, strict=True):

          """Cancel a task and all tasks in its group"""

          context.session.assertPerm('admin')

-         #non-admins can use cancelBuild or cancelTask

+         # non-admins can use cancelBuild or cancelTask

          Task(task_id).cancelFull(strict=strict)

  

      def cancelTaskChildren(self, task_id):
@@ -10655,32 +10927,40 @@ 

              raise koji.GenericError("Finished task's priority can't be updated")

          task.setPriority(priority, recurse=recurse)

  

-     def listTagged(self, tag, event=None, inherit=False, prefix=None, latest=False, package=None, owner=None, type=None):

+     def listTagged(self, tag, event=None, inherit=False, prefix=None, latest=False, package=None,

+                    owner=None, type=None):

          """List builds tagged with tag"""

-         #lookup tag id

+         # lookup tag id

          tag = get_tag(tag, strict=True, event=event)['id']

-         results = readTaggedBuilds(tag, event, inherit=inherit, latest=latest, package=package, owner=owner, type=type)

+         results = readTaggedBuilds(tag, event, inherit=inherit, latest=latest, package=package,

+                                    owner=owner, type=type)

          if prefix:

              prefix = prefix.lower()

-             results = [build for build in results if build['package_name'].lower().startswith(prefix)]

+             results = [build for build in results

+                        if build['package_name'].lower().startswith(prefix)]

          return results

  

-     def listTaggedRPMS(self, tag, event=None, inherit=False, latest=False, package=None, arch=None, rpmsigs=False, owner=None, type=None):

+     def listTaggedRPMS(self, tag, event=None, inherit=False, latest=False, package=None, arch=None,

+                        rpmsigs=False, owner=None, type=None):

          """List rpms and builds within tag"""

-         #lookup tag id

+         # lookup tag id

          tag = get_tag(tag, strict=True, event=event)['id']

-         return readTaggedRPMS(tag, event=event, inherit=inherit, latest=latest, package=package, arch=arch, rpmsigs=rpmsigs, owner=owner, type=type)

+         return readTaggedRPMS(tag, event=event, inherit=inherit, latest=latest, package=package,

+                               arch=arch, rpmsigs=rpmsigs, owner=owner, type=type)

  

-     def listTaggedArchives(self, tag, event=None, inherit=False, latest=False, package=None, type=None):

+     def listTaggedArchives(self, tag, event=None, inherit=False, latest=False, package=None,

+                            type=None):

          """List archives and builds within a tag"""

          # lookup tag id

          tag = get_tag(tag, strict=True, event=event)['id']

-         return readTaggedArchives(tag, event=event, inherit=inherit, latest=latest, package=package, type=type)

+         return readTaggedArchives(tag, event=event, inherit=inherit, latest=latest,

+                                   package=package, type=type)

  

      def listBuilds(self, packageID=None, userID=None, taskID=None, prefix=None, state=None,

                     volumeID=None, source=None,

                     createdBefore=None, createdAfter=None,

-                    completeBefore=None, completeAfter=None, type=None, typeInfo=None, queryOpts=None):

+                    completeBefore=None, completeAfter=None, type=None, typeInfo=None,

+                    queryOpts=None):

          """Return a list of builds that match the given parameters

  

          Filter parameters
@@ -10754,16 +11034,20 @@ 

  

          If no builds match, an empty list is returned.

          """

-         fields = [('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'),

-                   ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),

+         fields = [('build.id', 'build_id'), ('build.version', 'version'),

+                   ('build.release', 'release'),

+                   ('build.epoch', 'epoch'), ('build.state', 'state'),

+                   ('build.completion_time', 'completion_time'),

                    ('build.start_time', 'start_time'),

                    ('build.source', 'source'),

                    ('build.extra', 'extra'),

-                   ('events.id', 'creation_event_id'), ('events.time', 'creation_time'), ('build.task_id', 'task_id'),

+                   ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),

+                   ('build.task_id', 'task_id'),

                    ('EXTRACT(EPOCH FROM events.time)', 'creation_ts'),

                    ('EXTRACT(EPOCH FROM build.start_time)', 'start_ts'),

                    ('EXTRACT(EPOCH FROM build.completion_time)', 'completion_ts'),

-                   ('package.id', 'package_id'), ('package.name', 'package_name'), ('package.name', 'name'),

+                   ('package.id', 'package_id'), ('package.name', 'package_name'),

+                   ('package.name', 'name'),

                    ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),

                    ("package.name || '-' || build.version || '-' || build.release", 'nvr'),

                    ('users.id', 'owner_id'), ('users.name', 'owner_name')]
@@ -10774,13 +11058,13 @@ 

                   'LEFT JOIN volume ON build.volume_id = volume.id',

                   'LEFT JOIN users ON build.owner = users.id']

          clauses = []

-         if packageID != None:

+         if packageID is not None:

              clauses.append('package.id = %(packageID)i')

-         if userID != None:

+         if userID is not None:

              clauses.append('users.id = %(userID)i')

-         if volumeID != None:

+         if volumeID is not None:

              clauses.append('volume.id = %(volumeID)i')

-         if taskID != None:

+         if taskID is not None:

              if taskID == -1:

                  clauses.append('build.task_id IS NOT NULL')

              else:
@@ -10790,7 +11074,7 @@ 

              clauses.append('build.source ilike %(source)s')

          if prefix:

              clauses.append("package.name ilike %(prefix)s || '%%'")

-         if state != None:

+         if state is not None:

              clauses.append('build.state = %(state)i')

          if createdBefore:

              if not isinstance(createdBefore, str):
@@ -10840,7 +11124,7 @@ 

                  raise koji.GenericError('unsupported build type: %s' % type)

              btype_id = btype['id']

              joins.append('build_types ON build.id = build_types.build_id '

-                     'AND btype_id = %(btype_id)s')

+                          'AND btype_id = %(btype_id)s')

  

          query = QueryProcessor(columns=[pair[0] for pair in fields],

                                 aliases=[pair[1] for pair in fields],
@@ -10853,16 +11137,17 @@ 

      def getLatestBuilds(self, tag, event=None, package=None, type=None):

          """List latest builds for tag (inheritance enabled)"""

          if not isinstance(tag, six.integer_types):

-             #lookup tag id

+             # lookup tag id

              tag = get_tag_id(tag, strict=True)

          return readTaggedBuilds(tag, event, inherit=True, latest=True, package=package, type=type)

  

      def getLatestRPMS(self, tag, package=None, arch=None, event=None, rpmsigs=False, type=None):

          """List latest RPMS for tag (inheritance enabled)"""

          if not isinstance(tag, six.integer_types):

-             #lookup tag id

+             # lookup tag id

              tag = get_tag_id(tag, strict=True)

-         return readTaggedRPMS(tag, package=package, arch=arch, event=event, inherit=True, latest=True, rpmsigs=rpmsigs, type=type)

+         return readTaggedRPMS(tag, package=package, arch=arch, event=event, inherit=True,

+                               latest=True, rpmsigs=rpmsigs, type=type)

  

      def getLatestMavenArchives(self, tag, event=None, inherit=True):

          """Return a list of the latest Maven archives in the tag, as of the given event
@@ -10950,7 +11235,7 @@ 

          if jumps is None:

              jumps = {}

          if not isinstance(tag, six.integer_types):

-             #lookup tag id

+             # lookup tag id

              tag = get_tag_id(tag, strict=True)

          for mapping in [stops, jumps]:

              for key in to_list(mapping.keys()):
@@ -10977,7 +11262,7 @@ 

  

          If no build has the given ID, or the build generated no RPMs, an empty list is returned."""

          if not isinstance(build, six.integer_types):

-             #lookup build id

+             # lookup build id

              build = self.findBuildID(build, strict=True)

          return self.listRPMs(buildID=build)

  
@@ -11018,7 +11303,8 @@ 

  

          results = []

  

-         for dep_name in ['REQUIRE', 'PROVIDE', 'CONFLICT', 'OBSOLETE', 'SUGGEST', 'ENHANCE', 'SUPPLEMENT', 'RECOMMEND']:

+         for dep_name in ['REQUIRE', 'PROVIDE', 'CONFLICT', 'OBSOLETE', 'SUGGEST', 'ENHANCE',

+                          'SUPPLEMENT', 'RECOMMEND']:

              dep_id = getattr(koji, 'DEP_' + dep_name)

              if depType is None or depType == dep_id:

                  fields = koji.get_header_fields(rpm_path, [dep_name + 'NAME',
@@ -11030,7 +11316,8 @@ 

                      if queryOpts.get('asList'):

                          results.append([name, version, flags, dep_id])

                      else:

-                         results.append({'name': name, 'version': version, 'flags': flags, 'type': dep_id})

+                         results.append(

+                             {'name': name, 'version': version, 'flags': flags, 'type': dep_id})

  

          return _applyQueryOpts(results, queryOpts)

  
@@ -11059,13 +11346,15 @@ 

          results = []

          hdr = koji.get_rpm_header(rpm_path)

          fields = koji.get_header_fields(hdr, ['filenames', 'filemd5s', 'filesizes', 'fileflags',

-                                               'fileusername', 'filegroupname', 'filemtimes', 'filemodes'])

+                                               'fileusername', 'filegroupname', 'filemtimes',

+                                               'filemodes'])

          digest_algo = koji.util.filedigestAlgo(hdr)

  

-         for (name, digest, size, flags, user, group, mtime, mode) in zip(fields['filenames'], fields['filemd5s'],

-                                                                          fields['filesizes'], fields['fileflags'],

-                                                                          fields['fileusername'], fields['filegroupname'],

-                                                                          fields['filemtimes'], fields['filemodes']):

+         for (name, digest, size, flags, user, group, mtime, mode) \

+                 in zip(fields['filenames'], fields['filemd5s'],

+                        fields['filesizes'], fields['fileflags'],

+                        fields['fileusername'], fields['filegroupname'],

+                        fields['filemtimes'], fields['filemodes']):

              if queryOpts.get('asList'):

                  results.append([name, digest, size, flags, digest_algo, user, group, mtime, mode])

              else:
@@ -11116,7 +11405,8 @@ 

          hdr = koji.get_rpm_header(rpm_path)

          # use filemd5s for backward compatibility

          fields = koji.get_header_fields(hdr, ['filenames', 'filemd5s', 'filesizes', 'fileflags',

-                                               'fileusername', 'filegroupname', 'filemtimes', 'filemodes'])

+                                               'fileusername', 'filegroupname', 'filemtimes',

+                                               'filemodes'])

          digest_algo = koji.util.filedigestAlgo(hdr)

  

          i = 0
@@ -11170,7 +11460,7 @@ 

      def writeSignedRPM(self, an_rpm, sigkey, force=False):

          """Write a signed copy of the rpm"""

          context.session.assertPerm('sign')

-         #XXX - still not sure if this is the right restriction

+         # XXX - still not sure if this is the right restriction

          return write_signed_rpm(an_rpm, sigkey, force)

  

      def addRPMSig(self, an_rpm, data):
@@ -11202,7 +11492,8 @@ 

  

      getPackage = staticmethod(lookup_package)

  

-     def listPackages(self, tagID=None, userID=None, pkgID=None, prefix=None, inherited=False, with_dups=False, event=None, queryOpts=None):

+     def listPackages(self, tagID=None, userID=None, pkgID=None, prefix=None, inherited=False,

+                      with_dups=False, event=None, queryOpts=None):

          """List if tagID and/or userID is specified, limit the

          list to packages belonging to the given user or with the

          given tag.
@@ -11233,8 +11524,8 @@ 

              if pkgID is not None:

                  pkgID = get_package_id(pkgID, strict=True)

              result_list = list(readPackageList(tagID=tagID, userID=userID, pkgID=pkgID,

-                                           inherit=inherited, with_dups=with_dups,

-                                           event=event).values())

+                                                inherit=inherited, with_dups=with_dups,

+                                                event=event).values())

              if with_dups:

                  # when with_dups=True, readPackageList returns a list of list of dicts

                  # convert it to a list of dicts for consistency
@@ -11246,11 +11537,11 @@ 

  

          if prefix:

              prefix = prefix.lower()

-             results = [package for package in results if package['package_name'].lower().startswith(prefix)]

+             results = [package for package in results

+                        if package['package_name'].lower().startswith(prefix)]

  

          return _applyQueryOpts(results, queryOpts)

  

- 

      def listPackagesSimple(self, prefix=None, queryOpts=None):

          """list packages that starts with prefix and are filted

          and ordered by queryOpts.
@@ -11265,7 +11556,7 @@ 

              'package_name' and 'package_id'.

          """

          fields = (('package.id', 'package_id'),

-                       ('package.name', 'package_name'))

+                   ('package.name', 'package_name'))

          if prefix is None:

              clauses = None

          else:
@@ -11276,7 +11567,6 @@ 

              opts=queryOpts)

          return query.execute()

  

- 

      def checkTagPackage(self, tag, pkg):

          """Check that pkg is in the list for tag. Returns true/false"""

          tag_id = get_tag_id(tag, strict=False)
@@ -11287,7 +11577,7 @@ 

          if pkg_id not in pkgs:

              return False

          else:

-             #still might be blocked

+             # still might be blocked

              return not pkgs[pkg_id]['blocked']

  

      def getPackageConfig(self, tag, pkg, event=None):
@@ -11309,7 +11599,8 @@ 

          perm = lookup_perm(permission, strict=(not create), create=create)

          perm_id = perm['id']

          if perm['name'] in koji.auth.get_user_perms(user_id):

-             raise koji.GenericError('user %s already has permission: %s' % (userinfo, perm['name']))

+             raise koji.GenericError('user %s already has permission: %s' %

+                                     (userinfo, perm['name']))

          insert = InsertProcessor('user_perms')

          insert.set(user_id=user_id, perm_id=perm_id)

          insert.make_create()
@@ -11322,9 +11613,10 @@ 

          perm = lookup_perm(permission, strict=True)

          perm_id = perm['id']

          if perm['name'] not in koji.auth.get_user_perms(user_id):

-             raise koji.GenericError('user %s does not have permission: %s' % (userinfo, perm['name']))

+             raise koji.GenericError('user %s does not have permission: %s' %

+                                     (userinfo, perm['name']))

          update = UpdateProcessor('user_perms', values=locals(),

-                     clauses=["user_id = %(user_id)i", "perm_id = %(perm_id)i"])

+                                  clauses=["user_id = %(user_id)i", "perm_id = %(perm_id)i"])

          update.make_revoke()

          update.execute()

  
@@ -11380,7 +11672,7 @@ 

      grantCGAccess = staticmethod(grant_cg_access)

      revokeCGAccess = staticmethod(revoke_cg_access)

  

-     #group management calls

+     # group management calls

      newGroup = staticmethod(new_group)

      addGroupMember = staticmethod(add_group_member)

      dropGroupMember = staticmethod(drop_group_member)
@@ -11423,7 +11715,7 @@ 

          """Return build configuration associated with a tag"""

          taginfo = get_tag(tag, strict=True, event=event)

          order = readFullInheritance(taginfo['id'], event=event)

-         #follow inheritance for arches and extra

+         # follow inheritance for arches and extra

          for link in order:

              if link['noconfig']:

                  continue
@@ -11441,7 +11733,8 @@ 

          else:

              id = get_tag_id(tag, strict=True)

  

-         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)', 'repo.dist']

+         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time',

+                   'EXTRACT(EPOCH FROM events.time)', 'repo.dist']

          aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts', 'dist']

          joins = ['events ON repo.create_event = events.id']

          clauses = ['repo.tag_id = %(id)i']
@@ -11475,17 +11768,18 @@ 

          build_config = self.getBuildConfig(tag)

          if build_config['extra'].get('distrepo.cancel_others', False):

              tasks = self.listTasks(opts={

-                                        'state': [koji.TASK_STATES['FREE'],

-                                                  koji.TASK_STATES['OPEN'],

-                                                  koji.TASK_STATES['ASSIGNED']],

-                                        'method': 'distRepo',

-                                        'decode': True})

+                 'state': [koji.TASK_STATES['FREE'],

+                           koji.TASK_STATES['OPEN'],

+                           koji.TASK_STATES['ASSIGNED']],

+                 'method': 'distRepo',

+                 'decode': True})

              # filter only for this tag

              task_ids = [t['id'] for t in tasks if t['request'][0] == tag]

              for task_id in task_ids:

                  logger.debug("Cancelling distRepo task %d" % task_id)

                  Task(task_id).cancel(recurse=True)

-         return make_task('distRepo', [tag, repo_id, keys, task_opts], priority=15, channel='createrepo')

+         return make_task('distRepo', [tag, repo_id, keys, task_opts],

+                          priority=15, channel='createrepo')

  

      def newRepo(self, tag, event=None, src=False, debuginfo=False, separate_src=False):

          """Create a newRepo task. returns task id"""
@@ -11615,7 +11909,8 @@ 

              owner[int|list]: limit to tasks owned by the user with the given ID

              not_owner[int|list]: limit to tasks not owned by the user with the given ID

              host_id[int|list]: limit to tasks running on the host with the given ID

-             not_host_id[int|list]: limit to tasks running on the hosts with IDs other than the given ID

+             not_host_id[int|list]: limit to tasks running on the hosts with IDs other than the

+                                    given ID

              channel_id[int|list]: limit to tasks in the specified channel

              not_channel_id[int|list]: limit to tasks not in the specified channel

              parent[int|list]: limit to tasks with the given parent
@@ -11654,14 +11949,14 @@ 

          else:

              joins = ['LEFT JOIN users ON task.owner = users.id']

          flist = Task.fields + (

-                     ('task.request', 'request'),

-                     ('task.result', 'result'),

-                     )

+             ('task.request', 'request'),

+             ('task.result', 'result'),

+         )

          if not countOnly:

              flist += (

-                     ('users.name', 'owner_name'),

-                     ('users.usertype', 'owner_type'),

-                     )

+                 ('users.name', 'owner_name'),

+                 ('users.usertype', 'owner_type'),

+             )

          fields = [f[0] for f in flist]

          aliases = [f[1] for f in flist]

  
@@ -11697,18 +11992,18 @@ 

              conditions.append('method = %(method)s')

  

          time_opts = [

-                 ['createdBefore', 'create_time', '<'],

-                 ['createdAfter', 'create_time', '>'],

-                 ['startedBefore', 'start_time', '<'],

-                 ['startedAfter', 'start_time', '>'],

-                 ['completeBefore', 'completion_time', '<'],

-                 ['completeAfter', 'completion_time', '>'],

-                 # and a couple aliases for api compat:

-                 ['completedBefore', 'completion_time', '<'],

-                 ['completedAfter', 'completion_time', '>'],

-             ]

+             ['createdBefore', 'create_time', '<'],

+             ['createdAfter', 'create_time', '>'],

+             ['startedBefore', 'start_time', '<'],

+             ['startedAfter', 'start_time', '>'],

+             ['completeBefore', 'completion_time', '<'],

+             ['completeAfter', 'completion_time', '>'],

+             # and a couple aliases for api compat:

+             ['completedBefore', 'completion_time', '<'],

+             ['completedAfter', 'completion_time', '>'],

+         ]

          for key, field, cmp in time_opts:

-             if opts.get(key) != None:

+             if opts.get(key) is not None:

                  value = opts[key]

                  if not isinstance(value, str):

                      opts[key] = datetime.datetime.fromtimestamp(value).isoformat(' ')
@@ -11742,7 +12037,7 @@ 

                  if val:

                      try:

                          if val.find('<?xml', 0, 10) == -1:

-                             #handle older base64 encoded data

+                             # handle older base64 encoded data

                              val = base64.b64decode(val)

                          # note: loads accepts either bytes or string

                          data, method = six.moves.xmlrpc_client.loads(val)
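The fallback above can be read in isolation. A sketch, assuming val is a text string as stored by older hubs:

import base64
from six.moves import xmlrpc_client

def decode_request(val):
    if val.find('<?xml', 0, 10) == -1:
        # no XML prolog near the start: older hubs stored the
        # payload base64-encoded
        val = base64.b64decode(val)
    # loads accepts bytes or str; returns (params, methodname)
    params, method = xmlrpc_client.loads(val)
    return params, method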
@@ -11790,7 +12085,7 @@ 

              """ % get_user(owner, strict=True)['id']

          q += """ORDER BY priority,create_time

          """

-         #XXX hard-coded interval

+         # XXX hard-coded interval

          c = context.cnx.cursor()

          c.execute(q, koji.TASK_STATES)

          return [dict(zip([f[1] for f in fields], row)) for row in c.fetchall()]
@@ -11803,7 +12098,7 @@ 

          if not (task.isCanceled() or task.isFailed()):

              raise koji.GenericError('only canceled or failed tasks may be resubmitted')

          taskInfo = task.getInfo()

-         if taskInfo['parent'] != None:

+         if taskInfo['parent'] is not None:

              raise koji.GenericError('only top-level tasks may be resubmitted')

          if not (context.session.user_id == taskInfo['owner'] or self.hasPerm('admin')):

              raise koji.GenericError('only the task owner or an admin may resubmit a task')
@@ -11811,7 +12106,9 @@ 

          args = task.getRequest()

          channel = get_channel(taskInfo['channel_id'], strict=True)

  

-         return make_task(taskInfo['method'], args, arch=taskInfo['arch'], channel=channel['name'], priority=taskInfo['priority'])

+         return make_task(taskInfo['method'], args,

+                          arch=taskInfo['arch'], channel=channel['name'],

+                          priority=taskInfo['priority'])

  

      def addHost(self, hostname, arches, krb_principal=None):

          """
@@ -11838,12 +12135,13 @@ 

              fmt = context.opts.get('HostPrincipalFormat')

              if fmt:

                  krb_principal = fmt % hostname

-         #users entry

+         # users entry

          userID = context.session.createUser(hostname, usertype=koji.USERTYPES['HOST'],

                                              krb_principal=krb_principal)

-         #host entry

+         # host entry

          hostID = _singleValue("SELECT nextval('host_id_seq')", strict=True)

-         insert = "INSERT INTO host (id, user_id, name) VALUES (%(hostID)i, %(userID)i, %(hostname)s)"

+         insert = "INSERT INTO host (id, user_id, name) VALUES (%(hostID)i, %(userID)i, " \

+                  "%(hostname)s)"

          _dml(insert, dslice(locals(), ('hostID', 'userID', 'hostname')))

  

          insert = InsertProcessor('host_config')
@@ -11851,7 +12149,7 @@ 

          insert.make_create()

          insert.execute()

  

-         #host_channels entry

+         # host_channels entry

          insert = InsertProcessor('host_channels')

          insert.set(host_id=hostID, channel_id=default_channel)

          insert.make_create()
@@ -11874,7 +12172,8 @@ 

      renameChannel = staticmethod(rename_channel)

      removeChannel = staticmethod(remove_channel)

  

-     def listHosts(self, arches=None, channelID=None, ready=None, enabled=None, userID=None, queryOpts=None):

+     def listHosts(self, arches=None, channelID=None, ready=None, enabled=None, userID=None,

+                   queryOpts=None):

          """Get a list of hosts.  "arches" is a list of string architecture

          names, e.g. ['i386', 'ppc64'].  If one of the arches associated with a given

          host appears in the list, it will be included in the results.  If "ready" and "enabled"
@@ -11911,20 +12210,20 @@ 

              clauses.append('user_id = %(userID)i')

  

          fields = {'host.id': 'id',

-               'host.user_id': 'user_id',

-               'host.name': 'name',

-               'host.ready': 'ready',

-               'host.task_load': 'task_load',

-               'host_config.arches': 'arches',

-               'host_config.capacity': 'capacity',

-               'host_config.description': 'description',

-               'host_config.comment': 'comment',

-               'host_config.enabled': 'enabled',

-               }

+                   'host.user_id': 'user_id',

+                   'host.name': 'name',

+                   'host.ready': 'ready',

+                   'host.task_load': 'task_load',

+                   'host_config.arches': 'arches',

+                   'host_config.capacity': 'capacity',

+                   'host_config.description': 'description',

+                   'host_config.comment': 'comment',

+                   'host_config.enabled': 'enabled',

+                   }

          tables = ['host_config']

          fields, aliases = zip(*fields.items())

          query = QueryProcessor(columns=fields, aliases=aliases,

-                 tables=tables, joins=joins, clauses=clauses, values=locals())

+                                tables=tables, joins=joins, clauses=clauses, values=locals())

          return query.execute()

  

      def getLastHostUpdate(self, hostID):
@@ -12009,11 +12308,15 @@ 

          userid = userinfo['id']

          buildid = buildinfo['id']

          owner_id_old = buildinfo['owner_id']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='owner_id', old=owner_id_old, new=userid, info=buildinfo)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='owner_id', old=owner_id_old, new=userid,

+                                   info=buildinfo)

          q = """UPDATE build SET owner=%(userid)i WHERE id=%(buildid)i"""

          _dml(q, locals())

          buildinfo = get_build(build, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='owner_id', old=owner_id_old, new=userid, info=buildinfo)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='owner_id', old=owner_id_old, new=userid,

+                                   info=buildinfo)

  

      def setBuildTimestamp(self, build, ts):

          """Set the completion time for a build
@@ -12024,8 +12327,8 @@ 

          context.session.assertPerm('admin')

          buildinfo = get_build(build, strict=True)

          if isinstance(ts, six.moves.xmlrpc_client.DateTime):

-             #not recommended

-             #the xmlrpclib.DateTime class is almost useless

+             # not recommended

+             # the xmlrpclib.DateTime class is almost useless

              try:

                  ts = time.mktime(time.strptime(str(ts), '%Y%m%dT%H:%M:%S'))

              except ValueError:
@@ -12033,22 +12336,25 @@ 

          elif not isinstance(ts, NUMERIC_TYPES):

              raise koji.GenericError("Invalid type for timestamp")

          ts_old = buildinfo['completion_ts']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='completion_ts', old=ts_old, new=ts, info=buildinfo)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='completion_ts', old=ts_old, new=ts, info=buildinfo)

          buildid = buildinfo['id']

          q = """UPDATE build

          SET completion_time=TIMESTAMP 'epoch' AT TIME ZONE 'utc' + '%(ts)f seconds'::interval

          WHERE id=%%(buildid)i""" % locals()

          _dml(q, locals())

          buildinfo = get_build(build, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='completion_ts', old=ts_old, new=ts, info=buildinfo)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='completion_ts', old=ts_old, new=ts, info=buildinfo)

  

      def count(self, methodName, *args, **kw):

          """Execute the XML-RPC method with the given name and count the results.

-         A method return value of None will return O, a return value of type "list", "tuple", or "dict"

-         will return len(value), and a return value of any other type will return 1.  An invalid

-         methodName will raise an AttributeError, and invalid arguments will raise a TypeError."""

+         A method return value of None will return O, a return value of type "list", "tuple", or

+         "dict" will return len(value), and a return value of any other type will return 1. An

+         invalid methodName will raise an AttributeError, and invalid arguments will raise a

+         TypeError."""

          result = getattr(self, methodName)(*args, **kw)

-         if result == None:

+         if result is None:

              return 0

          elif isinstance(result, (list, tuple, dict)):

              return len(result)
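Illustrative expectations, per the docstring (the session proxy and call arguments are hypothetical):

# session.count('getBuild', 'no-such-build-1-1')   # None result  -> 0
# session.count('listTags')                        # list result  -> len(list)
# session.count('checkTagPackage', 'f30', 'bash')  # bool result  -> 1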
@@ -12082,7 +12388,6 @@ 

          """

          return self.countAndFilterResults(methodName, *args, **kw)[1]

  

- 

      def countAndFilterResults(self, methodName, *args, **kw):

          """Filter results by a given name and count total results account.

  
@@ -12138,7 +12443,6 @@ 

  

          return _count, results

  

- 

      def getBuildNotifications(self, userID=None):

          """Get build notifications for the user with the given ID, name or

          Kerberos principal. If no user is specified, get the notifications for
@@ -12153,10 +12457,10 @@ 

          raise GenericError, else return None.

          """

          query = QueryProcessor(tables=['build_notifications'],

-                                columns = ('id', 'user_id', 'package_id', 'tag_id',

-                                           'success_only', 'email'),

-                                clauses = ['id = %(id)i'],

-                                values = locals())

+                                columns=('id', 'user_id', 'package_id', 'tag_id',

+                                         'success_only', 'email'),

+                                clauses=['id = %(id)i'],

+                                values=locals())

          result = query.executeOne()

          if strict and not result:

              raise koji.GenericError("No notification with ID %i found" % id)
@@ -12176,9 +12480,9 @@ 

          raise GenericError, else return None.

          """

          query = QueryProcessor(tables=['build_notifications_block'],

-                                columns = ('id', 'user_id', 'package_id', 'tag_id'),

-                                clauses = ['id = %(id)i'],

-                                values = locals())

+                                columns=('id', 'user_id', 'package_id', 'tag_id'),

+                                clauses=['id = %(id)i'],

+                                values=locals())

          result = query.executeOne()

          if strict and not result:

              raise koji.GenericError("No notification block with ID %i found" % id)
@@ -12194,8 +12498,8 @@ 

  

          orig_notif = self.getBuildNotification(id, strict=True)

          if not (orig_notif['user_id'] == currentUser['id'] or self.hasPerm('admin')):

-             raise koji.GenericError('user %i cannot update notifications for user %i' % \

-                   (currentUser['id'], orig_notif['user_id']))

+             raise koji.GenericError('user %i cannot update notifications for user %i' %

+                                     (currentUser['id'], orig_notif['user_id']))

  

          # sanitize input

          if package_id is not None:
@@ -12208,7 +12512,7 @@ 

          for notification in get_build_notifications(orig_notif['user_id']):

              if (notification['package_id'] == package_id and

                  notification['tag_id'] == tag_id and

-                 notification['success_only'] == success_only):

+                     notification['success_only'] == success_only):

                  raise koji.GenericError('notification already exists')

  

          update = UpdateProcessor('build_notifications',
@@ -12228,8 +12532,8 @@ 

              raise koji.GenericError('invalid user ID: %s' % user_id)

  

          if not (notificationUser['id'] == currentUser['id'] or self.hasPerm('admin')):

-             raise koji.GenericError('user %s cannot create notifications for user %s' % \

-                   (currentUser['name'], notificationUser['name']))

+             raise koji.GenericError('user %s cannot create notifications for user %s' %

+                                     (currentUser['name'], notificationUser['name']))

  

          # sanitize input

          user_id = notificationUser['id']
@@ -12245,7 +12549,7 @@ 

          for notification in get_build_notifications(user_id):

              if (notification['package_id'] == package_id and

                  notification['tag_id'] == tag_id and

-                 notification['success_only'] == success_only):

+                     notification['success_only'] == success_only):

                  raise koji.GenericError('notification already exists')

  

          insert = InsertProcessor('build_notifications')
@@ -12263,8 +12567,8 @@ 

  

          if not (notification['user_id'] == currentUser['id'] or

                  self.hasPerm('admin')):

-             raise koji.GenericError('user %i cannot delete notifications for user %i' % \

-                   (currentUser['id'], notification['user_id']))

+             raise koji.GenericError('user %i cannot delete notifications for user %i' %

+                                     (currentUser['id'], notification['user_id']))

          delete = """DELETE FROM build_notifications WHERE id = %(id)i"""

          _dml(delete, locals())

  
@@ -12281,8 +12585,8 @@ 

              raise koji.GenericError('invalid user ID: %s' % user_id)

  

          if not (notificationUser['id'] == currentUser['id'] or self.hasPerm('admin')):

-             raise koji.GenericError('user %s cannot create notification blocks for user %s' % \

-                   (currentUser['name'], notificationUser['name']))

+             raise koji.GenericError('user %s cannot create notification blocks for user %s' %

+                                     (currentUser['name'], notificationUser['name']))

  

          # sanitize input

          user_id = notificationUser['id']
@@ -12310,8 +12614,8 @@ 

  

          if not (block['user_id'] == currentUser['id'] or

                  self.hasPerm('admin')):

-             raise koji.GenericError('user %i cannot delete notification blocks for user %i' % \

-                   (currentUser['id'], block['user_id']))

+             raise koji.GenericError('user %i cannot delete notification blocks for user %i' %

+                                     (currentUser['id'], block['user_id']))

          delete = """DELETE FROM build_notifications_block WHERE id = %(id)i"""

          _dml(delete, locals())

  
@@ -12322,7 +12626,8 @@ 

          be replaced with "%".  If matchType is "regexp", no changes will be

          made."""

          if matchType == 'glob':

-             return terms.replace('\\', '\\\\').replace('_', r'\_').replace('?', '_').replace('*', '%')

+             return terms.replace(

+                 '\\', '\\\\').replace('_', r'\_').replace('?', '_').replace('*', '%')

          else:

              return terms
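The glob branch, pulled out with worked examples (`glob_to_like` is an illustrative name):

def glob_to_like(terms):
    # escape backslashes first, protect literal underscores, then map
    # the glob wildcards onto their SQL LIKE equivalents
    return (terms.replace('\\', '\\\\')
                 .replace('_', r'\_')
                 .replace('?', '_')
                 .replace('*', '%'))

# glob_to_like('kernel*')  -> 'kernel%'
# glob_to_like('koji-1.?') -> 'koji-1._'
# glob_to_like('a_b')      -> r'a\_b'  (literal underscore stays literal)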

  
@@ -12382,24 +12687,29 @@ 

          joins = []

          if type == 'build':

              joins.append('package ON build.pkg_id = package.id')

-             clause = "package.name || '-' || build.version || '-' || build.release %s %%(terms)s" % oper

+             clause = "package.name || '-' || build.version || '-' || build.release %s %%(terms)s" \

+                      % oper

              cols = ('build.id', "package.name || '-' || build.version || '-' || build.release")

          elif type == 'rpm':

-             clause = "name || '-' || version || '-' || release || '.' || arch || '.rpm' %s %%(terms)s" % oper

+             clause = "name || '-' || version || '-' || release || '.' || arch || '.rpm' %s " \

+                      "%%(terms)s" % oper

              cols = ('id', "name || '-' || version || '-' || release || '.' || arch || '.rpm'")

          elif type == 'tag':

              joins.append('tag_config ON tag.id = tag_config.tag_id')

              clause = 'tag_config.active = TRUE and name %s %%(terms)s' % oper

          elif type == 'target':

-             joins.append('build_target_config ON build_target.id = build_target_config.build_target_id')

+             joins.append('build_target_config '

+                          'ON build_target.id = build_target_config.build_target_id')

              clause = 'build_target_config.active = TRUE and name %s %%(terms)s' % oper

          elif type == 'maven':

              cols = ('id', 'filename')

              joins.append('maven_archives ON archiveinfo.id = maven_archives.archive_id')

              clause = "archiveinfo.filename %s %%(terms)s or maven_archives.group_id || '-' || " \

-                 "maven_archives.artifact_id || '-' || maven_archives.version %s %%(terms)s" % (oper, oper)

+                      "maven_archives.artifact_id || '-' || maven_archives.version %s %%(terms)s" \

+                      % (oper, oper)

          elif type == 'win':

-             cols = ('id', "trim(leading '/' from win_archives.relpath || '/' || archiveinfo.filename)")

+             cols = ('id',

+                     "trim(leading '/' from win_archives.relpath || '/' || archiveinfo.filename)")

              joins.append('win_archives ON archiveinfo.id = win_archives.archive_id')

              clause = "archiveinfo.filename %s %%(terms)s or win_archives.relpath || '/' || " \

                       "archiveinfo.filename %s %%(terms)s" % (oper, oper)
@@ -12417,11 +12727,11 @@ 

  

      def __init__(self, id=None):

          if id is None:

-             #db entry has yet to be created

+             # db entry has yet to be created

              self.id = None

          else:

              logging.getLogger("koji.hub").debug("BuildRoot id: %s" % id)

-             #load buildroot data

+             # load buildroot data

              self.load(id)

  

      def load(self, id):
@@ -12435,10 +12745,10 @@ 

              'host_os',

              'host_arch',

              'extra',

-             ]

+         ]

          query = QueryProcessor(columns=fields, tables=['buildroot'],

-                     transform=_fix_extra_field,

-                     values={'id': id}, clauses=['id=%(id)s'])

+                                transform=_fix_extra_field,

+                                values={'id': id}, clauses=['id=%(id)s'])

          data = query.executeOne()

          if not data:

              raise koji.GenericError('no buildroot with ID: %i' % id)
@@ -12457,9 +12767,9 @@ 

              'create_event',

              'retire_event',

              'state',

-             ]

+         ]

          query = QueryProcessor(columns=fields, tables=['standard_buildroot'],

-                     values={'id': self.id}, clauses=['buildroot_id=%(id)s'])

+                                values={'id': self.id}, clauses=['buildroot_id=%(id)s'])

          data = query.executeOne()

          if not data:

              raise koji.GenericError('Not a standard buildroot: %i' % self.id)
@@ -12495,7 +12805,7 @@ 

              'host_os',

              'host_arch',

              'extra',

-             ]

+         ]

          data.setdefault('br_type', koji.BR_TYPES['EXTERNAL'])

          data = dslice(data, fields)

          for key in fields:
@@ -12523,7 +12833,7 @@ 

      def assertTask(self, task_id):

          self.assertStandard()

          if not self.verifyTask(task_id):

-             raise koji.ActionNotAllowed('Task %s does not have lock on buildroot %s' \

+             raise koji.ActionNotAllowed('Task %s does not have lock on buildroot %s'

                                          % (task_id, self.id))

  

      def verifyHost(self, host_id):
@@ -12533,27 +12843,27 @@ 

      def assertHost(self, host_id):

          self.assertStandard()

          if not self.verifyHost(host_id):

-             raise koji.ActionNotAllowed("Host %s not owner of buildroot %s" \

+             raise koji.ActionNotAllowed("Host %s not owner of buildroot %s"

                                          % (host_id, self.id))

  

      def setState(self, state):

          self.assertStandard()

          if isinstance(state, str):

              state = koji.BR_STATES[state]

-         #sanity checks

+         # sanity checks

          if state == koji.BR_STATES['INIT']:

-             #we do not re-init buildroots

+             # we do not re-init buildroots

              raise koji.GenericError("Cannot change buildroot state to INIT")

          query = QueryProcessor(columns=['state', 'retire_event'], values=self.data,

-                     tables=['standard_buildroot'], clauses=['buildroot_id=%(id)s'],

-                     opts={'rowlock':True})

+                                tables=['standard_buildroot'], clauses=['buildroot_id=%(id)s'],

+                                opts={'rowlock': True})

          row = query.executeOne()

          if not row:

              raise koji.GenericError("Unable to get state for buildroot %s" % self.id)

          lstate, retire_event = row

          if koji.BR_STATES[row['state']] == 'EXPIRED':

-             #we will quietly ignore a request to expire an expired buildroot

-             #otherwise this is an error

+             # we will quietly ignore a request to expire an expired buildroot

+             # otherwise this is an error

              if koji.BR_STATES[state] == 'EXPIRED':

                  return

              else:
@@ -12581,12 +12891,13 @@ 

              ('build_id', 'build_id'),

              ('external_repo_id', 'external_repo_id'),

              ('external_repo.name', 'external_repo_name'),

-             )

+         )

          query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],

-                         tables=['buildroot_listing'],

-                         joins=["rpminfo ON rpm_id = rpminfo.id", "external_repo ON external_repo_id = external_repo.id"],

-                         clauses=["buildroot_listing.buildroot_id = %(brootid)i"],

-                         values=locals())

+                                tables=['buildroot_listing'],

+                                joins=["rpminfo ON rpm_id = rpminfo.id",

+                                       "external_repo ON external_repo_id = external_repo.id"],

+                                clauses=["buildroot_listing.buildroot_id = %(brootid)i"],

+                                values=locals())

          return query.execute()
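(Reviewer note: the QueryProcessor calls being re-indented here assemble parameterized SQL rather than interpolating values directly. As a rough sketch, assuming QueryProcessor's usual rendering, the buildroot listing query above corresponds to something like:

    SELECT ..., external_repo_id, external_repo.name AS external_repo_name
    FROM buildroot_listing
    JOIN rpminfo ON rpm_id = rpminfo.id
    JOIN external_repo ON external_repo_id = external_repo.id
    WHERE buildroot_listing.buildroot_id = %(brootid)i

with %(brootid)i filled in from the values mapping at execution time.)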

  

      def _setList(self, rpmlist, update=False):
@@ -12602,15 +12913,15 @@ 

              location = an_rpm.get('location')

              if location:

                  data = add_external_rpm(an_rpm, location, strict=False)

-                 #will add if missing, compare if not

+                 # will add if missing, compare if not

              else:

                  data = get_rpm(an_rpm, strict=True)

              rpm_id = data['id']

              if update and rpm_id in current:

-                 #ignore duplicate packages for updates

+                 # ignore duplicate packages for updates

                  continue

              rpm_ids.append(rpm_id)

-         #we sort to try to avoid deadlock issues

+         # we sort to try to avoid deadlock issues

          rpm_ids.sort()

  

          # actually do the inserts (in bulk)
@@ -12647,7 +12958,7 @@ 

                    ('checksum', 'checksum'),

                    ('checksum_type', 'checksum_type'),

                    ('project_dep', 'project_dep'),

-                  ]

+                   ]

          columns, aliases = zip(*fields)

          query = QueryProcessor(tables=tables, columns=columns,

                                 joins=joins, clauses=clauses,
@@ -12716,11 +13027,11 @@ 

  

      def taskUnwait(self, parent):

          """Clear wait data for task"""

-         #unwait the task

+         # unwait the task

          update = UpdateProcessor('task', clauses=['id=%(parent)s'], values=locals())

          update.set(waiting=False)

          update.execute()

-         #...and un-await its subtasks

+         # ...and un-await its subtasks

          update = UpdateProcessor('task', clauses=['parent=%(parent)s'], values=locals())

          update.set(awaited=False)

          update.execute()
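(Reviewer note: behavior is unchanged by the comment reflow. For readers skimming the diff, taskUnwait issues two updates, roughly:

    UPDATE task SET waiting = false WHERE id = %(parent)s;
    UPDATE task SET awaited = false WHERE parent = %(parent)s;

i.e. the parent stops waiting and all of its children stop being awaited.)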
@@ -12741,24 +13052,26 @@ 

              update.execute()

          elif tasks:

              # wait on specified subtasks

-             update = UpdateProcessor('task', clauses=['id IN %(tasks)s', 'parent=%(parent)s'], values=locals())

+             update = UpdateProcessor('task', clauses=['id IN %(tasks)s', 'parent=%(parent)s'],

+                                      values=locals())

              update.set(awaited=True)

              update.execute()

              # clear awaited flag on any other child tasks

              update = UpdateProcessor('task', values=locals(),

-                             clauses=['id NOT IN %(tasks)s', 'parent=%(parent)s', 'awaited=true'])

+                                      clauses=['id NOT IN %(tasks)s',

+                                               'parent=%(parent)s',

+                                               'awaited=true'])

              update.set(awaited=False)

              update.execute()

          else:

              logger.warning('taskSetWait called on empty task list by parent: %s', parent)

  

- 

      def taskWaitCheck(self, parent):

          """Return status of awaited subtask

  

          The return value is [finished, unfinished] where each entry

          is a list of task ids."""

-         #check to see if any of the tasks have finished

+         # check to see if any of the tasks have finished

          c = context.cnx.cursor()

          q = """

          SELECT id,state FROM task
@@ -12797,10 +13110,10 @@ 

          if tasks is None:

              # Query all finished subtasks

              states = tuple([koji.TASK_STATES[s]

-                             for s in ['CLOSED', 'FAILED','CANCELED']])

+                             for s in ['CLOSED', 'FAILED', 'CANCELED']])

              query = QueryProcessor(tables=['task'], columns=['id'],

-                         clauses=['parent=%(parent)s', 'state in %(states)s'],

-                         values=locals(), opts={'asList': True})

+                                    clauses=['parent=%(parent)s', 'state in %(states)s'],

+                                    values=locals(), opts={'asList': True})

              tasks = [r[0] for r in query.execute()]

          # Would use a dict, but xmlrpc requires the keys to be strings

          results = []
@@ -12824,13 +13137,13 @@ 

          """get status of open tasks assigned to host"""

          c = context.cnx.cursor()

          host_id = self.id

-         #query tasks

+         # query tasks

          fields = ['id', 'waiting', 'weight']

          st_open = koji.TASK_STATES['OPEN']

          q = """

          SELECT %s FROM task

          WHERE host_id = %%(host_id)s AND state = %%(st_open)s

-         """  % (",".join(fields))

+         """ % (",".join(fields))

          c.execute(q, locals())

          tasks = [dict(zip(fields, x)) for x in c.fetchall()]

          for task in tasks:
@@ -12860,9 +13173,9 @@ 

              if host['id'] == self.id:

                  break

          else:

-             #this host not in ready list

+             # this host not in ready list

              return [[], []]

-         #host is the host making the call

+         # host is the host making the call

          tasks = get_active_tasks(host)

          return [hosts, tasks]

  
@@ -12870,7 +13183,7 @@ 

          """Open next available task and return it"""

          c = context.cnx.cursor()

          id = self.id

-         #get arch and channel info for host

+         # get arch and channel info for host

          q = """

          SELECT arches FROM host_config WHERE host_id = %(id)s AND active IS TRUE

          """
@@ -12882,7 +13195,7 @@ 

          c.execute(q, locals())

          channels = [x[0] for x in c.fetchall()]

  

-         #query tasks

+         # query tasks

          fields = ['id', 'state', 'method', 'request', 'channel_id', 'arch', 'parent']

          st_free = koji.TASK_STATES['FREE']

          st_assigned = koji.TASK_STATES['ASSIGNED']
@@ -12891,7 +13204,7 @@ 

          WHERE (state = %%(st_free)s)

              OR (state = %%(st_assigned)s AND host_id = %%(id)s)

          ORDER BY priority,create_time

-         """  % (",".join(fields))

+         """ % (",".join(fields))

          c.execute(q, locals())

          for data in c.fetchall():

              data = dict(zip(fields, data))
@@ -12905,11 +13218,11 @@ 

              task = Task(data['id'])

              ret = task.open(self.id)

              if ret is None:

-                 #someone else got it while we were looking

-                 #log_error("task %s seems to be locked" % task['id'])

+                 # someone else got it while we were looking

+                 # log_error("task %s seems to be locked" % task['id'])

                  continue

              return ret

-         #else no appropriate tasks

+         # else no appropriate tasks

          return None

  

      def isEnabled(self):
@@ -12917,6 +13230,7 @@ 

          query = """SELECT enabled FROM host_config WHERE host_id = %(id)i AND active IS TRUE"""

          return _singleValue(query, {'id': self.id}, strict=True)

  

+ 

  class HostExports(object):

      '''Contains functions that are made available via XMLRPC'''

  
@@ -12972,12 +13286,12 @@ 

          for task_id in tasks:

              task = Task(task_id)

              if not task.verifyHost(host.id):

-                 #it's possible that a task was freed/reassigned since the host

-                 #last checked, so we should not raise an error

+                 # it's possible that a task was freed/reassigned since the host

+                 # last checked, so we should not raise an error

                  continue

              task.free()

-             #XXX - unfinished

-             #remove any files related to task

+             # XXX - unfinished

+             # remove any files related to task

  

      def setTaskWeight(self, task_id, weight):

          host = Host()
@@ -13018,7 +13332,7 @@ 

              WHERE parent=%(parent)s AND label=%(label)s"""

              row = _fetchSingle(q, opts)

              if row:

-                 #return task id

+                 # return task id

                  return row[0]

          if 'kwargs' in opts:

              arglist = koji.encode_args(*arglist, **opts['kwargs'])
@@ -13035,7 +13349,7 @@ 

  

          Remaining args are passed on to the subtask

          """

-         #self.subtask will verify the host

+         # self.subtask will verify the host

          args = koji.encode_args(*args, **opts)

          return self.subtask(__method, args, __parent, **__taskopts)
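(Reviewer note: a hypothetical call to illustrate the rewrapped signature -- the argument values here are illustrative only:

    # from a task handler, via the host API:
    task_id = host.subtask2(parent_id, {'label': 'srpm', 'arch': 'noarch'},
                            'buildSRPMFromSCM', url, build_tag)

Because subtask() returns the existing task id when the same parent/label pair is seen again, as shown a few hunks above, retried calls with a label are effectively idempotent.)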

  
@@ -13046,7 +13360,7 @@ 

          task = Task(task_id)

          task.assertHost(host.id)

          uploadpath = koji.pathinfo.work()

-         #verify files exist

+         # verify files exist

          for relpath in [srpm] + rpms:

              fn = "%s/%s" % (uploadpath, relpath)

              if not os.path.exists(fn):
@@ -13054,7 +13368,7 @@ 

  

          rpms = check_noarch_rpms(uploadpath, rpms, logs=logs)

  

-         #figure out storage location

+         # figure out storage location

          #  <scratchdir>/<username>/task_<id>

          scratchdir = koji.pathinfo.scratch()

          username = get_user(task.getOwner())['name']
@@ -13142,7 +13456,7 @@ 

              scratchdir = koji.pathinfo.scratch()

              username = get_user(task.getOwner())['name']

              destdir = joinpath(scratchdir, username,

-                 'task_%s' % sub_results['task_id'])

+                                'task_%s' % sub_results['task_id'])

              for img in sub_results['files'] + sub_results['logs']:

                  src = joinpath(workdir, img)

                  dest = joinpath(destdir, img)
@@ -13168,10 +13482,10 @@ 

          """

          host = Host()

          host.verify()

-         #sanity checks

+         # sanity checks

          task = Task(data['task_id'])

          task.assertHost(host.id)

-         #prep the data

+         # prep the data

          data['owner'] = task.getOwner()

          data['state'] = koji.BUILD_STATES['BUILDING']

          data['completion_time'] = None
@@ -13182,7 +13496,7 @@ 

  

      def completeBuild(self, task_id, build_id, srpm, rpms, brmap=None, logs=None):

          """Import final build contents into the database"""

-         #sanity checks

+         # sanity checks

          host = Host()

          host.verify()

          task = Task(task_id)
@@ -13203,24 +13517,24 @@ 

          # check volume policy

          vol_update = False

          policy_data = {

-                 'build': build_info,

-                 'package': build_info['name'],

-                 'import': True,

-                 'import_type': 'maven',

-                 }

+             'build': build_info,

+             'package': build_info['name'],

+             'import': True,

+             'import_type': 'maven',

+         }

          vol = check_volume_policy(policy_data, strict=False, default='DEFAULT')

          if vol['id'] != build_info['volume_id']:

              build_info['volume_id'] = vol['id']

              build_info['volume_name'] = vol['name']

              vol_update = True

  

- 

          self.importImage(task_id, build_id, results)

          ensure_volume_symlink(build_info)

  

          st_old = build_info['state']

          st_complete = koji.BUILD_STATES['COMPLETE']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

  

          update = UpdateProcessor('build', clauses=['id=%(build_id)i'],

                                   values={'build_id': build_id})
@@ -13230,7 +13544,8 @@ 

              update.set(volume_id=build_info['volume_id'])

          update.execute()

          build_info = get_build(build_id, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

  

          # send email

          build_notification(task_id, build_id)
@@ -13283,11 +13598,11 @@ 

          # check volume policy

          vol_update = False

          policy_data = {

-                 'build': build_info,

-                 'package': build_info['name'],

-                 'import': True,

-                 'import_type': 'maven',

-                 }

+             'build': build_info,

+             'package': build_info['name'],

+             'import': True,

+             'import_type': 'maven',

+         }

          vol = check_volume_policy(policy_data, strict=False, default='DEFAULT')

          if vol['id'] != build_info['volume_id']:

              build_info['volume_id'] = vol['id']
@@ -13347,7 +13662,8 @@ 

          # update build state

          st_complete = koji.BUILD_STATES['COMPLETE']

          st_old = build_info['state']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

          update = UpdateProcessor('build', clauses=['id=%(build_id)i'],

                                   values={'build_id': build_id})

          update.set(state=st_complete)
@@ -13356,7 +13672,8 @@ 

          update.rawset(completion_time='now()')

          update.execute()

          build_info = get_build(build_id, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

  

          # send email

          build_notification(task_id, build_id)
@@ -13391,12 +13708,14 @@ 

          build_info = get_build(build_id, strict=True)

  

          if build_info['state'] != koji.BUILD_STATES['COMPLETE']:

-             raise koji.GenericError('cannot import wrapper rpms for %s: build state is %s, not complete' % \

+             raise koji.GenericError(

+                 'cannot import wrapper rpms for %s: build state is %s, not complete' %

                  (koji.buildLabel(build_info), koji.BUILD_STATES[build_info['state']].lower()))

  

          if list_rpms(buildID=build_info['id']):

              # don't allow overwriting of already-imported wrapper RPMs

-             raise koji.GenericError('wrapper rpms for %s have already been imported' % koji.buildLabel(build_info))

+             raise koji.GenericError('wrapper rpms for %s have already been imported' %

+                                     koji.buildLabel(build_info))

  

          _import_wrapper(task.id, build_info, rpm_results)

  
@@ -13424,7 +13743,7 @@ 

              raise koji.GenericError('Windows support not enabled')

          host = Host()

          host.verify()

-         #sanity checks

+         # sanity checks

          task = Task(task_id)

          task.assertHost(host.id)

          # build_info must contain name, version, and release
@@ -13453,11 +13772,11 @@ 

          # check volume policy

          vol_update = False

          policy_data = {

-                 'build': build_info,

-                 'package': build_info['name'],

-                 'import': True,

-                 'import_type': 'win',

-                 }

+             'build': build_info,

+             'package': build_info['name'],

+             'import': True,

+             'import_type': 'win',

+         }

          vol = check_volume_policy(policy_data, strict=False, default='DEFAULT')

          if vol['id'] != build_info['volume_id']:

              build_info['volume_id'] = vol['id']
@@ -13473,7 +13792,8 @@ 

                  raise koji.BuildError('unsupported file type: %s' % relpath)

              filepath = joinpath(task_dir, relpath)

              metadata['relpath'] = os.path.dirname(relpath)

-             import_archive(filepath, build_info, 'win', metadata, buildroot_id=results['buildroot_id'])

+             import_archive(filepath, build_info, 'win', metadata,

+                            buildroot_id=results['buildroot_id'])

  

          # move the logs to their final destination

          for relpath in results['logs']:
@@ -13492,7 +13812,8 @@ 

          # update build state

          st_old = build_info['state']

          st_complete = koji.BUILD_STATES['COMPLETE']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

          update = UpdateProcessor('build', clauses=['id=%(build_id)i'],

                                   values={'build_id': build_id})

          update.set(state=st_complete)
@@ -13501,7 +13822,8 @@ 

          update.rawset(completion_time='now()')

          update.execute()

          build_info = get_build(build_id, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_complete, info=build_info)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='state', old=st_old, new=st_complete, info=build_info)

  

          # send email

          build_notification(task_id, build_id)
@@ -13518,7 +13840,8 @@ 

          st_failed = koji.BUILD_STATES['FAILED']

          buildinfo = get_build(build_id, strict=True)

          st_old = buildinfo['state']

-         koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=st_old, new=st_failed, info=buildinfo)

+         koji.plugin.run_callbacks('preBuildStateChange',

+                                   attribute='state', old=st_old, new=st_failed, info=buildinfo)

  

          query = """SELECT state, completion_time

          FROM build
@@ -13527,11 +13850,11 @@ 

          result = _singleRow(query, locals(), ('state', 'completion_time'))

  

          if result['state'] != koji.BUILD_STATES['BUILDING']:

-             raise koji.GenericError('cannot update build %i, state: %s' % \

-                   (build_id, koji.BUILD_STATES[result['state']]))

+             raise koji.GenericError('cannot update build %i, state: %s' %

+                                     (build_id, koji.BUILD_STATES[result['state']]))

          elif result['completion_time'] is not None:

-             raise koji.GenericError('cannot update build %i, completed at %s' % \

-                   (build_id, result['completion_time']))

+             raise koji.GenericError('cannot update build %i, completed at %s' %

+                                     (build_id, result['completion_time']))

  

          update = """UPDATE build

          SET state = %(st_failed)i,
@@ -13539,7 +13862,8 @@ 

          WHERE id = %(build_id)i"""

          _dml(update, locals())

          buildinfo = get_build(build_id, strict=True)

-         koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=st_old, new=st_failed, info=buildinfo)

+         koji.plugin.run_callbacks('postBuildStateChange',

+                                   attribute='state', old=st_old, new=st_failed, info=buildinfo)

          build_notification(task_id, build_id)

  

      def tagBuild(self, task_id, tag, build, force=False, fromtag=None):
@@ -13560,7 +13884,7 @@ 

          pkg_id = build['package_id']

          tag_id = get_tag(tag, strict=True)['id']

          user_id = task.getOwner()

-         policy_data = {'tag' : tag, 'build' : build, 'fromtag' : fromtag}

+         policy_data = {'tag': tag, 'build': build, 'fromtag': fromtag}

          policy_data['user_id'] = user_id

          if fromtag is None:

              policy_data['operation'] = 'tag'
@@ -13600,14 +13924,16 @@ 

              if 'rpmresults' in sub_results:

                  rpm_results = sub_results['rpmresults']

                  _import_wrapper(rpm_results['task_id'],

-                     get_build(build_id, strict=True), rpm_results)

+                                 get_build(build_id, strict=True), rpm_results)

  

-     def tagNotification(self, is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):

+     def tagNotification(self, is_successful, tag_id, from_id, build_id, user_id,

+                         ignore_success=False, failure_msg=''):

          """Create a tag notification message.

          Handles creation of tagNotification tasks for hosts."""

          host = Host()

          host.verify()

-         tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg)

+         tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success,

+                          failure_msg)

  

      def checkPolicy(self, name, data, default='deny', strict=False):

          host = Host()
@@ -13705,7 +14031,9 @@ 

                          archive['artifact_id'], {}).setdefault(

                              archive['version'], archive['build_id'])

              if idx_build != archive['build_id']:

-                 logger.error("Found multiple builds for %(group_id)s:%(artifact_id)s:%(version)s. Current build: %(build_id)i", archive)

+                 logger.error(

+                     "Found multiple builds for %(group_id)s:%(artifact_id)s:%(version)s. "

+                     "Current build: %(build_id)i", archive)

                  logger.error("Indexed build id was %i", idx_build)

  

          if not ignore:
@@ -13745,9 +14073,19 @@ 

                                  archive['artifact_id'], {}).setdefault(

                                      archive['version'], archive['build_id'])

                      if idx_build != archive['build_id']:

-                         logger.error("Overriding build for %(group_id)s:%(artifact_id)s:%(version)s.", archive)

-                         logger.error("Current build is %s, new build is %s.", idx_build, archive['build_id'])

-                         maven_build_index[archive['group_id']][archive['artifact_id']][archive['version']] = archive['build_id']

+                         logger.error(

+                             "Overriding build for %(group_id)s:%(artifact_id)s:%(version)s.",

+                             archive)

+                         logger.error(

+                             "Current build is %s, new build is %s.",

+                             idx_build, archive['build_id'])

+                         maven_build_index[

+                             archive['group_id']

+                         ][

+                             archive['artifact_id']

+                         ][

+                             archive['version']

+                         ] = archive['build_id']

  

          ignore.extend(task_deps.values())

  
@@ -13774,9 +14112,9 @@ 

              maven_label = koji.mavenLabel(maven_info)

              ignore_archives = ignore_by_label.get(maven_label, {})

              build_id = maven_build_index.get(

-                         maven_info['group_id'], {}).get(

-                             maven_info['artifact_id'], {}).get(

-                                 maven_info['version'])

+                 maven_info['group_id'], {}).get(

+                 maven_info['artifact_id'], {}).get(

+                 maven_info['version'])

              if not build_id:

                  if not ignore_unknown:

                      # just warn for now. might be in ignore list. the loop below will check.
@@ -13798,23 +14136,29 @@ 

                      pass

                  else:

                      if not ignore_unknown:

-                         logger.error("Unknown file for %(group_id)s:%(artifact_id)s:%(version)s", maven_info)

+                         logger.error("Unknown file for %(group_id)s:%(artifact_id)s:%(version)s",

+                                      maven_info)

                          if build_id:

                              build = get_build(build_id)

                              logger.error("g:a:v supplied by build %(nvr)s", build)

-                             logger.error("Build supplies %i archives: %r", len(build_archives), to_list(build_archives.keys()))

+                             logger.error("Build supplies %i archives: %r",

+                                          len(build_archives), to_list(build_archives.keys()))

                          if tag_archive:

-                             logger.error("Size mismatch, br: %i, db: %i", fileinfo['size'], tag_archive['size'])

-                         raise koji.BuildrootError('Unknown file in build environment: %s, size: %s' % \

-                               ('%s/%s' % (fileinfo['path'], fileinfo['filename']), fileinfo['size']))

+                             logger.error("Size mismatch, br: %i, db: %i",

+                                          fileinfo['size'], tag_archive['size'])

+                         raise koji.BuildrootError(

+                             'Unknown file in build environment: %s, size: %s' %

+                             ('%s/%s' % (fileinfo['path'], fileinfo['filename']), fileinfo['size']))

  

          return br.updateArchiveList(archives, project)

  

-     def repoInit(self, tag, with_src=False, with_debuginfo=False, event=None, with_separate_src=False):

+     def repoInit(self, tag, with_src=False, with_debuginfo=False, event=None,

+                  with_separate_src=False):

          """Initialize a new repo for tag"""

          host = Host()

          host.verify()

-         return repo_init(tag, with_src=with_src, with_debuginfo=with_debuginfo, event=event, with_separate_src=with_separate_src)

+         return repo_init(tag, with_src=with_src, with_debuginfo=with_debuginfo, event=event,

+                          with_separate_src=with_separate_src)

  

      def repoDone(self, repo_id, data, expire=False):

          """Finalize a repo
@@ -13863,26 +14207,25 @@ 

              repo_expire(repo_id)

              koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)

              return

-         #else:

+         # else:

          repo_ready(repo_id)

          repo_expire_older(rinfo['tag_id'], rinfo['create_event'], rinfo['dist'])

  

-         #make a latest link

+         # make a latest link

          if rinfo['dist']:

              latestrepolink = koji.pathinfo.distrepo('latest', rinfo['tag_name'])

          else:

              latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])

-             #XXX - this is a slight abuse of pathinfo

+             # XXX - this is a slight abuse of pathinfo

          try:

              if os.path.lexists(latestrepolink):

                  os.unlink(latestrepolink)

              os.symlink(str(repo_id), latestrepolink)

          except OSError:

-             #making this link is nonessential

+             # making this link is nonessential

              log_error("Unable to create latest link for repo: %s" % repodir)

          koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)

  

- 

      def distRepoMove(self, repo_id, uploadpath, arch):

          """

          Move one arch of a dist repo into its final location
@@ -13941,7 +14284,7 @@ 

          build_dirs = {}

          rpmdata = {}

          rpm_check_keys = ['name', 'version', 'release', 'arch', 'epoch',

-                 'size', 'payloadhash', 'build_id']

+                           'size', 'payloadhash', 'build_id']

          for bnp in kojipkgs:

              rpminfo = kojipkgs[bnp]

              rpm_id = rpminfo['id']
@@ -13950,8 +14293,8 @@ 

              for key in rpm_check_keys:

                  if key not in rpminfo or rpminfo[key] != _rpminfo[key]:

                      raise koji.GenericError(

-                             'kojipkgs entry does not match db: file %s, key %s'

-                             % (bnp, key))

+                         'kojipkgs entry does not match db: file %s, key %s'

+                         % (bnp, key))

              if sigkey is None or sigkey == '':

                  relpath = koji.pathinfo.rpm(rpminfo)

              else:
@@ -13994,7 +14337,6 @@ 

                  else:

                      raise

  

- 

      def isEnabled(self):

          host = Host()

          host.verify()
@@ -14037,12 +14379,14 @@ 

              if os.path.exists(u_fn):

                  user_id = int(open(u_fn, 'r').read())

                  if context.session.user_id != user_id:

-                     raise koji.GenericError("Invalid upload directory, not owner: %s" % orig_reldir)

+                     raise koji.GenericError("Invalid upload directory, not owner: %s" %

+                                             orig_reldir)

              else:

                  with open(u_fn, 'w') as fo:

                      fo.write(str(context.session.user_id))

      return joinpath(udir, name)

  

+ 

  def get_verify_class(verify):

      if verify == 'md5':

          return hashlib.md5
@@ -14061,7 +14405,7 @@ 

      if not context.session.logged_in:

          raise koji.ActionNotAllowed('you must be logged-in to upload a file')

      args = parse_qs(environ.get('QUERY_STRING', ''), strict_parsing=True)

-     #XXX - already parsed by auth

+     # XXX - already parsed by auth

      name = args['filename'][0]

      path = args.get('filepath', ('',))[0]

      verify = args.get('fileverify', ('',))[0]
@@ -14082,7 +14426,7 @@ 

      fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0o666)

      try:

          try:

-             fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)

+             fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

          except IOError as e:

              raise koji.LockError(e)

          if offset == -1:
@@ -14111,6 +14455,6 @@ 

          ret['hexdigest'] = chksum.hexdigest()

      logger.debug("Upload result: %r", ret)

      logger.info("Completed upload for session %s (#%s): %f seconds, %i bytes, %s",

-                     context.session.id, context.session.callnum,

-                     time.time()-start, size, fn)

+                 context.session.id, context.session.callnum,

+                 time.time() - start, size, fn)

      return ret
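(Reviewer note: for context on the reflowed logging call -- this upload handler is driven entirely by the query-string parameters parsed above (filename, filepath, fileverify, offset). A sketch of the chunked-upload pattern a client would follow, with hypothetical paths and sizes:

    # pseudo-client, one chunk per request:
    #   POST /kojihub?filename=foo.rpm&filepath=work/tasks/123&fileverify=sha256&offset=0
    #   POST ... &offset=65536
    #   ...
    #   POST ... &offset=-1   # per the offset == -1 branch above, a final
    #                         # verification pass reporting size and hexdigest

The locked os.open/fcntl.lockf sequence means concurrent chunk writes to the same file are serialized, with contention surfacing as koji.LockError.)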

file modified
+65 -45
@@ -64,7 +64,7 @@ 

  

      def __init__(self):

          self.funcs = {}

-         #introspection functions

+         # introspection functions

          self.register_function(self.list_api, name="_listapi")

          self.register_function(self.system_listMethods, name="system.listMethods")

          self.register_function(self.system_methodSignature, name="system.methodSignature")
@@ -93,7 +93,7 @@ 

              if not callable(function):

                  continue

              if prefix is not None:

-                 name = "%s.%s" %(prefix, name)

+                 name = "%s.%s" % (prefix, name)

              self.register_function(function, name=name)

  

      def register_instance(self, instance):
@@ -106,7 +106,7 @@ 

          """

          for v in six.itervalues(vars(plugin)):

              if isinstance(v, type):

-                 #skip classes

+                 # skip classes

                  continue

              if callable(v):

                  if getattr(v, 'exported', False):
@@ -128,7 +128,7 @@ 

              # bound method, remove first arg

              args, varargs, varkw, defaults = ret

              if args:

-                 aname = args[0] #generally "self"

+                 aname = args[0]  # generally "self"

                  del args[0]

                  if defaults and aname in defaults:

                      # shouldn't happen, but...
@@ -138,8 +138,8 @@ 

      def list_api(self):

          funcs = []

          for name, func in self.funcs.items():

-             #the keys in self.funcs determine the name of the method as seen over xmlrpc

-             #func.__name__ might differ (e.g. for dotted method names)

+             # the keys in self.funcs determine the name of the method as seen over xmlrpc

+             # func.__name__ might differ (e.g. for dotted method names)

              args = self._getFuncArgs(func)

              argspec = self.getargspec(func)

              funcs.append({'name': name,
@@ -155,7 +155,9 @@ 

              if x == 0 and func.__code__.co_varnames[x] == "self":

                  continue

              if func.__defaults__ and func.__code__.co_argcount - x <= len(func.__defaults__):

-                 args.append((func.__code__.co_varnames[x], func.__defaults__[x - func.__code__.co_argcount + len(func.__defaults__)]))

+                 args.append(

+                     (func.__code__.co_varnames[x],

+                      func.__defaults__[x - func.__code__.co_argcount + len(func.__defaults__)]))

              else:

                  args.append(func.__code__.co_varnames[x])

          return args
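(Reviewer note: the index arithmetic that got rewrapped here is easier to check with a concrete case. For

    def f(self, a, b=1, c=2): ...

co_argcount is 4, co_varnames begins ('self', 'a', 'b', 'c'), and __defaults__ is (1, 2). At x=2 ('b'): 4 - 2 = 2 <= len((1, 2)), and the default index is 2 - 4 + 2 = 0, i.e. the value 1 -- so the method yields ['a', ('b', 1), ('c', 2)].)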
@@ -164,7 +166,7 @@ 

          return koji.util.to_list(self.funcs.keys())

  

      def system_methodSignature(self, method):

-         #it is not possible to autogenerate this data

+         # it is not possible to autogenerate this data

          return 'signatures not supported'

  

      def system_methodHelp(self, method):
@@ -202,7 +204,7 @@ 

  

      def __init__(self, handlers):

          self.traceback = False

-         self.handlers = handlers  #expecting HandlerRegistry instance

+         self.handlers = handlers  # expecting HandlerRegistry instance

          self.logger = logging.getLogger('koji.xmlrpc')

  

      def _get_handler(self, name):
@@ -240,7 +242,7 @@ 

          except Fault as fault:

              self.traceback = True

              response = dumps(fault, marshaller=Marshaller)

-         except:

+         except Exception:

              self.traceback = True

              # report exception back to server

              e_class, e = sys.exc_info()[:2]
@@ -268,7 +270,7 @@ 

          return response

  

      def handle_upload(self, environ):

-         #uploads can't be in a multicall

+         # uploads can't be in a multicall

          context.method = None

          self.check_session()

          self.enforce_lockout()
@@ -280,20 +282,20 @@ 

  

      def check_session(self):

          if not hasattr(context, "session"):

-             #we may be called again by one of our meta-calls (like multiCall)

-             #so we should only create a session if one does not already exist

+             # we may be called again by one of our meta-calls (like multiCall)

+             # so we should only create a session if one does not already exist

              context.session = koji.auth.Session()

              try:

                  context.session.validate()

              except koji.AuthLockError:

-                 #might be ok, depending on method

+                 # might be ok, depending on method

                  if context.method not in ('exclusiveSession', 'login', 'krbLogin', 'logout'):

                      raise

  

      def enforce_lockout(self):

          if context.opts.get('LockOut') and \

-             context.method not in ('login', 'krbLogin', 'sslLogin', 'logout') and \

-             not context.session.hasPerm('admin'):

+                 context.method not in ('login', 'krbLogin', 'sslLogin', 'logout') and \

+                 not context.session.hasPerm('admin'):

              raise koji.ServerOffline("Server disabled for maintenance")

  

      def _dispatch(self, method, params):
@@ -307,7 +309,7 @@ 

  

          if self.logger.isEnabledFor(logging.INFO):

              self.logger.info("Handling method %s for session %s (#%s)",

-                             method, context.session.id, context.session.callnum)

+                              method, context.session.id, context.session.callnum)

              if method != 'uploadFile' and self.logger.isEnabledFor(logging.DEBUG):

                  self.logger.debug("Params: %s", pprint.pformat(params))

                  self.logger.debug("Opts: %s", pprint.pformat(opts))
@@ -317,10 +319,11 @@ 

  

          if self.logger.isEnabledFor(logging.INFO):

              rusage = resource.getrusage(resource.RUSAGE_SELF)

-             self.logger.info("Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f",

-                             method, context.session.id, context.session.callnum,

-                             time.time()-start,

-                             rusage.ru_maxrss, rusage.ru_stime)

+             self.logger.info(

+                 "Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f",

+                 method, context.session.id, context.session.callnum,

+                 time.time() - start,

+                 rusage.ru_maxrss, rusage.ru_stime)

  

          return ret

  
@@ -344,8 +347,11 @@ 

                  faultCode = getattr(exc_type, 'faultCode', 1)

                  faultString = ', '.join(exc_value.args)

                  trace = traceback.format_exception(*sys.exc_info())

-                 # traceback is not part of the multicall spec, but we include it for debugging purposes

-                 results.append({'faultCode': faultCode, 'faultString': faultString, 'traceback': trace})

+                 # traceback is not part of the multicall spec,

+                 # but we include it for debugging purposes

+                 results.append({'faultCode': faultCode,

+                                 'faultString': faultString,

+                                 'traceback': trace})

              else:

                  results.append([result])
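(Reviewer note: the reflow does not change the result structure. A multiCall response is a list with one entry per call -- a one-element list on success, or a fault-info dict on error, e.g.:

    [[build_id], {'faultCode': 1000, 'faultString': '...', 'traceback': [...]}]

the 'traceback' key being the non-standard debugging extra the comment mentions.)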

  
@@ -359,7 +365,7 @@ 

          """Handle a single XML-RPC request"""

  

          pass

-         #XXX no longer used

+         # XXX no longer used

  

  

  def offline_reply(start_response, msg=None):
@@ -379,6 +385,7 @@ 

      start_response('200 OK', headers)

      return [response]

  

+ 

  def load_config(environ):

      """Load configuration options

  
@@ -395,14 +402,13 @@ 

          - all PythonOptions (except ConfigFile) are now deprecated and support for them

            will disappear in a future version of Koji

      """

-     logger = logging.getLogger("koji")

-     #get our config file(s)

+     # get our config file(s)

      cf = environ.get('koji.hub.ConfigFile', '/etc/koji-hub/hub.conf')

      cfdir = environ.get('koji.hub.ConfigDir', '/etc/koji-hub/hub.conf.d')

      config = koji.read_config_files([cfdir, (cf, True)], raw=True)

  

      cfgmap = [

-         #option, type, default

+         # option, type, default

          ['DBName', 'string', None],

          ['DBUser', 'string', None],

          ['DBHost', 'string', None],
@@ -438,7 +444,9 @@ 

          ['VerbosePolicy', 'boolean', False],

  

          ['LogLevel', 'string', 'WARNING'],

-         ['LogFormat', 'string', '%(asctime)s [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s %(name)s: %(message)s'],

+         ['LogFormat', 'string',

+          '%(asctime)s [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s '

+          '%(name)s: %(message)s'],

  

          ['MissingPolicyOk', 'boolean', True],

          ['EnableMaven', 'boolean', False],
@@ -480,7 +488,7 @@ 

      # load policies

      # (only from config file)

      if config and config.has_section('policy'):

-         #for the moment, we simply transfer the policy conf to opts

+         # for the moment, we simply transfer the policy conf to opts

          opts['policy'] = dict(config.items('policy'))

      else:

          opts['policy'] = {}
@@ -505,11 +513,12 @@ 

              tracker.load(name)

          except Exception:

              logger.error(''.join(traceback.format_exception(*sys.exc_info())))

-             #make this non-fatal, but set ServerOffline

+             # make this non-fatal, but set ServerOffline

              opts['ServerOffline'] = True

              opts['OfflineMessage'] = 'configuration error'

      return tracker

  

+ 

  _default_policies = {

      'build_from_srpm': '''

              has_perm admin :: allow
@@ -540,10 +549,11 @@ 

              ''',

  }
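(Reviewer note: with the default policy text now easier to read, a hedged example of how a deployment would override it in hub.conf -- rule lines follow the same 'test args :: action' form used above:

    [policy]
    build_from_srpm =
        has_perm admin :: allow
        all :: deny

load_config() transfers this [policy] section verbatim into opts['policy'], as shown earlier in this file.)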

  

+ 

  def get_policy(opts, plugins):

      if not opts.get('policy'):

          return

-     #first find available policy tests

+     # first find available policy tests

      alltests = [koji.policy.findSimpleTests([vars(kojihub), vars(koji.policy)])]

      # we delay merging these to allow a test to be overridden for a specific policy

      for plugin_name in opts.get('Plugins', '').split():
@@ -553,7 +563,7 @@ 

          alltests.append(koji.policy.findSimpleTests(vars(plugin)))

      policy = {}

      for pname, text in six.iteritems(opts['policy']):

-         #filter/merge tests

+         # filter/merge tests

          merged = {}

          for tests in alltests:

              # tests can be limited to certain policies by setting a class variable
@@ -594,22 +604,24 @@ 

              record.user_name = None

          return logging.Formatter.format(self, record)

  

+ 

  def setup_logging1():

      """Set up basic logging, before options are loaded"""

      global log_handler

      logger = logging.getLogger("koji")

      logger.setLevel(logging.WARNING)

-     #stderr logging (stderr goes to httpd logs)

+     # stderr logging (stderr goes to httpd logs)

      log_handler = logging.StreamHandler()

      log_format = '%(asctime)s [%(levelname)s] SETUP p=%(process)s %(name)s: %(message)s'

      log_handler.setFormatter(HubFormatter(log_format))

      log_handler.setLevel(logging.DEBUG)

      logger.addHandler(log_handler)

  

+ 

  def setup_logging2(opts):

      global log_handler

      """Adjust logging based on configuration options"""

-     #determine log level

+     # determine log level

      level = opts['LogLevel']

      valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')

      # the config value can be a single level name or a series of
@@ -625,7 +637,7 @@ 

              default = level

          if level not in valid_levels:

              raise koji.GenericError("Invalid log level: %s" % level)

-         #all our loggers start with koji

+         # all our loggers start with koji

          if name == '':

              name = 'koji'

              default = level
@@ -640,9 +652,9 @@ 

      if opts.get('KojiDebug'):

          logger.setLevel(logging.DEBUG)

      elif default is None:

-         #LogLevel did not configure a default level

+         # LogLevel did not configure a default level

          logger.setLevel(logging.WARNING)

-     #log_handler defined in setup_logging1

+     # log_handler defined in setup_logging1

      log_handler.setFormatter(HubFormatter(opts['LogFormat']))
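(Reviewer note: the rewrapped code implements the scheme the comment above describes -- LogLevel may be a single level or space-separated logger=level pairs, with a bare level acting as the default for the 'koji' root. A hypothetical hub.conf line (logger names here are illustrative):

    LogLevel = WARNING koji.db=DEBUG koji.xmlrpc=INFO

would leave most 'koji' loggers at WARNING while raising verbosity for the two named child loggers.)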

  

  
@@ -656,10 +668,12 @@ 

  

  def get_memory_usage():

      pagesize = resource.getpagesize()

-     statm = [pagesize * int(y) // 1024 for y in "".join(open("/proc/self/statm").readlines()).strip().split()]

+     statm = [pagesize * int(y) // 1024

+              for y in "".join(open("/proc/self/statm").readlines()).strip().split()]

      size, res, shr, text, lib, data, dirty = statm

      return res - shr
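(Reviewer note: worked numbers for the reflowed comprehension -- /proc/self/statm reports page counts, and each entry is scaled to KiB via pagesize * count // 1024. With a 4096-byte page size, res = 50000 pages becomes 200000 KiB and shr = 10000 pages becomes 40000 KiB, so the function returns 160000 KiB of non-shared resident memory.)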

  

+ 

  def server_setup(environ):

      global opts, plugins, registry, policy

      logger = logging.getLogger('koji')
@@ -683,7 +697,7 @@ 

          opts = {

              'ServerOffline': True,

              'OfflineMessage': 'server startup error',

-             }

+         }

  

  

  #
@@ -693,6 +707,7 @@ 

  firstcall = True

  firstcall_lock = threading.Lock()

  

+ 

  def application(environ, start_response):

      global firstcall

      if firstcall:
@@ -707,7 +722,8 @@ 

              ('Allow', 'POST'),

          ]

          start_response('405 Method Not Allowed', headers)

-         response = "Method Not Allowed\nThis is an XML-RPC server. Only POST requests are accepted."

+         response = "Method Not Allowed\n" \

+                    "This is an XML-RPC server. Only POST requests are accepted."

          if six.PY3:

              response = response.encode()

          headers = [
@@ -747,7 +763,7 @@ 

              ]

              start_response('200 OK', headers)

              if h.traceback:

-                 #rollback

+                 # rollback

                  context.cnx.rollback()

              elif context.commit_pending:

                  # Currently there is not much data we can provide to the
@@ -761,18 +777,22 @@ 

                  paramstr = repr(getattr(context, 'params', 'UNKNOWN'))

                  if len(paramstr) > 120:

                      paramstr = paramstr[:117] + "..."

-                 h.logger.warning("Memory usage of process %d grew from %d KiB to %d KiB (+%d KiB) processing request %s with args %s" % (os.getpid(), memory_usage_at_start, memory_usage_at_end, memory_usage_at_end - memory_usage_at_start, context.method, paramstr))

+                 h.logger.warning(

+                     "Memory usage of process %d grew from %d KiB to %d KiB (+%d KiB) processing "

+                     "request %s with args %s" %

+                     (os.getpid(), memory_usage_at_start, memory_usage_at_end,

+                      memory_usage_at_end - memory_usage_at_start, context.method, paramstr))

              h.logger.debug("Returning %d bytes after %f seconds", len(response),

-                         time.time() - start)

+                            time.time() - start)

          finally:

-             #make sure context gets cleaned up

+             # make sure context gets cleaned up

              if hasattr(context, 'cnx'):

                  try:

                      context.cnx.close()

                  except Exception:

                      pass

              context._threadclear()

-         return [response] #XXX

+         return [response]  # XXX

  

  

  def get_registry(opts, plugins):

file modified
+314 -212
@@ -75,7 +75,7 @@ 

      from OpenSSL.SSL import Error as SSL_Error

  except Exception:  # pragma: no cover

      # the hub imports koji, and sometimes this import fails there

-     # see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine

+     # see: https://cryptography.io/en/latest/faq/#starting-cryptography-using-mod-wsgi-produces-an-internalerror-during-a-call-in-register-osrandom-engine  # noqa: E501

      # unfortunately the workaround at the above link does not always work, so

      # we ignore it here

      pass
@@ -91,12 +91,14 @@ 

  

  PROFILE_MODULES = {}  # {module_name: module_instance}

  

+ 

  def _(args):

      """Stub function for translation"""

      return args  # pragma: no cover

  

  ## Constants ##

  

+ 

  RPM_HEADER_MAGIC = six.b('\x8e\xad\xe8')

  RPM_TAG_HEADERSIGNATURES = 62

  RPM_TAG_FILEDIGESTALGO = 5011
@@ -108,14 +110,14 @@ 

      # Taken from RFC 4880

      # A missing algo ID means md5

      None: 'MD5',

-     1:    'MD5',

-     2:    'SHA1',

-     3:    'RIPEMD160',

-     8:    'SHA256',

-     9:    'SHA384',

-     10:   'SHA512',

-     11:   'SHA224'

-     }

+     1: 'MD5',

+     2: 'SHA1',

+     3: 'RIPEMD160',

+     8: 'SHA256',

+     9: 'SHA384',

+     10: 'SHA512',

+     11: 'SHA224'

+ }

  

  # rpm 4.12 introduces optional deps, but they can also be backported in some

  # rpm installations. So, we need to check their real support, not only rpm
@@ -128,7 +130,8 @@ 

          'RECOMMENDNAME', 'RECOMMENDVERSION', 'RECOMMENDFLAGS'):

      SUPPORTED_OPT_DEP_HDRS[h] = hasattr(rpm, 'RPMTAG_%s' % h)

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

+ 

  

  class Enum(dict):

      """A simple class to track our enumerated constants
@@ -167,7 +170,7 @@ 

  

      # deprecated

      getvalue = _notImplemented

-     #read-only

+     # read-only

      __setitem__ = _notImplemented

      __delitem__ = _notImplemented

      clear = _notImplemented
@@ -176,7 +179,8 @@ 

      update = _notImplemented

      setdefault = _notImplemented

  

- ## END kojikamid dup

+ # END kojikamid dup #

+ 

  

  API_VERSION = 1
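(Reviewer note: a short usage sketch of the Enum type defined above, inferred from how it is used elsewhere in this diff, e.g. koji.BR_STATES mapping both ways:

    states = Enum(('INIT', 'WAITING', 'BUILDING', 'EXPIRED'))
    states['INIT']    # -> 0
    states[0]         # -> 'INIT'
    states['X'] = 1   # raises, via the _notImplemented stubs above

All of the mutating dict methods are stubbed out, so instances are effectively read-only.)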

  
@@ -215,7 +219,7 @@ 

  AUTHTYPE_SSL = 2

  AUTHTYPE_GSSAPI = 3

  

- #dependency types

+ # dependency types

  DEP_REQUIRE = 0

  DEP_PROVIDE = 1

  DEP_OBSOLETE = 2
@@ -225,7 +229,7 @@ 

  DEP_SUPPLEMENT = 6

  DEP_RECOMMEND = 7

  

- #dependency flags

+ # dependency flags

  RPMSENSE_LESS = 2

  RPMSENSE_GREATER = 4

  RPMSENSE_EQUAL = 8
@@ -266,7 +270,7 @@ 

      'MANUAL',

  ))

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

  

  CHECKSUM_TYPES = Enum((

      'md5',
@@ -274,9 +278,9 @@ 

      'sha256',

  ))

  

- ## END kojikamid dup

+ # END kojikamid dup #

  

- #PARAMETERS

+ # PARAMETERS

  BASEDIR = '/mnt/koji'

  # default task priority

  PRIO_DEFAULT = 20
@@ -285,116 +289,141 @@ 

  DEFAULT_REQUEST_TIMEOUT = 60 * 60 * 12

  DEFAULT_AUTH_TIMEOUT = 60

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

+ 

+ # Exceptions

+ PythonImportError = ImportError  # will be masked by koji's one

  

- #Exceptions

- PythonImportError = ImportError # will be masked by koji's one

  

  class GenericError(Exception):

      """Base class for our custom exceptions"""

      faultCode = 1000

      fromFault = False

+ 

      def __str__(self):

          try:

              return str(self.args[0]['args'][0])

-         except:

+         except Exception:

              try:

                  return str(self.args[0])

-             except:

+             except Exception:

                  return str(self.__dict__)

- ## END kojikamid dup

+ # END kojikamid dup #

+ 

  

  class LockError(GenericError):

      """Raised when there is a lock conflict"""

      faultCode = 1001

  

+ 

  class AuthError(GenericError):

      """Raised when there is an error in authentication"""

      faultCode = 1002

  

+ 

  class TagError(GenericError):

      """Raised when a tagging operation fails"""

      faultCode = 1003

  

+ 

  class ActionNotAllowed(GenericError):

      """Raised when the session does not have permission to take some action"""

      faultCode = 1004

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

+ 

  

  class BuildError(GenericError):

      """Raised when a build fails"""

      faultCode = 1005

- ## END kojikamid dup

+ # END kojikamid dup #

+ 

  

  class AuthLockError(AuthError):

      """Raised when a lock prevents authentication"""

      faultCode = 1006

  

+ 

  class AuthExpired(AuthError):

      """Raised when a session has expired"""

      faultCode = 1007

  

+ 

  class SequenceError(AuthError):

      """Raised when requests are received out of sequence"""

      faultCode = 1008

  

+ 

  class RetryError(AuthError):

      """Raised when a request is received twice and cannot be rerun"""

      faultCode = 1009

  

+ 

  class PreBuildError(BuildError):

      """Raised when a build fails during pre-checks"""

      faultCode = 1010

  

+ 

  class PostBuildError(BuildError):

      """Raised when a build fails during post-checks"""

      faultCode = 1011

  

+ 

  class BuildrootError(BuildError):

      """Raised when there is an error with the buildroot"""

      faultCode = 1012

  

+ 

  class FunctionDeprecated(GenericError):

      """Raised by a deprecated function"""

      faultCode = 1013

  

+ 

  class ServerOffline(GenericError):

      """Raised when the server is offline"""

      faultCode = 1014

  

+ 

  class LiveCDError(GenericError):

      """Raised when LiveCD Image creation fails"""

      faultCode = 1015

  

+ 

  class PluginError(GenericError):

      """Raised when there is an error with a plugin"""

      faultCode = 1016

  

+ 

  class CallbackError(PluginError):

      """Raised when there is an error executing a callback"""

      faultCode = 1017

  

+ 

  class ApplianceError(GenericError):

      """Raised when Appliance Image creation fails"""

      faultCode = 1018

  

+ 

  class ParameterError(GenericError):

      """Raised when an rpc call receives incorrect arguments"""

      faultCode = 1019

  

+ 

  class ImportError(GenericError):

      """Raised when an import fails"""

      faultCode = 1020

  

+ 

  class ConfigurationError(GenericError):

      """Raised when load of koji configuration fails"""

      faultCode = 1021

  

+ 

  class LiveMediaError(GenericError):

      """Raised when LiveMedia Image creation fails"""

      faultCode = 1022

  

+ 

  class MultiCallInProgress(object):

      """

      Placeholder class to be returned by method calls when in the process of
@@ -403,7 +432,7 @@ 

      pass

  

  

- #A function to get create an exception from a fault

+ # A function to create an exception from a fault

  def convertFault(fault):

      """Convert a fault to the corresponding Exception type, if possible"""

      code = getattr(fault, 'faultCode', None)
@@ -415,9 +444,10 @@ 

              ret = v(fault.faultString)

              ret.fromFault = True

              return ret

-     #otherwise...

+     # otherwise...

      return fault
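(Reviewer note: typical usage of convertFault, with an illustrative call name -- client code catches an xmlrpc Fault and re-raises it as the koji exception class matching its faultCode:

    try:
        proxy.someHubCall()
    except Fault as fault:
        raise koji.convertFault(fault)   # e.g. faultCode 1005 -> koji.BuildError

Unmatched codes fall through and the original fault is returned unchanged.)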

  

+ 

  def listFaults():

      """Return a list of faults

  
@@ -440,7 +470,8 @@ 

      ret.sort(key=lambda x: x['faultCode'])

      return ret

  

- #functions for encoding/decoding optional arguments

+ # functions for encoding/decoding optional arguments

+ 

  

  def encode_args(*args, **opts):

      """The function encodes optional arguments as regular arguments.
@@ -453,6 +484,7 @@ 

          args = args + (opts,)

      return args

  

+ 

  def decode_args(*args):

      """Decodes optional arguments from a flat argument list

  
@@ -468,6 +500,7 @@ 

              args = args[:-1]

      return args, opts

  

+ 

  def decode_args2(args, names, strict=True):

      "An alternate form of decode_args, returns a dictionary"

      args, opts = decode_args(*args)
@@ -477,14 +510,16 @@ 

      ret.update(opts)

      return ret
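
A minimal sketch of the round trip through these helpers (illustrative values):

    flat = encode_args('tagBuild', 'f30-candidate', force=True)
    # keyword options ride along as a trailing, specially-marked dict
    args, opts = decode_args(*flat)
    # args -> ('tagBuild', 'f30-candidate'), opts -> {'force': True}
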

  

+ 

  def decode_int(n):

      """If n is not an integer, attempt to convert it"""

      if isinstance(n, six.integer_types):

          return n

-     #else

+     # else

      return int(n)

  

- #commonly used functions

+ # commonly used functions

+ 

  

  def safe_xmlrpc_loads(s):

      """Load xmlrpc data from a string, but catch faults"""
@@ -493,7 +528,7 @@ 

      except Fault as f:

          return f

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

  

  

  def ensuredir(directory):
@@ -521,14 +556,15 @@ 

          # note: if head is blank, then we've reached the top of a relative path

          try:

              os.mkdir(directory)

-         except OSError as e:

+         except OSError:

          # do not throw when dir already exists (could happen in a race)

              if not os.path.isdir(directory):

                  # something else must have gone wrong

                  raise

      return directory

  

- ## END kojikamid dup

+ # END kojikamid dup #
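
Usage of ensuredir is idempotent, e.g. (hypothetical path):

    ensuredir('/tmp/koji-work/tasks/1234')   # creates missing parents, tolerates
                                             # a concurrent mkdir of the same dir
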

+ 

  

  def daemonize():

      """Detach and run in background"""
@@ -537,12 +573,12 @@ 

          os._exit(0)

      os.setsid()

      signal.signal(signal.SIGHUP, signal.SIG_IGN)

-     #fork again

+     # fork again

      pid = os.fork()

      if pid:

          os._exit(0)

      os.chdir("/")

-     #redirect stdin/stdout/sterr

+     # redirect stdin/stdout/stderr

      fd0 = os.open('/dev/null', os.O_RDONLY)

      fd1 = os.open('/dev/null', os.O_RDWR)

      fd2 = os.open('/dev/null', os.O_RDWR)
@@ -553,6 +589,7 @@ 

      os.close(fd1)

      os.close(fd2)

  

+ 

  def multibyte(data):

      """Convert a list of bytes to an integer (network byte order)"""

      sum = 0
@@ -561,6 +598,7 @@ 

          sum += data[i] << (8 * (n - i - 1))

      return sum
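
For example, interpreting byte lists big-endian:

    multibyte([0x01, 0x02])   # -> 258 (0x01 << 8 | 0x02)
    multibyte([0, 0, 1, 0])   # -> 256
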

  

+ 

  def find_rpm_sighdr(path):

      """Finds the offset and length of the signature header."""

      # see Maximum RPM Appendix A: Format of the RPM File
@@ -570,6 +608,7 @@ 

      sigsize = rpm_hdr_size(path, sig_start)

      return (sig_start, sigsize)

  

+ 

  def rpm_hdr_size(f, ofs=None):

      """Returns the length (in bytes) of the rpm header

  
@@ -580,7 +619,7 @@ 

          fo = open(f, 'rb')

      else:

          fo = f

-     if ofs != None:

+     if ofs is not None:

          fo.seek(ofs, 0)

      magic = fo.read(3)

      if magic != RPM_HEADER_MAGIC:
@@ -597,7 +636,7 @@ 

      il = multibyte(data[0:4])

      dl = multibyte(data[4:8])

  

-     #this is what the section data says the size should be

+     # this is what the section data says the size should be

      hdrsize = 8 + 16 * il + dl

  

      # hdrsize rounded up to nearest 8 bytes
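
The arithmetic, worked for illustrative values of il and dl (the bitwise step is one standard way to express the 8-byte rounding described above):

    il, dl = 4, 100
    hdrsize = 8 + 16 * il + dl     # 172: count words + index entries + data store
    hdrsize = (hdrsize + 7) & ~7   # 176: padded to the next 8-byte boundary
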
@@ -624,7 +663,7 @@ 

          self._index()

  

      def version(self):

-         #fourth byte is the version

+         # fourth byte is the version

          return _ord(self.header[3])

  

      def _index(self):
@@ -635,38 +674,39 @@ 

          il = multibyte(data[:4])

          dl = multibyte(data[4:8])

  

-         #read the index (starts at offset 16)

+         # read the index (starts at offset 16)

          index = {}

          for i in range(il):

              entry = []

              for j in range(4):

-                 ofs = 16 + i*16 + j*4

-                 data = [_ord(x) for x in self.header[ofs:ofs+4]]

+                 ofs = 16 + i * 16 + j * 4

+                 data = [_ord(x) for x in self.header[ofs:ofs + 4]]

                  entry.append(multibyte(data))

-             #print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))

+ 

+             # print("Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry))

              index[entry[0]] = entry

          self.datalen = dl

          self.index = index

  

      def dump(self):

          print("HEADER DUMP:")

-         #calculate start of store

+         # calculate start of store

          il = len(self.index)

          store = 16 + il * 16

-         #print("start is: %d" % start)

-         #print("index length: %d" % il)

+         # print("start is: %d" % start)

+         # print("index length: %d" % il)

          print("Store at offset %d (%0x)" % (store, store))

-         #sort entries by offset, dtype

-         #also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count

+         # sort entries by offset, dtype

+         # also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count

          order = sorted([(x[2], x[1], x[0], x[3]) for x in six.itervalues(self.index)])

          next = store

-         #map some rpmtag codes

+         # map some rpmtag codes

          tags = {}

          for name, code in six.iteritems(rpm.__dict__):

              if name.startswith('RPMTAG_') and isinstance(code, int):

                  tags[code] = name[7:].lower()

          for entry in order:

-             #tag, dtype, offset, count = entry

+             # tag, dtype, offset, count = entry

              offset, dtype, tag, count = entry

              pos = store + offset

              if next is not None:
@@ -676,23 +716,23 @@ 

                      print("Data: %r" % self.header[next:pos])

                  elif pos < next:

                      print("** OVERLAPPING entries")

-             print("Tag: %d [%s], Type: %d, Offset: %x, Count: %d" \

-                     % (tag, tags.get(tag, '?'), dtype, offset, count))

+             print("Tag: %d [%s], Type: %d, Offset: %x, Count: %d"

+                   % (tag, tags.get(tag, '?'), dtype, offset, count))

              if dtype == 0:

-                 #null

+                 # null

                  print("[NULL entry]")

                  next = pos

              elif dtype == 1:

-                 #char

+                 # char

                  for i in range(count):

                      print("Char: %r" % self.header[pos])

                      pos += 1

                  next = pos

              elif dtype >= 2 and dtype <= 5:

-                 #integer

+                 # integer

                  n = 1 << (dtype - 2)

                  for i in range(count):

-                     data = [_ord(x) for x in self.header[pos:pos+n]]

+                     data = [_ord(x) for x in self.header[pos:pos + n]]

                      print("%r" % data)

                      num = multibyte(data)

                      print("Int(%d): %d" % (n, num))
@@ -701,23 +741,23 @@ 

              elif dtype == 6:

                  # string (null terminated)

                  end = self.header.find(six.b('\0'), pos)

-                 print("String(%d): %r" % (end-pos, self.header[pos:end]))

+                 print("String(%d): %r" % (end - pos, self.header[pos:end]))

                  next = end + 1

              elif dtype == 7:

-                 print("Data: %s" % hex_string(self.header[pos:pos+count]))

-                 next = pos+count

+                 print("Data: %s" % hex_string(self.header[pos:pos + count]))

+                 next = pos + count

              elif dtype == 8:

                  # string array

                  for i in range(count):

                      end = self.header.find(six.b('\0'), pos)

-                     print("String(%d): %r" % (end-pos, self.header[pos:end]))

+                     print("String(%d): %r" % (end - pos, self.header[pos:end]))

                      pos = end + 1

                  next = pos

              elif dtype == 9:

                  # unicode string array

                  for i in range(count):

                      end = self.header.find(six.b('\0'), pos)

-                     print("i18n(%d): %r" % (end-pos, self.header[pos:end]))

+                     print("i18n(%d): %r" % (end - pos, self.header[pos:end]))

                      pos = end + 1

                  next = pos

              else:
@@ -738,24 +778,24 @@ 

          return self._getitem(dtype, offset, count)

  

      def _getitem(self, dtype, offset, count):

-         #calculate start of store

+         # calculate start of store

          il = len(self.index)

          store = 16 + il * 16

          pos = store + offset

          if dtype >= 2 and dtype <= 5:

              n = 1 << (dtype - 2)

              # n-byte integer

-             data = [_ord(x) for x in self.header[pos:pos+n]]

+             data = [_ord(x) for x in self.header[pos:pos + n]]

              return multibyte(data)

          elif dtype == 6:

              # string (null terminated)

              end = self.header.find(six.b('\0'), pos)

              return self.header[pos:end]

          elif dtype == 7:

-             #raw data

-             return self.header[pos:pos+count]

+             # raw data

+             return self.header[pos:pos + count]

          else:

-             #XXX - not all valid data types are handled

+             # XXX - not all valid data types are handled

              raise GenericError("Unable to read header data type: %x" % dtype)

  

      def get(self, key, default=None):
@@ -775,6 +815,7 @@ 

      fo.close()

      return sighdr

  

+ 

  def rip_rpm_hdr(src):

      """Rip the main header out of an rpm"""

      (start, size) = find_rpm_sighdr(src)
@@ -786,6 +827,7 @@ 

      fo.close()

      return hdr

  

+ 

  def _ord(s):

      # in python2 it is char/str, while in py3 it is already int/bytes

      if isinstance(s, int):
@@ -793,6 +835,7 @@ 

      else:

          return ord(s)

  

+ 

  def __parse_packet_header(pgp_packet):

      """Parse pgp_packet header, return tag type and the rest of pgp_packet"""

      byte0 = _ord(pgp_packet[0])
@@ -805,7 +848,7 @@ 

              offset = 1

              length = len(pgp_packet) - offset

          else:

-             (fmt, offset) = {0:('>B', 2), 1:('>H', 3), 2:('>I', 5)}[len_type]

+             (fmt, offset) = {0: ('>B', 2), 1: ('>H', 3), 2: ('>I', 5)}[len_type]

              length = struct.unpack(fmt, pgp_packet[1:offset])[0]

      else:

          tag = byte0 & 0x3F
@@ -827,6 +870,7 @@ 

          raise ValueError('Invalid OpenPGP packet length')

      return (tag, pgp_packet[offset:])

  

+ 

  def __subpacket_key_ids(subs):

      """Parse v4 signature subpackets and return a list of issuer key IDs"""

      res = []
@@ -842,10 +886,11 @@ 

              length = struct.unpack('>I', subs[1:5])[0]

              off = 5

          if _ord(subs[off]) == 16:

-             res.append(subs[off+1 : off+length])

-         subs = subs[off+length:]

+             res.append(subs[off + 1: off + length])

+         subs = subs[off + length:]

      return res

  

+ 

  def get_sigpacket_key_id(sigpacket):

      """Return ID of the key used to create sigpacket as a hexadecimal string"""

      (tag, sigpacket) = __parse_packet_header(sigpacket)
@@ -857,9 +902,9 @@ 

          sub_len = struct.unpack('>H', sigpacket[4:6])[0]

          off = 6 + sub_len

          key_ids = __subpacket_key_ids(sigpacket[6:off])

-         sub_len = struct.unpack('>H', sigpacket[off : off+2])[0]

+         sub_len = struct.unpack('>H', sigpacket[off: off + 2])[0]

          off += 2

-         key_ids += __subpacket_key_ids(sigpacket[off : off+sub_len])

+         key_ids += __subpacket_key_ids(sigpacket[off: off + sub_len])

          if len(key_ids) != 1:

              raise NotImplementedError(

                  'Unexpected number of key IDs: %s' % len(key_ids))
@@ -869,6 +914,7 @@ 

              'Unknown PGP signature packet version %s' % _ord(sigpacket[0]))

      return hex_string(key_id)

  

+ 

  def get_sighdr_key(sighdr):

      """Parse the sighdr and return the sigkey"""

      rh = RawHeader(sighdr)
@@ -880,6 +926,7 @@ 

      else:

          return get_sigpacket_key_id(sig)

  

+ 

  def splice_rpm_sighdr(sighdr, src, dst=None, bufsize=8192):

      """Write a copy of an rpm with signature header spliced in"""

      (start, size) = find_rpm_sighdr(src)
@@ -900,13 +947,14 @@ 

      dst_fo.close()

      return dst

  

+ 

  def get_rpm_header(f, ts=None):

      """Return the rpm header."""

      if rpm is None:

          raise GenericError("rpm's python bindings are not installed")

      if ts is None:

          ts = rpm.TransactionSet()

-         ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)

+         ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS)

      if isinstance(f, six.string_types):

          fo = open(f, "rb")

      else:
@@ -940,11 +988,9 @@ 

      if not SUPPORTED_OPT_DEP_HDRS.get(name, True):

          return []

  

-     if (src_arch and name == "ARCH"

-                 and get_header_field(hdr, "sourcepackage")):

+     if src_arch and name == "ARCH" and get_header_field(hdr, "sourcepackage"):

          # return "src" or "nosrc" arch instead of build arch for src packages

-         if (get_header_field(hdr, "nosource")

-                     or get_header_field(hdr, "nopatch")):

+         if get_header_field(hdr, "nosource") or get_header_field(hdr, "nopatch"):

              return "nosrc"

          return "src"

  
@@ -997,6 +1043,7 @@ 

          ret[f] = get_header_field(hdr, f, src_arch=src_arch)

      return ret

  

+ 

  def parse_NVR(nvr):

      """split N-V-R into dictionary of data"""

      ret = {}
@@ -1006,8 +1053,8 @@ 

      p1 = nvr.rfind("-", 0, p2)

      if p1 == -1 or p1 == p2 - 1:

          raise GenericError("invalid format: %s" % nvr)

-     ret['release'] = nvr[p2+1:]

-     ret['version'] = nvr[p1+1:p2]

+     ret['release'] = nvr[p2 + 1:]

+     ret['version'] = nvr[p1 + 1:p2]

      ret['name'] = nvr[:p1]

      epochIndex = ret['name'].find(':')

      if epochIndex == -1:
@@ -1017,6 +1064,7 @@ 

          ret['name'] = ret['name'][epochIndex + 1:]

      return ret
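
Because the split works from the right, dashes in the package name survive (illustrative NVR):

    parse_NVR('kernel-rt-core-5.1.0-3.fc30')
    # -> {'name': 'kernel-rt-core', 'version': '5.1.0',
    #     'release': '3.fc30', 'epoch': ''}
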

  

+ 

  def parse_NVRA(nvra):

      """split N-V-R.A.rpm into dictionary of data

  
@@ -1030,7 +1078,7 @@ 

      p3 = nvra.rfind(".")

      if p3 == -1 or p3 == len(nvra) - 1:

          raise GenericError("invalid format: %s" % nvra)

-     arch = nvra[p3+1:]

+     arch = nvra[p3 + 1:]

      ret = parse_NVR(nvra[:p3])

      ret['arch'] = arch

      if arch == 'src':
@@ -1060,6 +1108,7 @@ 

          else:

              return False

  

+ 

  def _check_NVR(nvr):

      if isinstance(nvr, six.string_types):

          nvr = parse_NVR(nvr)
@@ -1091,7 +1140,7 @@ 

  

  def _check_NVRA(nvra):

      if isinstance(nvra, six.string_types):

-             nvra = parse_NVRA(nvra)

+         nvra = parse_NVRA(nvra)

      if '-' in nvra['version']:

          raise GenericError('The "-" character not allowed in version field')

      if '-' in nvra['release']:
@@ -1106,9 +1155,10 @@ 

      return (name.endswith('-debuginfo') or name.endswith('-debugsource') or

              '-debuginfo-' in name)

  

+ 

  def canonArch(arch):

      """Given an arch, return the "canonical" arch"""

-     #XXX - this could stand to be smarter, and we should probably

+     # XXX - this could stand to be smarter, and we should probably

      #   have some other related arch-mangling functions.

      if fnmatch(arch, 'i?86') or arch == 'athlon':

          return 'i386'
@@ -1131,6 +1181,7 @@ 

      else:

          return arch
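
For instance, given the i?86/athlon pattern above:

    canonArch('i686')    # -> 'i386'
    canonArch('athlon')  # -> 'i386'
    # arches with no special-case branch fall through the final else unchanged
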

  

+ 

  def parse_arches(arches, to_list=False, strict=False, allow_none=False):

      """Normalize user input for a list of arches.

  
@@ -1194,8 +1245,10 @@ 

          self.tag_content = None

          self.values.clear()

  

+ 

  ENTITY_RE = re.compile(r'&[A-Za-z0-9]+;')

  

+ 

  def parse_pom(path=None, contents=None):

      """

      Parse the Maven .pom file return a map containing information
@@ -1215,7 +1268,8 @@ 

          fd.close()

  

      if not contents:

-         raise GenericError('either a path to a pom file or the contents of a pom file must be specified')

+         raise GenericError(

+             'either a path to a pom file or the contents of a pom file must be specified')

  

      # A common problem is non-UTF8 characters in XML files, so we'll convert the string first

  
@@ -1232,9 +1286,11 @@ 

  

      for field in fields:

          if field not in util.to_list(values.keys()):

-             raise GenericError('could not extract %s from POM: %s' % (field, (path or '<contents>')))

+             raise GenericError('could not extract %s from POM: %s' %

+                                (field, (path or '<contents>')))

      return values

  

+ 

  def pom_to_maven_info(pominfo):

      """

      Convert the output of parsing a POM into a format compatible
@@ -1249,6 +1305,7 @@ 

                   'version': pominfo['version']}

      return maveninfo

  

+ 

  def maven_info_to_nvr(maveninfo):

      """

      Convert the maveninfo to NVR-compatible format.
@@ -1263,6 +1320,7 @@ 

      nvr['package_name'] = nvr['name']

      return nvr

  

+ 

  def mavenLabel(maveninfo):

      """

      Return a user-friendly label for the given maveninfo.  maveninfo is
@@ -1270,6 +1328,7 @@ 

      """

      return '%(group_id)s-%(artifact_id)s-%(version)s' % maveninfo

  

+ 

  def hex_string(s):

      """Converts a string to a string of hex digits"""

      return ''.join(['%02x' % _ord(x) for x in s])
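
E.g.:

    hex_string(b'\x00\xde\xad')   # -> '00dead'
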
@@ -1280,13 +1339,13 @@ 

      if buildgroup is None:

          buildgroup = name

      data = [

- """#

+         """#

  # This specfile represents buildgroups for mock

  # Autogenerated by the build system

  #

  Summary: The base set of packages for a mock chroot\n""",

- """Name: %s\n""" % name,

- """Version: 1

+         """Name: %s\n""" % name,

+         """Version: 1

  Release: 1

  License: GPL

  Group: Development/Build Tools
@@ -1295,12 +1354,12 @@ 

  

  #package requirements

  """]

-     #add a requires entry for all the packages in buildgroup, and in

-     #groups required by buildgroup

+     # add a requires entry for all the packages in buildgroup, and in

+     # groups required by buildgroup

      need = [buildgroup]

      seen_grp = {}

      seen_pkg = {}

-     #index groups

+     # index groups

      groups = dict([(g['name'], g) for g in grplist])

      for group_name in need:

          if group_name in seen_grp:
@@ -1338,6 +1397,7 @@ 

  """)

      return ''.join(data)

  

+ 

  def generate_comps(groups, expand_groups=False):

      """Generate comps content from groups data"""

      def boolean_text(x):
@@ -1346,7 +1406,7 @@ 

          else:

              return "false"

      data = [

- """<?xml version="1.0"?>

+         """<?xml version="1.0"?>

  <!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">

  

  <!-- Auto-generated by the build system -->
@@ -1363,7 +1423,7 @@ 

          default = boolean_text(g['is_default'])

          uservisible = boolean_text(g['uservisible'])

          data.append(

- """  <group>

+             """  <group>

      <id>%(group_id)s</id>

      <name>%(name)s</name>

      <description>%(description)s</description>
@@ -1372,18 +1432,18 @@ 

  """ % locals())

          if g['biarchonly']:

              data.append(

- """    <biarchonly>%s</biarchonly>

+                 """    <biarchonly>%s</biarchonly>

  """ % boolean_text(True))

  

-         #print grouplist, if any

+         # print grouplist, if any

          if g['grouplist'] and not expand_groups:

              data.append(

- """    <grouplist>

+                 """    <grouplist>

  """)

              grouplist = list(g['grouplist'])

              grouplist.sort(key=lambda x: x['name'])

              for x in grouplist:

-                 #['req_id','type','is_metapkg','name']

+                 # ['req_id','type','is_metapkg','name']

                  name = x['name']

                  thetype = x['type']

                  tag = "groupreq"
@@ -1391,19 +1451,19 @@ 

                      tag = "metapkg"

                  if thetype:

                      data.append(

- """      <%(tag)s type="%(thetype)s">%(name)s</%(tag)s>

+                         """      <%(tag)s type="%(thetype)s">%(name)s</%(tag)s>

  """ % locals())

                  else:

                      data.append(

- """      <%(tag)s>%(name)s</%(tag)s>

+                         """      <%(tag)s>%(name)s</%(tag)s>

  """ % locals())

              data.append(

- """    </grouplist>

+                 """    </grouplist>

  """)

  

-         #print packagelist, if any

+         # print packagelist, if any

          def package_entry(pkg):

-             #p['package_id','type','basearchonly','requires','name']

+             # p['package_id','type','basearchonly','requires','name']

              name = pkg['package']

              opts = 'type="%s"' % pkg['type']

              if pkg['basearchonly']:
@@ -1413,20 +1473,20 @@ 

              return "<packagereq %(opts)s>%(name)s</packagereq>" % locals()

  

          data.append(

- """    <packagelist>

+             """    <packagelist>

  """)

          if g['packagelist']:

              packagelist = list(g['packagelist'])

              packagelist.sort(key=lambda x: x['package'])

              for p in packagelist:

                  data.append(

- """      %s

+                     """      %s

  """ % package_entry(p))

              # also include expanded list, if needed

          if expand_groups and g['grouplist']:

-             #add a requires entry for all packages in groups required by buildgroup

+             # add a requires entry for all packages in groups required by buildgroup

              need = [req['name'] for req in g['grouplist']]

-             seen_grp = {g['name'] : 1}

+             seen_grp = {g['name']: 1}

              seen_pkg = {}

              for p in g['packagelist']:

                  seen_pkg[p['package']] = 1
@@ -1437,11 +1497,11 @@ 

                  group = group_idx.get(group_name)

                  if group is None:

                      data.append(

- """      <!-- MISSING GROUP: %s -->

+                         """      <!-- MISSING GROUP: %s -->

  """ % group_name)

                      continue

                  data.append(

- """      <!-- Expanding Group: %s -->

+                     """      <!-- Expanding Group: %s -->

  """ % group_name)

                  pkglist = list(group['packagelist'])

                  pkglist.sort(key=lambda x: x['package'])
@@ -1450,7 +1510,7 @@ 

                      if pkg_name in seen_pkg:

                          continue

                      data.append(

- """      %s

+                         """      %s

  """ % package_entry(pkg))

                  for req in group['grouplist']:

                      req_name = req['name']
@@ -1458,13 +1518,13 @@ 

                          continue

                      need.append(req_name)

          data.append(

- """    </packagelist>

+             """    </packagelist>

  """)

          data.append(

- """  </group>

+             """  </group>

  """)

      data.append(

- """</comps>

+         """</comps>

  """)

      return ''.join(data)

  
@@ -1484,12 +1544,12 @@ 

              raise GenericError("please provide a repo and tag")

          topurls = opts.get('topurls')

          if not topurls:

-             #cli command still passes plain topurl

+             # cli command still passes plain topurl

              topurl = opts.get('topurl')

              if topurl:

                  topurls = [topurl]

          if topurls:

-             #XXX - PathInfo isn't quite right for this, but it will do for now

+             # XXX - PathInfo isn't quite right for this, but it will do for now

              pathinfos = [PathInfo(topdir=_u) for _u in topurls]

              urls = ["%s/%s" % (_p.repo(repoid, tag_name), arch) for _p in pathinfos]

          else:
@@ -1502,9 +1562,9 @@ 

      # rely on the mock defaults being correct

      # and only includes changes from the defaults here

      config_opts = {

-         'root' : name,

-         'basedir' : mockdir,

-         'target_arch' : opts.get('target_arch', arch),

+         'root': name,

+         'basedir': mockdir,

+         'target_arch': opts.get('target_arch', arch),

          'chroothome': '/builddir',

          # Use the group data rather than a generated rpm

          'chroot_setup_cmd': 'groupinstall %s' % opts.get('install_group', 'build'),
@@ -1539,7 +1599,7 @@ 

      if mavenrc:

          files['etc/mavenrc'] = mavenrc

  

-     #generate yum.conf

+     # generate yum.conf

      yc_parts = ["[main]\n"]

      # HTTP proxy for yum

      if opts.get('yum_proxy'):
@@ -1577,9 +1637,9 @@ 

      }

  

      macros = {

-         '%_rpmfilename' : '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',

-         '%vendor' : opts.get('vendor', 'Koji'),

-         '%packager' : opts.get('packager', 'Koji'),

+         '%_rpmfilename': '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',

+         '%vendor': opts.get('vendor', 'Koji'),

+         '%packager': opts.get('packager', 'Koji'),

          '%distribution': opts.get('distribution', 'Unknown')

      }

  
@@ -1589,7 +1649,8 @@ 

      # The following macro values cannot be overridden by tag options

      macros['%_topdir'] = '%s/build' % config_opts['chroothome']

      macros['%_host_cpu'] = opts.get('target_arch', arch)

-     macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch), opts.get('mockhost', 'koji-linux-gnu'))

+     macros['%_host'] = '%s-%s' % (opts.get('target_arch', arch),

+                                   opts.get('mockhost', 'koji-linux-gnu'))

  

      parts = ["""# Auto-generated by the Koji build system

  """]
@@ -1621,7 +1682,9 @@ 

      if bind_opts:

          for key in bind_opts.keys():

              for mnt_src, mnt_dest in six.iteritems(bind_opts.get(key)):

-                 parts.append("config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" % (key, mnt_src, mnt_dest))

+                 parts.append(

+                     "config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" %

+                     (key, mnt_src, mnt_dest))

          parts.append("\n")

  

      for key in sorted(macros):
@@ -1634,11 +1697,14 @@ 

  

      return ''.join(parts)
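
The returned string is itself Python, since mock executes its config files; an illustrative excerpt of the output (values are examples, not defaults):

    # Auto-generated by the Koji build system
    config_opts['root'] = 'f30-build-123-456'
    config_opts['target_arch'] = 'x86_64'
    config_opts['chroot_setup_cmd'] = 'groupinstall build'
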

  

+ 

  def get_sequence_value(cursor, sequence):

      cursor.execute("""SELECT nextval(%(sequence)s)""", locals())

      return cursor.fetchone()[0]

  

  # From Python Cookbook 2nd Edition, Recipe 8.6

+ 

+ 

  def format_exc_plus():

      """ Format the usual traceback information, followed by a listing of

          all the local variables in each frame.
@@ -1664,10 +1730,11 @@ 

              # COULD cause any exception, so we MUST catch any...:

              try:

                  rv += "%s\n" % value

-             except:

+             except Exception:

                  rv += "<ERROR WHILE PRINTING VALUE>\n"

      return rv

  

+ 

  def openRemoteFile(relpath, topurl=None, topdir=None, tempdir=None):

      """Open a file on the main server (read-only)

  
@@ -1713,12 +1780,12 @@ 

      try:

          hdr = ts.hdrFromFdno(fo.fileno())

      except rpm.error as ex:

-         raise GenericError("rpm's header can't be extracted: %s (rpm error: %s)" % \

+         raise GenericError("rpm's header can't be extracted: %s (rpm error: %s)" %

                             (fo.name, ', '.join(ex.args)))

      try:

          rpm.TransactionSet().hdrCheck(hdr.unload())

      except rpm.error as ex:

-         raise GenericError("rpm's header can't be checked: %s (rpm error: %s)" % \

+         raise GenericError("rpm's header can't be checked: %s (rpm error: %s)" %

                             (fo.name, ', '.join(ex.args)))

      fo.seek(0)

  
@@ -1746,18 +1813,18 @@ 

  

  def read_config(profile_name, user_config=None):

      config_defaults = {

-         'server' : 'http://localhost/kojihub',

-         'weburl' : 'http://localhost/koji',

-         'topurl' : None,

-         'pkgurl' : None,

-         'topdir' : '/mnt/koji',

-         'max_retries' : None,

+         'server': 'http://localhost/kojihub',

+         'weburl': 'http://localhost/koji',

+         'topurl': None,

+         'pkgurl': None,

+         'topdir': '/mnt/koji',

+         'max_retries': None,

          'retry_interval': None,

-         'anon_retry' : None,

-         'offline_retry' : None,

-         'offline_retry_interval' : None,

-         'timeout' : DEFAULT_REQUEST_TIMEOUT,

-         'auth_timeout' : DEFAULT_AUTH_TIMEOUT,

+         'anon_retry': None,

+         'offline_retry': None,

+         'offline_retry_interval': None,

+         'timeout': DEFAULT_REQUEST_TIMEOUT,

+         'auth_timeout': DEFAULT_AUTH_TIMEOUT,

          'use_fast_upload': False,

          'upload_blocksize': 1048576,

          'poll_interval': 6,
@@ -1780,7 +1847,7 @@ 

  

      result = config_defaults.copy()

  

-     #note: later config files override earlier ones

+     # note: later config files override earlier ones

  

      # /etc/koji.conf.d

      configs = ['/etc/koji.conf.d']
@@ -1807,9 +1874,9 @@ 

          got_conf = True

          result['profile'] = profile_name

          for name, value in config.items(profile_name):

-             #note the config_defaults dictionary also serves to indicate which

-             #options *can* be set via the config file. Such options should

-             #not have a default value set in the option parser.

+             # note the config_defaults dictionary also serves to indicate which

+             # options *can* be set via the config file. Such options should

+             # not have a default value set in the option parser.

              if name in result:

                  if name in ('anon_retry', 'offline_retry',

                              'use_fast_upload', 'krb_rdns', 'debug',
@@ -1822,7 +1889,8 @@ 

                      try:

                          result[name] = int(value)

                      except ValueError:

-                         raise ConfigurationError("value for %s config option must be a valid integer" % name)

+                         raise ConfigurationError(

+                             "value for %s config option must be a valid integer" % name)

                  else:

                      result[name] = value

  
@@ -1835,7 +1903,7 @@ 

      cert_defaults = {

          'cert': '~/.koji/client.crt',

          'serverca': '~/.koji/serverca.crt',

-         }

+     }

      for name in cert_defaults:

          if result.get(name) is None:

              fn = os.path.expanduser(cert_defaults[name])
@@ -1966,7 +2034,8 @@ 

  

  class PathInfo(object):

      # ASCII numbers and upper- and lower-case letter for use in tmpdir()

-     ASCII_CHARS = [chr(i) for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]

+     ASCII_CHARS = [chr(i)

+                    for i in list(range(48, 58)) + list(range(65, 91)) + list(range(97, 123))]

  

      def __init__(self, topdir=None):

          self._topdir = topdir
@@ -1984,15 +2053,17 @@ 

      def volumedir(self, volume):

          if volume == 'DEFAULT' or volume is None:

              return self.topdir

-         #else

+         # else

          return self.topdir + ("/vol/%s" % volume)

  

      def build(self, build):

          """Return the directory where a build belongs"""

-         return self.volumedir(build.get('volume_name')) + ("/packages/%(name)s/%(version)s/%(release)s" % build)

+         return self.volumedir(build.get('volume_name')) + \

+             ("/packages/%(name)s/%(version)s/%(release)s" % build)

  

      def mavenbuild(self, build):

-         """Return the directory where the Maven build exists in the global store (/mnt/koji/packages)"""

+         """Return the directory where the Maven build exists in the global store

+            (/mnt/koji/packages)"""

          return self.build(build) + '/maven'

  

      def mavenrepo(self, maveninfo):
@@ -2073,7 +2144,8 @@ 

          """Return a path to a unique directory under work()/tmp/"""

          tmp = None

          while tmp is None or os.path.exists(tmp):

-             tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS) for dummy in '123456'])

+             tmp = self.work(volume) + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS)

+                                                          for dummy in '123456'])

          return tmp

  

      def scratch(self):
@@ -2084,6 +2156,7 @@ 

          """Return the output directory for the task with the given id"""

          return self.work(volume=volume) + '/' + self.taskrelpath(task_id)

  

+ 

  pathinfo = PathInfo()
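
A minimal sketch of the path helpers in use (illustrative build dict):

    pi = PathInfo(topdir='/mnt/koji')
    build = {'name': 'foo', 'version': '1.0', 'release': '1', 'volume_name': None}
    pi.build(build)   # -> '/mnt/koji/packages/foo/1.0/1'
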

  

  
@@ -2097,9 +2170,9 @@ 

      # are way more ugly.

      errstr = str(e)

      if ('Permission denied' in errstr or  # certificate not readable

-         'certificate revoked' in errstr or

-         'certificate expired' in errstr or

-         'certificate verify failed' in errstr):

+             'certificate revoked' in errstr or

+             'certificate expired' in errstr or

+             'certificate verify failed' in errstr):

          return True

  

      return False
@@ -2108,7 +2181,7 @@ 

  def is_cert_error(e):

      """Determine if an OpenSSL error is due to a bad cert"""

  

-     if SSL_Error is None:  #pragma: no cover

+     if SSL_Error is None:  # pragma: no cover

          # import failed, so we can't determine

          raise Exception("OpenSSL library did not load")

      if not isinstance(e, SSL_Error):
@@ -2141,7 +2214,7 @@ 

                      'certificate expired' in ssl_reason):

                  return True

  

-     #otherwise

+     # otherwise

      return False

  

  
@@ -2200,7 +2273,7 @@ 

                  self.__session._apidoc = dict(

                      [(f["name"], f) for f in self.__func("_listapi", [], {})]

                  )

-             except:

+             except Exception:

                  self.__session._apidoc = {}

  

          funcdoc = self.__session._apidoc.get(self.__name)
@@ -2258,7 +2331,7 @@ 

  

      def __init__(self, baseurl, opts=None, sinfo=None):

          assert baseurl, "baseurl argument must not be empty"

-         if opts == None:

+         if opts is None:

              opts = {}

          else:

              opts = opts.copy()
@@ -2357,13 +2430,13 @@ 

          if not ctx:

              ctx = krbV.default_context()

  

-         if ccache != None:

+         if ccache is not None:

              ccache = krbV.CCache(name=ccache, context=ctx)

          else:

              ccache = ctx.default_ccache()

  

-         if principal != None:

-             if keytab != None:

+         if principal is not None:

+             if keytab is not None:

                  cprinc = krbV.Principal(name=principal, context=ctx)

                  keytab = krbV.Keytab(name=keytab, context=ctx)

                  ccache.init(cprinc)
@@ -2472,8 +2545,8 @@ 

              if principal:

                  if re.match(r'0[.][1-8]\b', requests_kerberos.__version__):

                      raise PythonImportError(

-                             'python-requests-kerberos >= 0.9.0 required for '

-                             'keytab auth'

+                         'python-requests-kerberos >= 0.9.0 required for '

+                         'keytab auth'

                      )

                  else:

                      kwargs['principal'] = principal
@@ -2553,7 +2626,7 @@ 

              handler, headers, request = self._prepCall('logout', ())

              self._sendCall(handler, headers, request)

          except AuthExpired:

-             #this can happen when an exclusive session is forced

+             # this can happen when an exclusive session is forced

              pass

          self.setSession(None)

  
@@ -2578,15 +2651,15 @@ 

              return

          self.setSession(None)

  

-     #we've had some trouble with this method causing strange problems

-     #(like infinite recursion). Possibly triggered by initialization failure,

-     #and possibly due to some interaction with __getattr__.

-     #Re-enabling with a small improvement

+     # we've had some trouble with this method causing strange problems

+     # (like infinite recursion). Possibly triggered by initialization failure,

+     # and possibly due to some interaction with __getattr__.

+     # Re-enabling with a small improvement

      def __del__(self):

          if self.__dict__:

              try:

                  self.logout()

-             except:

+             except Exception:

                  pass

  

      def callMethod(self, name, *args, **opts):
@@ -2594,7 +2667,7 @@ 

          return self._callMethod(name, args, opts)

  

      def _prepCall(self, name, args, kwargs=None):

-         #pass named opts in a way the server can understand

+         # pass named opts in a way the server can understand

          if kwargs is None:

              kwargs = {}

          if name == 'rawUpload':
@@ -2713,27 +2786,28 @@ 

                  self.retries += 1

                  try:

                      return self._sendCall(handler, headers, request)

-                 #basically, we want to retry on most errors, with a few exceptions

+                 # basically, we want to retry on most errors, with a few exceptions

                  #  - faults (this means the call completed and failed)

                  #  - SystemExit, KeyboardInterrupt

-                 # note that, for logged-in sessions the server should tell us (via a RetryError fault)

-                 # if the call cannot be retried. For non-logged-in sessions, all calls should be read-only

-                 # and hence retryable.

+                 # note that, for logged-in sessions the server should tell us (via a RetryError

+                 # fault) if the call cannot be retried. For non-logged-in sessions, all calls

+                 # should be read-only and hence retryable.

                  except Fault as fault:

-                     #try to convert the fault to a known exception

+                     # try to convert the fault to a known exception

                      err = convertFault(fault)

                      if isinstance(err, ServerOffline):

                          if self.opts.get('offline_retry', False):

                              secs = self.opts.get('offline_retry_interval', interval)

                              self.logger.debug("Server offline. Retrying in %i seconds", secs)

                              time.sleep(secs)

-                             #reset try count - this isn't a typical error, this is a running server

-                             #correctly reporting an outage

+                             # reset try count - this isn't a typical error, this is a running

+                             # server correctly reporting an outage

                              tries = 0

                              continue

                      raise err

                  except (SystemExit, KeyboardInterrupt):

-                     #(depending on the python version, these may or may not be subclasses of Exception)

+                     # (depending on the python version, these may or may not be subclasses of

+                     # Exception)

                      raise

                  except Exception as e:

                      tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
@@ -2744,8 +2818,9 @@ 

                          raise

  

                      if not self.logged_in:

-                         #in the past, non-logged-in sessions did not retry. For compatibility purposes

-                         #this behavior is governed by the anon_retry opt.

+                         # in the past, non-logged-in sessions did not retry.

+                         # For compatibility purposes this behavior is governed by the anon_retry

+                         # opt.

                          if not self.opts.get('anon_retry', False):

                              raise

  
@@ -2754,14 +2829,15 @@ 

  

                      if tries > max_retries:

                          raise

-                     #otherwise keep retrying

+                     # otherwise keep retrying

                      if self.logger.isEnabledFor(logging.DEBUG):

                          self.logger.debug(tb_str)

-                     self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e)

+                     self.logger.info("Try #%s for call %s (%s) failed: %s",

+                                      tries, self.callnum, name, e)

                  if tries > 1:

                      # first retry is immediate, after that we honor retry_interval

                      time.sleep(interval)

-             #not reached

+             # not reached

  

      def multiCall(self, strict=False, batch=None):

          """Execute a prepared multicall
@@ -2799,7 +2875,8 @@ 

          transaction.

          """

          if not self.multicall:

-             raise GenericError('ClientSession.multicall must be set to True before calling multiCall()')

+             raise GenericError(

+                 'ClientSession.multicall must be set to True before calling multiCall()')

          self.multicall = False

          if len(self._calls) == 0:

              return []
@@ -2816,7 +2893,7 @@ 

          else:

              ret = self._callMethod('multiCall', (calls,), {})

          if strict:

-             #check for faults and raise first one

+             # check for faults and raise first one

              for entry in ret:

                  if isinstance(entry, dict):

                      fault = Fault(entry['faultCode'], entry['faultString'])
@@ -2825,13 +2902,14 @@ 

          return ret
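
Typical client-side use of the batching above (hypothetical session and calls):

    session.multicall = True
    for nvr in ('foo-1.0-1', 'bar-2.0-1'):
        session.getBuild(nvr)                 # queued, returns a placeholder
    results = session.multiCall(strict=True)  # one [result] entry per queued call
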

  

      def __getattr__(self, name):

-         #if name[:1] == '_':

+         # if name[:1] == '_':

          #    raise AttributeError("no attribute %r" % name)

          if name == '_apidoc':

              return self.__dict__['_apidoc']

          return VirtualMethod(self._callMethod, name, self)

  

-     def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=False, volume=None):

+     def fastUpload(self, localfile, path, name=None, callback=None, blocksize=None,

+                    overwrite=False, volume=None):

          if blocksize is None:

              blocksize = self.opts.get('upload_blocksize', 1048576)

  
@@ -2865,10 +2943,11 @@ 

              hexdigest = util.adler32_constructor(chunk).hexdigest()

              full_chksum.update(chunk)

              if result['size'] != len(chunk):

-                 raise GenericError("server returned wrong chunk size: %s != %s" % (result['size'], len(chunk)))

+                 raise GenericError("server returned wrong chunk size: %s != %s" %

+                                    (result['size'], len(chunk)))

              if result['hexdigest'] != hexdigest:

-                 raise GenericError('upload checksum failed: %s != %s' \

-                         % (result['hexdigest'], hexdigest))

+                 raise GenericError('upload checksum failed: %s != %s'

+                                    % (result['hexdigest'], hexdigest))

              ofs += len(chunk)

              now = time.time()

              t1 = max(now - lap, 0.00001)
@@ -2887,14 +2966,16 @@ 

          if result is None:

              raise GenericError("File upload failed: %s/%s" % (path, name))

          if int(result['size']) != ofs:

-             raise GenericError("Uploaded file is wrong length: %s/%s, %s != %s" \

-                     % (path, name, result['size'], ofs))

+             raise GenericError("Uploaded file is wrong length: %s/%s, %s != %s"

+                                % (path, name, result['size'], ofs))

          if problems and result['hexdigest'] != full_chksum.hexdigest():

-             raise GenericError("Uploaded file has wrong checksum: %s/%s, %s != %s" \

-                     % (path, name, result['hexdigest'], full_chksum.hexdigest()))

-         self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds", localfile, size, t2)

+             raise GenericError("Uploaded file has wrong checksum: %s/%s, %s != %s"

+                                % (path, name, result['hexdigest'], full_chksum.hexdigest()))

+         self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds",

+                           localfile, size, t2)

  

-     def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False, volume=None):

+     def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False,

+                     volume=None):

          """prep a rawUpload call"""

          if not self.logged_in:

              raise ActionNotAllowed("you must be logged in to upload")
@@ -2924,7 +3005,8 @@ 

              request = chunk

          return handler, headers, request

  

-     def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=True, volume=None):

+     def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None,

+                       overwrite=True, volume=None):

          """upload a file in chunks using the uploadFile call"""

          if blocksize is None:

              blocksize = self.opts.get('upload_blocksize', 1048576)
@@ -2953,7 +3035,7 @@ 

          start = time.time()

          # XXX - stick in a config or something

          retries = 3

-         fo = open(localfile, "rb")  #specify bufsize?

+         fo = open(localfile, "rb")  # specify bufsize?

          totalsize = os.path.getsize(localfile)

          ofs = 0

          md5sum = hashlib.md5()
@@ -2979,14 +3061,15 @@ 

              tries = 0

              while True:

                  if debug:

-                     self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %(path, name, sz, digest, offset))

+                     self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %

+                                       (path, name, sz, digest, offset))

                  if self.callMethod('uploadFile', path, name, sz, digest, offset, data, **volopts):

                      break

                  if tries <= retries:

                      tries += 1

                      continue

                  else:

-                     raise GenericError("Error uploading file %s, offset %d" %(path, offset))

+                     raise GenericError("Error uploading file %s, offset %d" % (path, offset))

              if size == 0:

                  break

              ofs += size
@@ -2998,9 +3081,11 @@ 

              if t2 <= 0:

                  t2 = 1

              if debug:

-                 self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" % (size, t1, size / t1 / 1024.0))

+                 self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" %

+                                   (size, t1, size / t1 / 1024.0))

              if debug:

-                 self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" % (ofs, t2, ofs / t2 / 1024.0))

+                 self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" %

+                                   (ofs, t2, ofs / t2 / 1024.0))

              if callback:

                  callback(ofs, totalsize, size, t1, t2)

          fo.close()
@@ -3124,9 +3209,9 @@ 

          self._calls = []

          if batch:

              self._session.logger.debug(

-                     "MultiCall with batch size %i, calls/groups(%i/%i)",

-                     batch, len(calls), round(len(calls) // batch))

-             batches = [calls[i:i+batch] for i in range(0, len(calls), batch)]

+                 "MultiCall with batch size %i, calls/groups(%i/%i)",

+                 batch, len(calls), round(len(calls) // batch))

+             batches = [calls[i:i + batch] for i in range(0, len(calls), batch)]

          else:

              batches = [calls]

          results = []
@@ -3170,6 +3255,7 @@ 

      A handler class which writes logging records, appropriately formatted,

      to a database.

      """

+ 

      def __init__(self, cnx, table, mapping=None):

          """

          Initialize the handler.
@@ -3207,19 +3293,20 @@ 

                  columns.append(key)

                  values.append("%%(%s)s" % key)

                  data[key] = value % record.__dict__

-                 #values.append(_quote(value % record.__dict__))

+                 # values.append(_quote(value % record.__dict__))

              columns = ",".join(columns)

              values = ",".join(values)

              command = "INSERT INTO %s (%s) VALUES (%s)" % (self.table, columns, values)

-             #note we're letting cursor.execute do the escaping

+             # note we're letting cursor.execute do the escaping

              cursor.execute(command, data)

              cursor.close()

-             #self.cnx.commit()

-             #XXX - committing here is most likely wrong, but we need to set commit_pending or something

-             #      ...and this is really the wrong place for that

-         except:

+             # self.cnx.commit()

+             # XXX - committing here is most likely wrong, but we need to set commit_pending or

+             #       something...and this is really the wrong place for that

+         except Exception:

              self.handleError(record)

  

+ 

  def formatTime(value):

      """Format a timestamp so it looks nicer"""

      if not value:
@@ -3236,6 +3323,7 @@ 

          else:

              return value

  

+ 

  def formatTimeLong(value):

      """Format a timestamp to a more human-reable format, i.e.:

      Sat, 07 Sep 2002 00:00:01 GMT
@@ -3247,10 +3335,11 @@ 

          localtime = time.mktime(time.strptime(formatTime(value), '%Y-%m-%d %H:%M:%S'))

          return time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(localtime))
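
For example (output depends on the local timezone):

    formatTimeLong(1031356801)   # -> e.g. 'Sat, 07 Sep 2002 00:00:01 GMT'
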

  

+ 

  def buildLabel(buildInfo, showEpoch=False):

      """Format buildInfo (dict) into a descriptive label."""

      epoch = buildInfo.get('epoch')

-     if showEpoch and epoch != None:

+     if showEpoch and epoch is not None:

          epochStr = '%i:' % epoch

      else:

          epochStr = ''
@@ -3261,6 +3350,7 @@ 

                             buildInfo.get('version'),

                             buildInfo.get('release'))

  

+ 

  def _module_info(url):

      module_info = ''

      if '?' in url:
@@ -3279,12 +3369,14 @@ 

      else:

          return '%s:%s' % (repo_info, rev_info)

  

+ 

  def taskLabel(taskInfo):

      try:

          return _taskLabel(taskInfo)

      except Exception:

          return "malformed task"

  

+ 

  def _taskLabel(taskInfo):

      """Format taskInfo (dict) into a descriptive label."""

      method = taskInfo['method']
@@ -3328,7 +3420,7 @@ 

                  extra = build_target['name']

      elif method == 'winbuild':

          if 'request' in taskInfo:

-             #vm = taskInfo['request'][0]

+             # vm = taskInfo['request'][0]

              url = taskInfo['request'][1]

              target = taskInfo['request'][2]

              module_info = _module_info(url)
@@ -3389,7 +3481,7 @@ 

              else:

                  kickstart = os.path.basename(stuff[7])

              extra = '%s, %s-%s-%s, %s, %s' % (stuff[4]['name'], stuff[0],

-                 stuff[1], stuff[2], kickstart, stuff[3])

+                                               stuff[1], stuff[2], kickstart, stuff[3])

      elif method == 'restart':

          if 'request' in taskInfo:

              host = taskInfo['request'][0]
@@ -3404,10 +3496,13 @@ 

      else:

          return '%s (%s)' % (method, arch)

  

+ 

  CONTROL_CHARS = [chr(i) for i in range(32)]

  NONPRINTABLE_CHARS = ''.join([c for c in CONTROL_CHARS if c not in '\r\n\t'])

  if six.PY3:

      NONPRINTABLE_CHARS_TABLE = dict.fromkeys(map(ord, NONPRINTABLE_CHARS), None)

+ 

+ 

  def removeNonprintable(value):

      # expects raw-encoded string, not unicode

      if six.PY2:
@@ -3458,7 +3553,7 @@ 

  

      # play encoding tricks for py2 strings

      if six.PY2:

-         if isinstance(value, unicode):

+         if isinstance(value, unicode):  # noqa: F821

              # just convert it to a utf8-encoded str

              value = value.encode('utf8')

          elif isinstance(value, str):
@@ -3501,16 +3596,20 @@ 

          return

      if not os.access(fn, os.W_OK):

          return

-     handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024*1024*10, backupCount=5)

+     handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024 * 1024 * 10, backupCount=5)

      handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))

      logging.getLogger(logger).addHandler(handler)

  

+ 

  def add_stderr_logger(logger):

      handler = logging.StreamHandler()

-     handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))

+     handler.setFormatter(

+         logging.Formatter(

+             '%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))

      handler.setLevel(logging.DEBUG)

      logging.getLogger(logger).addHandler(handler)
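
For instance, to mirror a daemon's debug output to the console:

    add_stderr_logger('koji')
    logging.getLogger('koji').debug('handler wired up')
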

  

+ 

  def add_sys_logger(logger):

      # For remote logging;

      # address = ('host.example.com', logging.handlers.SysLogHandler.SYSLOG_UDP_PORT)
@@ -3521,6 +3620,7 @@ 

      handler.setLevel(logging.INFO)

      logging.getLogger(logger).addHandler(handler)

  

+ 

  def add_mail_logger(logger, addr):

      """Adding e-mail logger

  
@@ -3534,12 +3634,14 @@ 

          return

      addresses = addr.split(',')

      handler = logging.handlers.SMTPHandler("localhost",

-                                            "%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.getfqdn()),

+                                            "%s@%s" % (pwd.getpwuid(os.getuid())[0],

+                                                       socket.getfqdn()),

                                             addresses,

                                             "%s: error notice" % socket.getfqdn())

      handler.setFormatter(logging.Formatter('%(pathname)s:%(lineno)d [%(levelname)s] %(message)s'))

      handler.setLevel(logging.ERROR)

      logging.getLogger(logger).addHandler(handler)

  

+ 

  def remove_log_handler(logger, handler):

      logging.getLogger(logger).removeHandler(handler)

file modified
+57 -38
@@ -10,14 +10,14 @@ 

  _ppc64_native_is_best = True

  

  # dict mapping arch -> ( multicompat, best personality, biarch personality )

- multilibArches = { "x86_64":  ( "athlon", "x86_64", "athlon" ),

-                    "sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ),

-                    "sparc64": ( "sparcv9", "sparcv9", "sparc64" ),

-                    "ppc64":   ( "ppc", "ppc", "ppc64" ),

-                    "s390x":   ( "s390", "s390x", "s390" ),

-                    }

+ multilibArches = {"x86_64": ("athlon", "x86_64", "athlon"),

+                   "sparc64v": ("sparcv9v", "sparcv9v", "sparc64v"),

+                   "sparc64": ("sparcv9", "sparcv9", "sparc64"),

+                   "ppc64": ("ppc", "ppc", "ppc64"),

+                   "s390x": ("s390", "s390x", "s390"),

+                   }

  if _ppc64_native_is_best:

-     multilibArches["ppc64"] = ( "ppc", "ppc64", "ppc64" )

+     multilibArches["ppc64"] = ("ppc", "ppc64", "ppc64")

  

  arches = {

      # ia32
@@ -33,8 +33,8 @@ 

      "amd64": "x86_64",

      "ia32e": "x86_64",

  

-     #ppc64le

-     "ppc64le":  "noarch",

+     # ppc64le

+     "ppc64le": "noarch",

  

      # ppc

      "ppc64p7": "ppc64",
@@ -56,16 +56,16 @@ 

      "sparc": "noarch",

  

      # alpha

-     "alphaev7":   "alphaev68",

-     "alphaev68":  "alphaev67",

-     "alphaev67":  "alphaev6",

-     "alphaev6":   "alphapca56",

+     "alphaev7": "alphaev68",

+     "alphaev68": "alphaev67",

+     "alphaev67": "alphaev6",

+     "alphaev6": "alphapca56",

      "alphapca56": "alphaev56",

-     "alphaev56":  "alphaev5",

-     "alphaev5":   "alphaev45",

-     "alphaev45":  "alphaev4",

-     "alphaev4":   "alpha",

-     "alpha":      "noarch",

+     "alphaev56": "alphaev5",

+     "alphaev5": "alphaev45",

+     "alphaev45": "alphaev4",

+     "alphaev4": "alpha",

+     "alpha": "noarch",

  

      # arm

      "armv7l": "armv6l",
@@ -73,7 +73,7 @@ 

      "armv5tejl": "armv5tel",

      "armv5tel": "noarch",

  

-     #arm hardware floating point

+     # arm hardware floating point

      "armv7hnl": "armv7hl",

      "armv7hl": "armv6hl",

      "armv6hl": "noarch",
@@ -86,16 +86,17 @@ 

      "sh4": "noarch",

      "sh3": "noarch",

  

-     #itanium

+     # itanium

      "ia64": "noarch",

-     }

+ }

  

  #  Will contain information parsed from /proc/self/auxv via _parse_auxv().

  # Should move into rpm really.

  _aux_vector = {

      "platform": "",

      "hwcap": 0,

-     }

+ }

+ 

  

  def legitMultiArchesInSameLib(arch=None):

      # this is completely crackrock - if anyone has a better way I
@@ -124,7 +125,7 @@ 

      # if both are a multlibarch then we can't coinstall  (x86_64, ia32e)

      # if both are not multilibarches then we can't coinstall (i386, i686)

  

-     if 'noarch' in [arch1, arch2]: # noarch can never coinstall

+     if 'noarch' in [arch1, arch2]:  # noarch can never coinstall

          return False

  

      if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):
@@ -135,6 +136,8 @@ 

      return False

  

  # this computes the difference between myarch and targetarch

+ 

+ 

  def archDifference(myarch, targetarch):

      if myarch == targetarch:

          return 1
@@ -145,15 +148,17 @@ 

          return 0

      return 0

  

+ 

  def score(arch):

      return archDifference(canonArch, arch)

  

+ 

  def isMultiLibArch(arch=None):

      """returns true if arch is a multilib arch, false if not"""

      if arch is None:

          arch = canonArch

  

-     if arch not in arches: # or we could check if it is noarch

+     if arch not in arches:  # or we could check if it is noarch

          return 0

  

      if arch in multilibArches:
@@ -164,6 +169,7 @@ 

  

      return 0

  

+ 

  def getBestArchFromList(archlist, myarch=None):

      """

          return the best arch from the list for myarch if - myarch is not given,
@@ -176,12 +182,10 @@ 

      if myarch is None:

          myarch = canonArch

  

-     mybestarch = getBestArch(myarch)

- 

      bestarch = getBestArch(myarch)

      if bestarch != myarch:

          bestarchchoice = getBestArchFromList(archlist, bestarch)

-         if bestarchchoice != None and bestarchchoice != "noarch":

+         if bestarchchoice is not None and bestarchchoice != "noarch":

              return bestarchchoice

  

      thisarch = archlist[0]
@@ -221,28 +225,30 @@ 

      # hack hack hack

      # sparc64v is also sparc64 compat

      if archlist[0] == "sparc64v":

-         archlist.insert(1,"sparc64")

+         archlist.insert(1, "sparc64")

  

      # if we're a weirdo arch - add noarch on there.

      if len(archlist) == 1 and archlist[0] == thisarch:

          archlist.append('noarch')

      return archlist

  

+ 

  def _try_read_cpuinfo():

      """ Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not

          mounted). """

      try:

          return open("/proc/cpuinfo", "r")

-     except:

+     except Exception:

          return []

  

+ 

  def _parse_auxv():

      """ Read /proc/self/auxv and parse it into global dict for easier access

          later on, very similar to what rpm does. """

      # In case we can't open and read /proc/self/auxv, just return

      try:

          data = open("/proc/self/auxv", "rb").read()

-     except:

+     except Exception:

          return

  

      # Define values from /usr/include/elf.h
@@ -262,6 +268,7 @@ 

              _aux_vector["hwcap"] = at_val

          offset = offset + fmtlen

  

+ 

  def getCanonX86Arch(arch):

      #

      if arch == "i586":
@@ -287,6 +294,7 @@ 

  

      return arch

  

+ 

  def getCanonARMArch(arch):

      # the %{_target_arch} macro in rpm will let us know the abi we are using

      target = rpm.expandMacro('%{_target_cpu}')
@@ -296,6 +304,7 @@ 

          return target

      return arch

  

+ 

  def getCanonPPCArch(arch):

      # FIXME: should I do better handling for mac, etc?

      if arch != "ppc64":
@@ -314,7 +323,7 @@ 

      try:

          if platform.startswith("power") and int(platform[5:].rstrip('+')) >= 7:

              return "ppc64p7"

-     except:

+     except Exception:

          pass

  

      if machine is None:
@@ -326,6 +335,7 @@ 

          return "ppc64iseries"

      return arch

  

+ 

  def getCanonSPARCArch(arch):

      # Deal with sun4v, sun4u, sun4m cases

      SPARCtype = None
@@ -350,6 +360,7 @@ 

          return "sparcv8"

      return arch

  

+ 

  def getCanonX86_64Arch(arch):

      if arch != "x86_64":

          return arch
@@ -368,7 +379,8 @@ 

          return "ia32e"

      return arch

  

- def getCanonArch(skipRpmPlatform = 0):

+ 

+ def getCanonArch(skipRpmPlatform=0):

      if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):

          try:

              f = open("/etc/rpm/platform", "r")
@@ -376,7 +388,7 @@ 

              f.close()

              (arch, vendor, opersys) = line.split("-", 2)

              return arch

-         except:

+         except Exception:

              pass

  

      arch = os.uname()[4]
@@ -397,19 +409,24 @@ 

  

      return arch

  

+ 

  canonArch = getCanonArch()

  

  # this gets you the "compat" arch of a biarch pair

- def getMultiArchInfo(arch = canonArch):

+ 

+ 

+ def getMultiArchInfo(arch=canonArch):

      if arch in multilibArches:

          return multilibArches[arch]

      if arch in arches and arches[arch] != "noarch":

-         return getMultiArchInfo(arch = arches[arch])

+         return getMultiArchInfo(arch=arches[arch])

      return None

  

  # get the best usual userspace arch for the arch we're on.  this is

  # our arch unless we're on an arch that uses the secondary as its

  # userspace (eg ppc64, sparc64)

+ 

+ 

  def getBestArch(myarch=None):

      if myarch:

          arch = myarch
@@ -424,6 +441,7 @@ 

  

      return arch

  

+ 

  def getBaseArch(myarch=None):

      """returns 'base' arch for myarch, if specified, or canonArch if not.

         base arch is the arch before noarch in the arches dict if myarch is not
@@ -432,7 +450,7 @@ 

      if not myarch:

          myarch = canonArch

  

-     if myarch not in arches: # this is dumb, but <shrug>

+     if myarch not in arches:  # this is dumb, but <shrug>

          return myarch

  

      if myarch.startswith("sparc64"):
@@ -469,6 +487,7 @@ 

  class ArchStorage(object):

      """class for keeping track of what arch we have set and doing various

         permutations based on it"""

+ 

      def __init__(self):

          self.canonarch = None

          self.basearch = None
@@ -487,7 +506,7 @@ 

          self.basearch = getBaseArch(myarch=self.canonarch)

          self.archlist = getArchList(thisarch=self.canonarch)

  

-         if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64

+         if not archlist_includes_compat_arch:  # - do we bother including i686 and below on x86_64

              limit_archlist = []

              for a in self.archlist:

                  if isMultiLibArch(a) or a == 'noarch':
@@ -497,7 +516,7 @@ 

          self.bestarch = getBestArch(myarch=self.canonarch)

          self.compatarches = getMultiArchInfo(arch=self.canonarch)

          self.multilib = isMultiLibArch(arch=self.canonarch)

-         self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch)

+         self.legit_multi_arches = legitMultiArchesInSameLib(arch=self.canonarch)

  

      def get_best_arch_from_list(self, archlist, fromarch=None):

          if not fromarch:
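The dict cleanups in this file are purely cosmetic; `arches` still encodes a parent-pointer chain that the lookup functions walk until they reach `noarch`. An illustrative, self-contained walk using the alpha entries copied verbatim from the hunk above (the helper function itself is a sketch, not code from the patch):

```python
# alpha fallback entries exactly as listed in the hunk above
arches = {
    "alphaev7": "alphaev68", "alphaev68": "alphaev67",
    "alphaev67": "alphaev6", "alphaev6": "alphapca56",
    "alphapca56": "alphaev56", "alphaev56": "alphaev5",
    "alphaev5": "alphaev45", "alphaev45": "alphaev4",
    "alphaev4": "alpha", "alpha": "noarch",
}

def fallback_chain(arch):
    """Follow arch -> parent links until the chain bottoms out at noarch."""
    chain = [arch]
    while arches.get(arch, "noarch") != "noarch":
        arch = arches[arch]
        chain.append(arch)
    return chain + ["noarch"]

print(fallback_chain("alphaev7"))
# ['alphaev7', 'alphaev68', 'alphaev67', 'alphaev6', 'alphapca56',
#  'alphaev56', 'alphaev5', 'alphaev45', 'alphaev4', 'alpha', 'noarch']
```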

file modified
+68 -55
@@ -79,7 +79,7 @@ 

          self._perms = None

          self._groups = None

          self._host_id = ''

-         #get session data from request

+         # get session data from request

          if args is None:

              environ = getattr(context, 'environ', {})

              args = environ.get('QUERY_STRING', '')
@@ -95,9 +95,9 @@ 

              raise koji.AuthError('%s not specified in session args' % field)

          try:

              callnum = args['callnum'][0]

-         except:

+         except Exception:

              callnum = None

-         #lookup the session

+         # lookup the session

          c = context.cnx.cursor()

          fields = {

              'authtype': 'authtype',
@@ -110,7 +110,7 @@ 

              'EXTRACT(EPOCH FROM start_time)': 'start_ts',

              'EXTRACT(EPOCH FROM update_time)': 'update_ts',

              'user_id': 'user_id',

-             }

+         }

          # sort for stability (unittests)

          fields, aliases = zip(*sorted(fields.items(), key=lambda x: x[1]))

          q = """
@@ -125,10 +125,10 @@ 

          if not row:

              raise koji.AuthError('Invalid session or bad credentials')

          session_data = dict(zip(aliases, row))

-         #check for expiration

+         # check for expiration

          if session_data['expired']:

              raise koji.AuthExpired('session "%i" has expired' % id)

-         #check for callnum sanity

+         # check for callnum sanity

          if callnum is not None:

              try:

                  callnum = int(callnum)
@@ -137,25 +137,25 @@ 

              lastcall = session_data['callnum']

              if lastcall is not None:

                  if lastcall > callnum:

-                     raise koji.SequenceError("%d > %d (session %d)" \

-                             % (lastcall, callnum, id))

+                     raise koji.SequenceError("%d > %d (session %d)"

+                                              % (lastcall, callnum, id))

                  elif lastcall == callnum:

-                     #Some explanation:

-                     #This function is one of the few that performs its own commit.

-                     #However, our storage of the current callnum is /after/ that

-                     #commit. This means the the current callnum only gets committed if

-                     #a commit happens afterward.

-                     #We only schedule a commit for dml operations, so if we find the

-                     #callnum in the db then a previous attempt succeeded but failed to

-                     #return. Data was changed, so we cannot simply try the call again.

+                     # Some explanation:

+                     # This function is one of the few that performs its own commit.

+                     # However, our storage of the current callnum is /after/ that

+                     # commit. This means that the current callnum only gets committed if

+                     # a commit happens afterward.

+                     # We only schedule a commit for dml operations, so if we find the

+                     # callnum in the db then a previous attempt succeeded but failed to

+                     # return. Data was changed, so we cannot simply try the call again.

                      method = getattr(context, 'method', 'UNKNOWN')

                      if method not in RetryWhitelist:

                          raise koji.RetryError(

-                             "unable to retry call %d (method %s) for session %d" \

+                             "unable to retry call %d (method %s) for session %d"

                              % (callnum, method, id))

  

          # read user data

-         #historical note:

+         # historical note:

          # we used to get a row lock here as an attempt to maintain sanity of exclusive

          # sessions, but it was an imperfect approach and the lock could cause some

          # performance issues.
@@ -166,25 +166,25 @@ 

  

          if user_data['status'] != koji.USER_STATUS['NORMAL']:

              raise koji.AuthError('logins by %s are not allowed' % user_data['name'])

-         #check for exclusive sessions

+         # check for exclusive sessions

          if session_data['exclusive']:

-             #we are the exclusive session for this user

+             # we are the exclusive session for this user

              self.exclusive = True

          else:

-             #see if an exclusive session exists

+             # see if an exclusive session exists

              q = """SELECT id FROM sessions WHERE user_id=%(user_id)s

              AND "exclusive" = TRUE AND expired = FALSE"""

-             #should not return multiple rows (unique constraint)

+             # should not return multiple rows (unique constraint)

              c.execute(q, session_data)

              row = c.fetchone()

              if row:

                  (excl_id,) = row

                  if excl_id == session_data['master']:

-                     #(note excl_id cannot be None)

-                     #our master session has the lock

+                     # (note excl_id cannot be None)

+                     # our master session has the lock

                      self.exclusive = True

                  else:

-                     #a session unrelated to us has the lock

+                     # a session unrelated to us has the lock

                      self.lockerror = "User locked by another session"

                      # we don't enforce here, but rely on the dispatcher to enforce

                      # if appropriate (otherwise it would be impossible to steal
@@ -193,11 +193,11 @@ 

          # update timestamp

          q = """UPDATE sessions SET update_time=NOW() WHERE id = %(id)i"""

          c.execute(q, locals())

-         #save update time

+         # save update time

          context.cnx.commit()

  

-         #update callnum (this is deliberately after the commit)

-         #see earlier note near RetryError

+         # update callnum (this is deliberately after the commit)

+         # see earlier note near RetryError

          if callnum is not None:

              q = """UPDATE sessions SET callnum=%(callnum)i WHERE id = %(id)i"""

              c.execute(q, locals())
@@ -218,7 +218,7 @@ 

          # grab perm and groups data on the fly

          if name == 'perms':

              if self._perms is None:

-                 #in a dict for quicker lookup

+                 # in a dict for quicker lookup

                  self._perms = dict([[name, 1] for name in get_user_perms(self.user_id)])

              return self._perms

          elif name == 'groups':
@@ -254,7 +254,7 @@ 

              return override

          else:

              hostip = context.environ['REMOTE_ADDR']

-             #XXX - REMOTE_ADDR not promised by wsgi spec

+             # XXX - REMOTE_ADDR not promised by wsgi spec

              if hostip == '127.0.0.1':

                  hostip = socket.gethostbyname(socket.gethostname())

              return hostip
@@ -294,7 +294,7 @@ 

  

          self.checkLoginAllowed(user_id)

  

-         #create session and return

+         # create session and return

          sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_NORMAL)

          session_id = sinfo['session-id']

          context.cnx.commit()
@@ -321,7 +321,7 @@ 

          srvkt = krbV.Keytab(name=context.opts.get('AuthKeytab'), context=ctx)

  

          ac = krbV.AuthContext(context=ctx)

-         ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME

+         ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE | krbV.KRB5_AUTH_CONTEXT_DO_TIME

          conninfo = self.getConnInfo()

          ac.addrs = conninfo

  
@@ -334,12 +334,13 @@ 

  

          # Successfully authenticated via Kerberos, now log in

          if proxyuser:

-             proxyprincs = [princ.strip() for princ in context.opts.get('ProxyPrincipals', '').split(',')]

+             proxyprincs = [princ.strip()

+                            for princ in context.opts.get('ProxyPrincipals', '').split(',')]

              if cprinc.name in proxyprincs:

                  login_principal = proxyuser

              else:

                  raise koji.AuthError(

-                       'Kerberos principal %s is not authorized to log in other users' % cprinc.name)

+                     'Kerberos principal %s is not authorized to log in other users' % cprinc.name)

          else:

              login_principal = cprinc.name

  
@@ -386,7 +387,7 @@ 

          # so get the local ip via a different method

          local_ip = socket.gethostbyname(context.environ['SERVER_NAME'])

          remote_ip = context.environ['REMOTE_ADDR']

-         #XXX - REMOTE_ADDR not promised by wsgi spec

+         # XXX - REMOTE_ADDR not promised by wsgi spec

  

          # it appears that calling setports() with *any* value results in authentication

          # failing with "Incorrect net address", so return 0 (which prevents
@@ -408,12 +409,15 @@ 

              authtype = koji.AUTHTYPE_GSSAPI

          else:

              if context.environ.get('SSL_CLIENT_VERIFY') != 'SUCCESS':

-                 raise koji.AuthError('could not verify client: %s' % context.environ.get('SSL_CLIENT_VERIFY'))

+                 raise koji.AuthError('could not verify client: %s' %

+                                      context.environ.get('SSL_CLIENT_VERIFY'))

  

              name_dn_component = context.opts.get('DNUsernameComponent', 'CN')

              username = context.environ.get('SSL_CLIENT_S_DN_%s' % name_dn_component)

              if not username:

-                 raise koji.AuthError('unable to get user information (%s) from client certificate' % name_dn_component)

+                 raise koji.AuthError(

+                     'unable to get user information (%s) from client certificate' %

+                     name_dn_component)

              client_dn = context.environ.get('SSL_CLIENT_S_DN')

              authtype = koji.AUTHTYPE_SSL

  
@@ -466,11 +470,11 @@ 

          if self.master is not None:

              raise koji.GenericError("subsessions cannot become exclusive")

          if self.exclusive:

-             #shouldn't happen

+             # shouldn't happen

              raise koji.GenericError("session is already exclusive")

          user_id = self.user_id

          session_id = self.id

-         #acquire a row lock on the user entry

+         # acquire a row lock on the user entry

          q = """SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE"""

          c.execute(q, locals())

          # check that no other sessions for this user are exclusive
@@ -481,13 +485,13 @@ 

          row = c.fetchone()

          if row:

              if force:

-                 #expire the previous exclusive session and try again

+                 # expire the previous exclusive session and try again

                  (excl_id,) = row

                  q = """UPDATE sessions SET expired=TRUE,"exclusive"=NULL WHERE id=%(excl_id)s"""

                  c.execute(q, locals())

              else:

                  raise koji.AuthLockError("Cannot get exclusive session")

-         #mark this session exclusive

+         # mark this session exclusive

          q = """UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s"""

          c.execute(q, locals())

          context.cnx.commit()
@@ -503,12 +507,12 @@ 

      def logout(self):

          """expire a login session"""

          if not self.logged_in:

-             #XXX raise an error?

+             # XXX raise an error?

              raise koji.AuthError("Not logged in")

          update = """UPDATE sessions

          SET expired=TRUE,exclusive=NULL

          WHERE id = %(id)i OR master = %(id)i"""

-         #note we expire subsessions as well

+         # note we expire subsessions as well

          c = context.cnx.cursor()

          c.execute(update, {'id': self.id})

          context.cnx.commit()
@@ -517,7 +521,7 @@ 

      def logoutChild(self, session_id):

          """expire a subsession"""

          if not self.logged_in:

-             #XXX raise an error?

+             # XXX raise an error?

              raise koji.AuthError("Not logged in")

          update = """UPDATE sessions

          SET expired=TRUE,exclusive=NULL
@@ -537,8 +541,8 @@ 

  

          # generate a random key

          alnum = string.ascii_letters + string.digits

-         key = "%s-%s" %(user_id,

-                 ''.join([random.choice(alnum) for x in range(1, 20)]))

+         key = "%s-%s" % (user_id,

+                          ''.join([random.choice(alnum) for x in range(1, 20)]))

          # use sha? sha.new(phrase).hexdigest()

  

          # get a session id
@@ -546,8 +550,7 @@ 

          c.execute(q, {})

          (session_id,) = c.fetchone()

  

- 

-         #add session id to database

+         # add session id to database

          q = """

          INSERT INTO sessions (id, user_id, key, hostip, authtype, master)

          VALUES (%(session_id)i, %(user_id)i, %(key)s, %(hostip)s, %(authtype)i, %(master)s)
@@ -555,8 +558,8 @@ 

          c.execute(q, locals())

          context.cnx.commit()

  

-         #return session info

-         return {'session-id' : session_id, 'session-key' : key}

+         # return session info

+         return {'session-id': session_id, 'session-key': key}

  

      def subsession(self):

          "Create a subsession"
@@ -566,7 +569,7 @@ 

          if master is None:

              master = self.id

          return self.createSession(self.user_id, self.hostip, self.authtype,

-                     master=master)

+                                   master=master)

  

      def getPerms(self):

          if not self.logged_in:
@@ -589,7 +592,7 @@ 

      def hasGroup(self, group_id):

          if not self.logged_in:

              return False

-         #groups indexed by id

+         # groups indexed by id

          return group_id in self.groups

  

      def isUser(self, user_id):
@@ -607,7 +610,7 @@ 

              return None

          c = context.cnx.cursor()

          q = """SELECT id FROM host WHERE user_id = %(uid)d"""

-         c.execute(q, {'uid' : self.user_id})

+         c.execute(q, {'uid': self.user_id})

          r = c.fetchone()

          c.close()

          if r:
@@ -616,7 +619,7 @@ 

              return None

  

      def getHostId(self):

-         #for compatibility

+         # for compatibility

          return self.host_id

  

      def getUserId(self, username):
@@ -799,15 +802,17 @@ 

      c.execute(q, locals())

      return dict(c.fetchall())

  

+ 

  def get_user_perms(user_id):

      c = context.cnx.cursor()

      q = """SELECT name

      FROM user_perms JOIN permissions ON perm_id = permissions.id

      WHERE active = TRUE AND user_id=%(user_id)s"""

      c.execute(q, locals())

-     #return a list of permissions by name

+     # return a list of permissions by name

      return [row[0] for row in c.fetchall()]

  

+ 

  def get_user_data(user_id):

      c = context.cnx.cursor()

      fields = ('name', 'status', 'usertype')
@@ -818,28 +823,36 @@ 

          return None

      return dict(zip(fields, row))

  

+ 

  def login(*args, **opts):

      return context.session.login(*args, **opts)

  

+ 

  def krbLogin(*args, **opts):

      return context.session.krbLogin(*args, **opts)

  

+ 

  def sslLogin(*args, **opts):

      return context.session.sslLogin(*args, **opts)

  

+ 

  def logout():

      return context.session.logout()

  

+ 

  def subsession():

      return context.session.subsession()

  

+ 

  def logoutChild(session_id):

      return context.session.logoutChild(session_id)

  

+ 

  def exclusiveSession(*args, **opts):

      """Make this session exclusive"""

      return context.session.makeExclusive(*args, **opts)

  

+ 

  def sharedSession():

      """Drop out of exclusive mode"""

      return context.session.makeShared()
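The callnum comments reformatted above describe a replay guard worth spelling out; a self-contained sketch of those rules, where the exception classes and the whitelist entry are stand-ins rather than the real koji definitions:

```python
class SequenceError(Exception):
    pass

class RetryError(Exception):
    pass

RETRY_WHITELIST = ['host.taskSetWait']  # hypothetical method name

def check_callnum(lastcall, callnum, method):
    if lastcall is None or callnum is None:
        return  # nothing recorded yet, or the client sent no callnum
    if lastcall > callnum:
        # client went backwards: a replayed or out-of-order request
        raise SequenceError("%d > %d" % (lastcall, callnum))
    if lastcall == callnum and method not in RETRY_WHITELIST:
        # same callnum already committed: the previous attempt succeeded but
        # its response was lost, so blindly retrying could duplicate changes
        raise RetryError("unable to retry call %d (method %s)"
                         % (callnum, method))
```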

file modified
+3 -2
@@ -32,6 +32,7 @@ 

  class _data(object):

      pass

  

+ 

  class ThreadLocal(object):

      def __init__(self):

          object.__setattr__(self, '_tdict', {})
@@ -67,8 +68,8 @@ 

      def __str__(self):

          id = six.moves._thread.get_ident()

          tdict = object.__getattribute__(self, '_tdict')

-         return "(current thread: %s) {" % id  + \

-             ", ".join(["%s : %s" %(k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \

+         return "(current thread: %s) {" % id + \

+             ", ".join(["%s : %s" % (k, v.__dict__) for (k, v) in six.iteritems(tdict)]) + \

              "}"

  

      def _threadclear(self):
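For what the reflowed `__str__` is actually formatting, a quick demonstration of the per-thread isolation ThreadLocal provides (assumes the koji package is importable):

```python
import threading

from koji.context import ThreadLocal  # the class shown in this file

ctx = ThreadLocal()
results = []

def worker(name):
    ctx.label = name                  # stored under this thread's ident only
    results.append(ctx.label == name)

threads = [threading.Thread(target=worker, args=('t%d' % i,))
           for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert all(results)                   # no thread saw another thread's value
```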

file modified
+142 -100
@@ -40,8 +40,13 @@ 

  import koji.tasks

  import koji.xmlrpcplus

  from koji.tasks import safe_rmtree

- from koji.util import (adler32_constructor, base64encode, dslice, parseStatus,

-                        to_list)

+ from koji.util import (

+     adler32_constructor,

+     base64encode,

+     dslice,

+     parseStatus,

+     to_list

+ )

  

  

  def incremental_upload(session, fname, fd, path, retries=5, logger=None):
@@ -79,6 +84,7 @@ 

                  logger.error("Error uploading file %s to %s at offset %d" % (fname, path, offset))

                  break

  

+ 

  def fast_incremental_upload(session, fname, fd, path, retries, logger):

      """Like incremental_upload, but use the fast upload mechanism"""

  
@@ -103,8 +109,10 @@ 

                  logger.error("Error uploading file %s to %s at offset %d" % (fname, path, offset))

                  break

  

- def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, append=0, chroot=None, env=None):

-     """Run command with output redirected.  If chroot is not None, chroot to the directory specified

+ 

+ def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, append=0,

+                chroot=None, env=None):

+     """Run command with output redirected. If chroot is not None, chroot to the directory specified

      before running the command."""

      pid = os.fork()

      fd = None
@@ -131,7 +139,7 @@ 

              if env:

                  environ.update(env)

              os.execvpe(path, args, environ)

-         except:

+         except BaseException:

              msg = ''.join(traceback.format_exception(*sys.exc_info()))

              if fd:

                  try:
@@ -140,7 +148,7 @@ 

                      else:

                          os.write(fd, msg)

                      os.close(fd)

-                 except:

+                 except Exception:

                      pass

              print(msg)

              os._exit(1)
@@ -159,7 +167,7 @@ 

                  except IOError:

                      # will happen if the forked process has not created the logfile yet

                      continue

-                 except:

+                 except Exception:

                      print('Error reading log file: %s' % outfile)

                      print(''.join(traceback.format_exception(*sys.exc_info())))

  
@@ -171,7 +179,7 @@ 

                  return status[1]

  

  

- ## BEGIN kojikamid dup

+ # BEGIN kojikamid dup #

  

  class SCM(object):

      "SCM abstraction class"
@@ -195,7 +203,7 @@ 

          # otherwise not valid

          if strict:

              raise koji.GenericError('Invalid scheme in scm url. Valid schemes '

-                     'are: %s' % ' '.join(sorted(schemes)))

+                                     'are: %s' % ' '.join(sorted(schemes)))

          else:

              return False

  
@@ -280,11 +288,13 @@ 

          elif len(userhost) > 2:

              raise koji.GenericError('Invalid username@hostname specified: %s' % netloc)

          if not netloc:

-             raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the netloc element.' % self.url)

+             raise koji.GenericError(

+                 'Unable to parse SCM URL: %s . Could not find the netloc element.' % self.url)

  

          # check for empty path before we apply normpath

          if not path:

-             raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the path element.' % self.url)

+             raise koji.GenericError(

+                 'Unable to parse SCM URL: %s . Could not find the path element.' % self.url)

  

          path = os.path.normpath(path)

  
@@ -299,14 +309,19 @@ 

              # any such url should have already been caught by is_scm_url

              raise koji.GenericError('Invalid SCM URL. Path should begin with /: %s) ')

  

-         # check for validity: params should be empty, query may be empty, everything else should be populated

+         # check for validity: params should be empty, query may be empty, everything else should be

+         # populated

          if params:

-             raise koji.GenericError('Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params))

-         if not scheme:  #pragma: no cover

+             raise koji.GenericError(

+                 'Unable to parse SCM URL: %s . Params element %s should be empty.' %

+                 (self.url, params))

+         if not scheme:  # pragma: no cover

              # should not happen because of is_scm_url check earlier

-             raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)

+             raise koji.GenericError(

+                 'Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url)

          if not fragment:

-             raise koji.GenericError('Unable to parse SCM URL: %s . Could not find the fragment element.' % self.url)

+             raise koji.GenericError(

+                 'Unable to parse SCM URL: %s . Could not find the fragment element.' % self.url)

  

          # return parsed values

          return (scheme, user, netloc, path, query, fragment)
@@ -349,7 +364,8 @@ 

          for allowed_scm in allowed.split():

              scm_tuple = allowed_scm.split(':')

              if len(scm_tuple) < 2:

-                 self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' % allowed_scm)

+                 self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' %

+                                  allowed_scm)

                  continue

              host_pat = scm_tuple[0]

              repo_pat = scm_tuple[1]
@@ -371,11 +387,13 @@ 

                      if scm_tuple[3]:

                          self.source_cmd = scm_tuple[3].split(',')

                      else:

-                         # there was nothing after the trailing :, so they don't want to run a source_cmd at all

+                         # there was nothing after the trailing :,

+                         # so they don't want to run a source_cmd at all

                          self.source_cmd = None

                  break

          if not is_allowed:

-             raise koji.BuildError('%s:%s is not in the list of allowed SCMs' % (self.host, self.repository))

+             raise koji.BuildError(

+                 '%s:%s is not in the list of allowed SCMs' % (self.host, self.repository))

  

      def checkout(self, scmdir, session=None, uploadpath=None, logfile=None):

          """
@@ -395,29 +413,34 @@ 

          update_checkout_cmd = None

          update_checkout_dir = None

          env = None

+ 

          def _run(cmd, chdir=None, fatal=False, log=True, _count=[0]):

              if globals().get('KOJIKAMID'):

-                 #we've been inserted into kojikamid, use its run()

-                 return run(cmd, chdir=chdir, fatal=fatal, log=log)

+                 # we've been inserted into kojikamid, use its run()

+                 return run(cmd, chdir=chdir, fatal=fatal, log=log)  # noqa: F821

              else:

                  append = (_count[0] > 0)

                  _count[0] += 1

                  if log_output(session, cmd[0], cmd, logfile, uploadpath,

                                cwd=chdir, logerror=1, append=append, env=env):

-                     raise koji.BuildError('Error running %s command "%s", see %s for details' % \

-                         (self.scmtype, ' '.join(cmd), os.path.basename(logfile)))

+                     raise koji.BuildError('Error running %s command "%s", see %s for details' %

+                                           (self.scmtype, ' '.join(cmd), os.path.basename(logfile)))

  

          if self.scmtype == 'CVS':

-             pserver = ':pserver:%s@%s:%s' % ((self.user or 'anonymous'), self.host, self.repository)

-             module_checkout_cmd = ['cvs', '-d', pserver, 'checkout', '-r', self.revision, self.module]

+             pserver = ':pserver:%s@%s:%s' % ((self.user or 'anonymous'), self.host,

+                                              self.repository)

+             module_checkout_cmd = ['cvs', '-d', pserver, 'checkout', '-r', self.revision,

+                                    self.module]

              common_checkout_cmd = ['cvs', '-d', pserver, 'checkout', 'common']

  

          elif self.scmtype == 'CVS+SSH':

              if not self.user:

-                 raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)

+                 raise koji.BuildError(

+                     'No user specified for repository access scheme: %s' % self.scheme)

  

              cvsserver = ':ext:%s@%s:%s' % (self.user, self.host, self.repository)

-             module_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', '-r', self.revision, self.module]

+             module_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', '-r', self.revision,

+                                    self.module]

              common_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', 'common']

              env = {'CVS_RSH': 'ssh'}

  
@@ -445,14 +468,16 @@ 

              update_checkout_cmd = ['git', 'reset', '--hard', self.revision]

              update_checkout_dir = sourcedir

  

-             # self.module may be empty, in which case the specfile should be in the top-level directory

+             # self.module may be empty, in which case the specfile should be in the top-level

+             # directory

              if self.module:

                  # Treat the module as a directory inside the git repository

                  sourcedir = '%s/%s' % (sourcedir, self.module)

  

          elif self.scmtype == 'GIT+SSH':

              if not self.user:

-                 raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)

+                 raise koji.BuildError(

+                     'No user specified for repository access scheme: %s' % self.scheme)

              gitrepo = 'git+ssh://%s@%s%s' % (self.user, self.host, self.repository)

              commonrepo = os.path.dirname(gitrepo) + '/common'

              checkout_path = os.path.basename(self.repository)
@@ -473,7 +498,8 @@ 

              update_checkout_cmd = ['git', 'reset', '--hard', self.revision]

              update_checkout_dir = sourcedir

  

-             # self.module may be empty, in which case the specfile should be in the top-level directory

+             # self.module may be empty, in which case the specfile should be in the top-level

+             # directory

              if self.module:

                  # Treat the module as a directory inside the git repository

                  sourcedir = '%s/%s' % (sourcedir, self.module)
@@ -484,15 +510,18 @@ 

                  scheme = scheme.split('+')[1]

  

              svnserver = '%s%s%s' % (scheme, self.host, self.repository)

-             module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]

+             module_checkout_cmd = ['svn', 'checkout', '-r', self.revision,

+                                    '%s/%s' % (svnserver, self.module), self.module]

              common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]

  

          elif self.scmtype == 'SVN+SSH':

              if not self.user:

-                 raise koji.BuildError('No user specified for repository access scheme: %s' % self.scheme)

+                 raise koji.BuildError(

+                     'No user specified for repository access scheme: %s' % self.scheme)

  

              svnserver = 'svn+ssh://%s@%s%s' % (self.user, self.host, self.repository)

-             module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]

+             module_checkout_cmd = ['svn', 'checkout', '-r', self.revision,

+                                    '%s/%s' % (svnserver, self.module), self.module]

              common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]

  

          else:
@@ -505,8 +534,10 @@ 

              # Currently only required for GIT checkouts

              # Run the command in the directory the source was checked out into

              if self.scmtype.startswith('GIT') and globals().get('KOJIKAMID'):

-                 _run(['git', 'config', 'core.autocrlf', 'true'], chdir=update_checkout_dir, fatal=True)

-                 _run(['git', 'config', 'core.safecrlf', 'true'], chdir=update_checkout_dir, fatal=True)

+                 _run(['git', 'config', 'core.autocrlf', 'true'],

+                      chdir=update_checkout_dir, fatal=True)

+                 _run(['git', 'config', 'core.safecrlf', 'true'],

+                      chdir=update_checkout_dir, fatal=True)

              _run(update_checkout_cmd, chdir=update_checkout_dir, fatal=True)

  

          if self.use_common and not globals().get('KOJIKAMID'):
@@ -546,7 +577,7 @@ 

              # just use the same url

              r['source'] = self.url

          return r

- ## END kojikamid dup

+ # END kojikamid dup #

  

  

  class TaskManager(object):
@@ -575,7 +606,8 @@ 

  

      def registerHandler(self, entry):

          """register and index task handler"""

-         if isinstance(entry, type(koji.tasks.BaseTaskHandler)) and issubclass(entry, koji.tasks.BaseTaskHandler):

+         if isinstance(entry, type(koji.tasks.BaseTaskHandler)) and \

+                 issubclass(entry, koji.tasks.BaseTaskHandler):

              for method in entry.Methods:

                  self.handlers[method] = entry

  
@@ -613,7 +645,7 @@ 

  

          If nolocal is True, do not try to scan local buildroots.

          """

-         #query buildroots in db that are not expired

+         # query buildroots in db that are not expired

          states = [koji.BR_STATES[x] for x in ('INIT', 'WAITING', 'BUILDING')]

          db_br = self.session.listBuildroots(hostID=self.host_id, state=tuple(states))

          # index by id
@@ -627,10 +659,12 @@ 

                  self.logger.warn("Expiring taskless buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)

                  self.session.host.setBuildRootState(id, st_expired)

              elif task_id not in self.tasks:

-                 #task not running - expire the buildroot

-                 #TODO - consider recycling hooks here (with strong sanity checks)

+                 # task not running - expire the buildroot

+                 # TODO - consider recycling hooks here (with strong sanity checks)

                  self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)

-                 self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id, to_list(self.tasks.keys())))

+                 self.logger.debug(

+                     "Buildroot task: %r, Current tasks: %r" %

+                     (task_id, to_list(self.tasks.keys())))

                  self.session.host.setBuildRootState(id, st_expired)

                  continue

          if nolocal:
@@ -640,13 +674,13 @@ 

          local_only = [id for id in local_br if id not in db_br]

          if local_only:

              missed_br = self.session.listBuildroots(buildrootID=tuple(local_only))

-             #get all the task info in one call

+             # get all the task info in one call

              tasks = []

              for br in missed_br:

                  task_id = br['task_id']

                  if task_id:

                      tasks.append(task_id)

-             #index

+             # index

              missed_br = dict([(row['id'], row) for row in missed_br])

              tasks = dict([(row['id'], row) for row in self.session.getTaskInfo(tasks)])

              for id in local_only:
@@ -670,8 +704,9 @@ 

                      if not task:

                          self.logger.warn("%s: invalid task %s" % (desc, br['task_id']))

                          continue

-                     if (task['state'] == koji.TASK_STATES['FAILED'] and age < self.options.failed_buildroot_lifetime):

-                         #XXX - this could be smarter

+                     if task['state'] == koji.TASK_STATES['FAILED'] and \

+                             age < self.options.failed_buildroot_lifetime:

+                         # XXX - this could be smarter

                          # keep buildroots for failed tasks around for a little while

                          self.logger.debug("Keeping failed buildroot: %s" % desc)

                          continue
@@ -689,17 +724,17 @@ 

                              continue

                      else:

                          age = min(age, time.time() - st.st_mtime)

-                 #note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153)

-                 #If rpmlib is installing in this chroot, removing it entirely

-                 #can lead to a world of hurt.

-                 #We remove the rootdir contents but leave the rootdir unless it

-                 #is really old

-                 if age > 3600*24:

-                     #dir untouched for a day

+                 # note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153

+                 # If rpmlib is installing in this chroot, removing it entirely

+                 # can lead to a world of hurt.

+                 # We remove the rootdir contents but leave the rootdir unless it

+                 # is really old

+                 if age > 3600 * 24:

+                     # dir untouched for a day

                      self.logger.info("Removing buildroot: %s" % desc)

                      if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0:

                          continue

-                     #also remove the config

+                     # also remove the config

                      try:

                          os.unlink(data['cfg'])

                      except OSError as e:
@@ -726,7 +761,7 @@ 

          self.logger.debug("Expired/stray buildroots: %d" % len(local_only))

  

      def _scanLocalBuildroots(self):

-         #XXX

+         # XXX

          configdir = '/etc/mock/koji'

          buildroots = {}

          for f in os.listdir(configdir):
@@ -785,13 +820,13 @@ 

              # by this host.

              id = task['id']

              if id not in self.pids:

-                 #We don't have a process for this

-                 #Expected to happen after a restart, otherwise this is an error

+                 # We don't have a process for this

+                 # Expected to happen after a restart, otherwise this is an error

                  stale.append(id)

                  continue

              tasks[id] = task

              if task.get('alert', False):

-                 #wake up the process

+                 # wake up the process

                  self.logger.info("Waking up task: %r" % task)

                  os.kill(self.pids[id], signal.SIGUSR2)

              if not task['waiting']:
@@ -801,8 +836,8 @@ 

          self.tasks = tasks

          self.logger.debug("Current tasks: %r" % self.tasks)

          if len(stale) > 0:

-             #A stale task is one which is opened to us, but we know nothing

-             #about). This will happen after a daemon restart, for example.

+             # A stale task is one which is opened to us, but we know nothing

+             # about). This will happen after a daemon restart, for example.

              self.logger.info("freeing stale tasks: %r" % stale)

              self.session.host.freeTasks(stale)

          for id, pid in list(self.pids.items()):
@@ -844,15 +879,15 @@ 

          self.logger.debug("Load Data:")

          self.logger.debug("  hosts: %r" % hosts)

          self.logger.debug("  tasks: %r" % tasks)

-         #now we organize this data into channel-arch bins

-         bin_hosts = {}  #hosts indexed by bin

-         bins = {}       #bins for this host

+         # now we organize this data into channel-arch bins

+         bin_hosts = {}  # hosts indexed by bin

+         bins = {}  # bins for this host

          our_avail = None

          for host in hosts:

              host['bins'] = []

              if host['id'] == self.host_id:

-                 #note: task_load reported by server might differ from what we

-                 #sent due to precision variation

+                 # note: task_load reported by server might differ from what we

+                 # sent due to precision variation

                  our_avail = host['capacity'] - host['task_load']

              for chan in host['channels']:

                  for arch in host['arches'].split() + ['noarch']:
@@ -867,7 +902,7 @@ 

          elif not bins:

              self.logger.info("No bins for this host. Missing channel/arch config?")

              # Note: we may still take an assigned task below

-         #sort available capacities for each of our bins

+         # sort available capacities for each of our bins

          avail = {}

          for bin in bins:

              avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]]
@@ -889,7 +924,7 @@ 

              if task['state'] == koji.TASK_STATES['ASSIGNED']:

                  self.logger.debug("task is assigned")

                  if self.host_id == task['host_id']:

-                     #assigned to us, we can take it regardless

+                     # assigned to us, we can take it regardless

                      if self.takeTask(task):

                          return True

              elif task['state'] == koji.TASK_STATES['FREE']:
@@ -897,18 +932,18 @@ 

                  self.logger.debug("task is free, bin=%r" % bin)

                  if bin not in bins:

                      continue

-                 #see where our available capacity is compared to other hosts for this bin

-                 #(note: the hosts in this bin are exactly those that could

-                 #accept this task)

+                 # see where our available capacity is compared to other hosts for this bin

+                 # (note: the hosts in this bin are exactly those that could

+                 # accept this task)

                  bin_avail = avail.get(bin, [0])

                  if self.checkAvailDelay(task, bin_avail, our_avail):

                      # decline for now and give the upper half a chance

                      continue

-                 #otherwise, we attempt to open the task

+                 # otherwise, we attempt to open the task

                  if self.takeTask(task):

                      return True

              else:

-                 #should not happen

+                 # should not happen

                  raise Exception("Invalid task state reported by server")

          return False

  
@@ -940,7 +975,7 @@ 

          # return True if we should delay

          if now - ts < delay:

              self.logger.debug("skipping task %i, age=%s rank=%s"

-                                 % (task['id'], int(now - ts), rank))

+                               % (task['id'], int(now - ts), rank))

              return True

          # otherwise

          del self.skipped_tasks[task['id']]
@@ -968,11 +1003,11 @@ 

          try:

              (childpid, status) = os.waitpid(pid, os.WNOHANG)

          except OSError as e:

-             #check errno

+             # check errno

              if e.errno != errno.ECHILD:

-                 #should not happen

+                 # should not happen

                  raise

-             #otherwise assume the process is gone

+             # otherwise assume the process is gone

              self.logger.info("%s: %s" % (prefix, e))

              return True

          if childpid != 0:
@@ -996,7 +1031,9 @@ 

                  self.logger.info('%s (pid %i, taskID %i) is running' % (execname, pid, task_id))

              else:

                  if signaled:

-                     self.logger.info('%s (pid %i, taskID %i) was killed by signal %i' % (execname, pid, task_id, sig))

+                     self.logger.info(

+                         '%s (pid %i, taskID %i) was killed by signal %i' %

+                         (execname, pid, task_id, sig))

                  else:

                      self.logger.info('%s (pid %i, taskID %i) exited' % (execname, pid, task_id))

                  return True
@@ -1033,7 +1070,8 @@ 

              if not os.path.isfile(proc_path):

                  return None

              proc_file = open(proc_path)

-             procstats = [not field.isdigit() and field or int(field) for field in proc_file.read().split()]

+             procstats = [not field.isdigit() and field or int(field)

+                          for field in proc_file.read().split()]

              proc_file.close()

  

              cmd_path = '/proc/%i/cmdline' % pid
@@ -1076,9 +1114,9 @@ 

          while parents:

              for ppid in parents[:]:

                  for procstats in statsByPPID.get(ppid, []):

-                     # get the /proc entries with ppid as their parent, and append their pid to the list,

-                     # then recheck for their children

-                     # pid is the 0th field, ppid is the 3rd field

+                     # get the /proc entries with ppid as their parent, and append their pid to the

+                     # list, then recheck for their children; pid is the 0th field, ppid is the 3rd

+                     # field

                      pids.append((procstats[0], procstats[1]))

                      parents.append(procstats[0])

                  parents.remove(ppid)
@@ -1118,15 +1156,15 @@ 

          if children:

              self._killChildren(task_id, children, sig=signal.SIGKILL, timeout=3.0)

  

-         #expire the task's subsession

+         # expire the task's subsession

          session_id = self.subsessions.get(task_id)

          if session_id:

              self.logger.info("Expiring subsession %i (task %i)" % (session_id, task_id))

              try:

                  self.session.logoutChild(session_id)

                  del self.subsessions[task_id]

-             except:

-                 #not much we can do about it

+             except Exception:

+                 # not much we can do about it

                  pass

          if wait:

              return self._waitTask(task_id, pid)
@@ -1146,7 +1184,8 @@ 

          availableMB = available // 1024 // 1024

          self.logger.debug("disk space available in '%s': %i MB", br_path, availableMB)

          if availableMB < self.options.minspace:

-             self.status = "Insufficient disk space at %s: %i MB, %i MB required" % (br_path, availableMB, self.options.minspace)

+             self.status = "Insufficient disk space at %s: %i MB, %i MB required" % \

+                           (br_path, availableMB, self.options.minspace)

              self.logger.warn(self.status)

              return False

          return True
@@ -1181,7 +1220,9 @@ 

              return False

          if self.task_load > self.hostdata['capacity']:

              self.status = "Over capacity"

-             self.logger.info("Task load (%.2f) exceeds capacity (%.2f)" % (self.task_load, self.hostdata['capacity']))

+             self.logger.info(

+                 "Task load (%.2f) exceeds capacity (%.2f)" %

+                 (self.task_load, self.hostdata['capacity']))

              return False

          if len(self.tasks) >= self.options.maxjobs:

              # This serves as a backup to the capacity check and prevents
@@ -1200,7 +1241,7 @@ 

              self.status = "Load average %.2f > %.2f" % (loadavgs[0], maxload)

              self.logger.info(self.status)

              return False

-         #XXX - add more checks

+         # XXX - add more checks

          return True

  

      def takeTask(self, task):
@@ -1225,12 +1266,13 @@ 

                  valid_host = handler.checkHost(self.hostdata)

              except (SystemExit, KeyboardInterrupt):

                  raise

-             except:

+             except Exception:

                  valid_host = False

                  self.logger.warn('Error during host check')

                  self.logger.warn(''.join(traceback.format_exception(*sys.exc_info())))

              if not valid_host:

-                 self.logger.info('Skipping task %s (%s) due to host check', task['id'], task['method'])

+                 self.logger.info(

+                     'Skipping task %s (%s) due to host check', task['id'], task['method'])

                  return False

          data = self.session.host.openTask(task['id'])

          if data is None:
@@ -1250,7 +1292,7 @@ 

              if state != 'OPEN':

                  self.logger.warn("Task %i changed is %s", task_id, state)

                  return False

-             #otherwise...

+             # otherwise...

              raise

          if handler.Foreground:

              self.logger.info("running task in foreground")
@@ -1263,27 +1305,27 @@ 

          return True

  

      def forkTask(self, handler):

-         #get the subsession before we fork

+         # get the subsession before we fork

          newhub = self.session.subsession()

          session_id = newhub.sinfo['session-id']

          pid = os.fork()

          if pid:

              newhub._forget()

              return pid, session_id

-         #in no circumstance should we return after the fork

-         #nor should any exceptions propagate past here

+         # in no circumstance should we return after the fork

+         # nor should any exceptions propagate past here

          try:

              self.session._forget()

-             #set process group

+             # set process group

              os.setpgrp()

-             #use the subsession

+             # use the subsession

              self.session = newhub

              handler.session = self.session

-             #set a do-nothing handler for sigusr2

+             # set a do-nothing handler for sigusr2

              signal.signal(signal.SIGUSR2, lambda *args: None)

              self.runTask(handler)

          finally:

-             #diediedie

+             # diediedie

              try:

                  self.session.logout()

              finally:
@@ -1302,20 +1344,20 @@ 

              tb = ''.join(traceback.format_exception(*sys.exc_info())).replace(r"\n", "\n")

              self.logger.warn("FAULT:\n%s" % tb)

          except (SystemExit, koji.tasks.ServerExit, KeyboardInterrupt):

-             #we do not trap these

+             # we do not trap these

              raise

          except koji.tasks.ServerRestart:

-             #freeing this task will allow the pending restart to take effect

+             # freeing this task will allow the pending restart to take effect

              self.session.host.freeTasks([handler.id])

              return

-         except:

+         except Exception:

              tb = ''.join(traceback.format_exception(*sys.exc_info()))

              self.logger.warn("TRACEBACK: %s" % tb)

              # report exception back to server

              e_class, e = sys.exc_info()[:2]

              faultCode = getattr(e_class, 'faultCode', 1)

              if issubclass(e_class, koji.GenericError):

-                 #just pass it through

+                 # just pass it through

                  tb = str(e)

              response = koji.xmlrpcplus.dumps(koji.xmlrpcplus.Fault(faultCode, tb))

  

file modified
+11 -5
@@ -55,6 +55,7 @@ 

  # but play it safe anyway.

  _DBconn = context.ThreadLocal()

  

+ 

  class DBWrapper:

      def __init__(self, cnx):

          self.cnx = cnx
@@ -75,8 +76,8 @@ 

          if not self.cnx:

              raise Exception('connection is closed')

          self.cnx.cursor().execute('ROLLBACK')

-         #We do this rather than cnx.rollback to avoid opening a new transaction

-         #If our connection gets recycled cnx.rollback will be called then.

+         # We do this rather than cnx.rollback to avoid opening a new transaction

+         # If our connection gets recycled cnx.rollback will be called then.

          self.cnx = None

  

  
@@ -104,11 +105,13 @@ 

          if hasattr(self.cursor, "mogrify"):

              quote = self.cursor.mogrify

          else:

-             quote = lambda a, b: a % b

+             def quote(a, b):

+                 return a % b

          try:

              return quote(operation, parameters)

          except Exception:

-             self.logger.exception('Unable to quote query:\n%s\nParameters: %s', operation, parameters)

+             self.logger.exception(

+                 'Unable to quote query:\n%s\nParameters: %s', operation, parameters)

              return "INVALID QUERY"

  

      def preformat(self, sql, params):
@@ -151,13 +154,16 @@ 

      if _DBopts is None:

          _DBopts = dict([i for i in opts.items() if i[1] is not None])

  

+ 

  def setDBopts(**opts):

      global _DBopts

      _DBopts = opts

  

+ 

  def getDBopts():

      return _DBopts

  

+ 

  def connect():

      logger = logging.getLogger('koji.db')

      global _DBconn
@@ -177,7 +183,7 @@ 

              return DBWrapper(conn)

          except psycopg2.Error:

              del _DBconn.conn

-     #create a fresh connection

+     # create a fresh connection

      opts = _DBopts

      if opts is None:

          opts = {}

file modified
+36 -30
@@ -34,35 +34,36 @@ 

  # the available callback hooks and a list

  # of functions to be called for each event

  callbacks = {

-     'prePackageListChange':   [],

-     'postPackageListChange':  [],

-     'preTaskStateChange':     [],

-     'postTaskStateChange':    [],

-     'preBuildStateChange':    [],

-     'postBuildStateChange':   [],

-     'preImport':              [],

-     'postImport':             [],

-     'preRPMSign':             [],

-     'postRPMSign':            [],

-     'preTag':                 [],

-     'postTag':                [],

-     'preUntag':               [],

-     'postUntag':              [],

-     'preRepoInit':            [],

-     'postRepoInit':           [],

-     'preRepoDone':            [],

-     'postRepoDone':           [],

-     'preCommit':              [],

-     'postCommit':             [],

-     'preSCMCheckout':         [],

-     'postSCMCheckout':        [],

-     }

+     'prePackageListChange': [],

+     'postPackageListChange': [],

+     'preTaskStateChange': [],

+     'postTaskStateChange': [],

+     'preBuildStateChange': [],

+     'postBuildStateChange': [],

+     'preImport': [],

+     'postImport': [],

+     'preRPMSign': [],

+     'postRPMSign': [],

+     'preTag': [],

+     'postTag': [],

+     'preUntag': [],

+     'postUntag': [],

+     'preRepoInit': [],

+     'postRepoInit': [],

+     'preRepoDone': [],

+     'postRepoDone': [],

+     'preCommit': [],

+     'postCommit': [],

+     'preSCMCheckout': [],

+     'postSCMCheckout': [],

+ }

+ 

  

  class PluginTracker(object):

  

      def __init__(self, path=None, prefix='_koji_plugin__'):

          self.searchpath = path

-         #prefix should not have a '.' in it, this can cause problems.

+         # prefix should not have a '.' in it; this can cause problems.

          self.prefix = prefix

          self.plugins = {}

  
@@ -71,9 +72,9 @@ 

              return self.plugins[name]

          mod_name = name

          if self.prefix:

-             #mod_name determines how the module is named in sys.modules

-             #Using a prefix helps prevent overlap with other modules

-             #(no '.' -- it causes problems)

+             # mod_name determines how the module is named in sys.modules

+             # Using a prefix helps prevent overlap with other modules

+             # (no '.' -- it causes problems)

              mod_name = self.prefix + name

          if mod_name in sys.modules and not reload:

              raise koji.PluginError('module name conflict: %s' % mod_name)
@@ -113,6 +114,7 @@ 

      setattr(f, 'exported', True)

      return f

  

+ 

  def export_cli(f):

      """a decorator that marks a function as exported for CLI

  
@@ -122,6 +124,7 @@ 

      setattr(f, 'exported_cli', True)

      return f

  

+ 

  def export_as(alias):

      """returns a decorator that marks a function as exported and gives it an alias

  
@@ -133,6 +136,7 @@ 

          return f

      return dec

  

+ 

  def export_in(module, alias=None):

      """returns a decorator that marks a function as exported with a module prepended

  
@@ -150,6 +154,7 @@ 

          return f

      return dec

  

+ 

  def callback(*cbtypes):

      """A decorator that indicates a function is a callback.

      cbtypes is a list of callback types to register for.  Valid
@@ -162,6 +167,7 @@ 

          return f

      return dec

  

+ 

  def ignore_error(f):

      """a decorator that marks a callback as ok to fail

  
@@ -178,7 +184,7 @@ 

  

  

  def register_callback(cbtype, func):

-     if not cbtype in callbacks:

+     if cbtype not in callbacks:

          raise koji.PluginError('"%s" is not a valid callback type' % cbtype)

      if not callable(func):

          raise koji.PluginError('%s is not callable' % getattr(func, '__name__', 'function'))
@@ -186,14 +192,14 @@ 

  

  

  def run_callbacks(cbtype, *args, **kws):

-     if not cbtype in callbacks:

+     if cbtype not in callbacks:

          raise koji.PluginError('"%s" is not a valid callback type' % cbtype)

      cache = {}

      for func in callbacks[cbtype]:

          cb_args, cb_kwargs = _fix_cb_args(func, args, kws, cache)

          try:

              func(cbtype, *cb_args, **cb_kwargs)

-         except:

+         except Exception:

              msg = 'Error running %s callback from %s' % (cbtype, func.__module__)

              if getattr(func, 'failure_is_an_option', False):

                  logging.getLogger('koji.plugin').warn(msg, exc_info=True)

file modified
+17 -13
@@ -31,7 +31,7 @@ 

  class BaseSimpleTest(object):

      """Abstract base class for simple tests"""

  

-     #Provide the name of the test

+     # Provide the name of the test

      name = None

  

      def __init__(self, str):
@@ -50,24 +50,26 @@ 

  

  class TrueTest(BaseSimpleTest):

      name = 'true'

+ 

      def run(self, data):

          return True

  

  

  class FalseTest(BaseSimpleTest):

      name = 'false'

+ 

      def run(self, data):

          return False

  

  

  class AllTest(TrueTest):

      name = 'all'

-     #alias for true

+     # alias for true

  

  

  class NoneTest(FalseTest):

      name = 'none'

-     #alias for false

+     # alias for false

  

  

  class HasTest(BaseSimpleTest):
@@ -97,6 +99,7 @@ 

      """

      name = 'bool'

      field = None

+ 

      def run(self, data):

          args = self.str.split()[1:]

          if self.field is None:
@@ -121,6 +124,7 @@ 

      """

      name = 'match'

      field = None

+ 

      def run(self, data):

          args = self.str.split()[1:]

          if self.field is None:
@@ -171,7 +175,7 @@ 

          '>=': lambda a, b: a >= b,

          '=': lambda a, b: a == b,

          '!=': lambda a, b: a != b,

-         }

+     }

  

      def __init__(self, str):

          """Read the test parameters from string"""
@@ -233,11 +237,11 @@ 

          for line in lines:

              rule = self.parse_line(line)

              if rule is None:

-                 #blank/etc

+                 # blank/etc

                  continue

              tests, negate, action = rule

              if action == '{':

-                 #nested rules

+                 # nested rules

                  child = []

                  cursor.append([tests, negate, child])

                  stack.append(cursor)
@@ -275,11 +279,11 @@ 

          """

          line = line.split('#', 1)[0].strip()

          if not line:

-             #blank or all comment

+             # blank or all comment

              return None

          if line == '}':

              return None, False, '}'

-             #?? allow }} ??

+             # ?? allow }} ??

          negate = False

          pos = line.rfind('::')

          if pos == -1:
@@ -288,7 +292,7 @@ 

                  raise Exception("bad policy line: %s" % line)

              negate = True

          tests = line[:pos]

-         action = line[pos+2:]

+         action = line[pos + 2:]

          tests = [self.get_test_handler(x) for x in tests.split('&&')]

          action = action.strip()

          # just return action = { for nested rules
@@ -328,7 +332,7 @@ 

                  if not check:

                      break

              else:

-                 #all tests in current rule passed

+                 # all tests in current rule passed

                  value = True

              if negate:

                  value = not value
@@ -393,11 +397,11 @@ 

              if isinstance(value, type(BaseSimpleTest)) and issubclass(value, BaseSimpleTest):

                  name = getattr(value, 'name', None)

                  if not name:

-                     #use the class name

+                     # use the class name

                      name = key

-                     #but trim 'Test' from the end

+                     # but trim 'Test' from the end

                      if name.endswith('Test') and len(name) > 4:

                          name = name[:-4]

                  ret.setdefault(name, value)

-                 #...so first test wins in case of name overlap

+                 # ...so first test wins in case of name overlap

      return ret

file modified
+44 -42
@@ -36,57 +36,58 @@ 

              return o.decode('utf-8')

          return json.JSONEncoder.default(self, o)

  

+ 

  class Rpmdiff:

  

      # constants

  

-     TAGS = ( rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,

-              rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,

-              rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,

-              rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,

-              rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)

+     TAGS = (rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,

+             rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,

+             rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,

+             rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,

+             rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)

  

-     PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')

+     PRCO = ('REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')

  

-     #{fname : (size, mode, mtime, flags, dev, inode,

+     # {fname : (size, mode, mtime, flags, dev, inode,

      #          nlink, state, vflags, user, group, digest)}

-     __FILEIDX = [ ['S', 0],

-                   ['M', 1],

-                   ['5', 11],

-                   ['D', 4],

-                   ['N', 6],

-                   ['L', 7],

-                   ['V', 8],

-                   ['U', 9],

-                   ['G', 10],

-                   ['F', 3],

-                   ['T', 2] ]

+     __FILEIDX = [['S', 0],

+                  ['M', 1],

+                  ['5', 11],

+                  ['D', 4],

+                  ['N', 6],

+                  ['L', 7],

+                  ['V', 8],

+                  ['U', 9],

+                  ['G', 10],

+                  ['F', 3],

+                  ['T', 2]]

  

      try:

          if rpm.RPMSENSE_SCRIPT_PRE:

-             PREREQ_FLAG=rpm.RPMSENSE_PREREQ|rpm.RPMSENSE_SCRIPT_PRE|\

-                 rpm.RPMSENSE_SCRIPT_POST|rpm.RPMSENSE_SCRIPT_PREUN|\

+             PREREQ_FLAG = rpm.RPMSENSE_PREREQ | rpm.RPMSENSE_SCRIPT_PRE |\

+                 rpm.RPMSENSE_SCRIPT_POST | rpm.RPMSENSE_SCRIPT_PREUN |\

                  rpm.RPMSENSE_SCRIPT_POSTUN

      except AttributeError:

          try:

-             PREREQ_FLAG=rpm.RPMSENSE_PREREQ

-         except:

-             #(proyvind): This seems ugly, but then again so does

+             PREREQ_FLAG = rpm.RPMSENSE_PREREQ

+         except Exception:

+             # (proyvind): This seems ugly, but then again so does

              #            this whole check as well.

-             PREREQ_FLAG=False

+             PREREQ_FLAG = False

  

      DEPFORMAT = '%-12s%s %s %s %s'

      FORMAT = '%-12s%s'

  

-     ADDED   = 'added'

+     ADDED = 'added'

      REMOVED = 'removed'

  

      # code starts here

  

      def __init__(self, old, new, ignore=None):

          self.result = []

-         self.old_data = { 'tags': {}, 'ignore': ignore }

-         self.new_data = { 'tags': {}, 'ignore': ignore }

+         self.old_data = {'tags': {}, 'ignore': ignore}

+         self.new_data = {'tags': {}, 'ignore': ignore}

          if ignore is None:

              ignore = set()

          else:
@@ -103,24 +104,23 @@ 

              self.new_data['tags'][tag] = new[tag]

              if old_tag != new_tag:

                  tagname = rpm.tagnames[tag]

-                 if old_tag == None:

+                 if old_tag is None:

                      self.__add(self.FORMAT, (self.ADDED, tagname))

-                 elif new_tag == None:

+                 elif new_tag is None:

                      self.__add(self.FORMAT, (self.REMOVED, tagname))

                  else:

                      self.__add(self.FORMAT, ('S.5........', tagname))

  

          # compare Provides, Requires, ...

-         for  tag in self.PRCO:

+         for tag in self.PRCO:

              self.__comparePRCOs(old, new, tag)

  

          # compare the files

  

          old_files_dict = self.__fileIteratorToDict(old.fiFromHeader())

          new_files_dict = self.__fileIteratorToDict(new.fiFromHeader())

-         files = list(set(itertools.chain(six.iterkeys(old_files_dict),

-                                          six.iterkeys(new_files_dict))))

-         files.sort()

+         files = sorted(set(itertools.chain(six.iterkeys(old_files_dict),

+                                            six.iterkeys(new_files_dict))))

          self.old_data['files'] = old_files_dict

          self.new_data['files'] = new_files_dict

  
@@ -183,16 +183,18 @@ 

  

      # compare Provides, Requires, Conflicts, Obsoletes

      def __comparePRCOs(self, old, new, name):

-         oldflags = old[name[:-1]+'FLAGS']

-         newflags = new[name[:-1]+'FLAGS']

+         oldflags = old[name[:-1] + 'FLAGS']

+         newflags = new[name[:-1] + 'FLAGS']

          # fix buggy rpm binding not returning list for single entries

-         if not isinstance(oldflags, list): oldflags = [ oldflags ]

-         if not isinstance(newflags, list): newflags = [ newflags ]

+         if not isinstance(oldflags, list):

+             oldflags = [oldflags]

+         if not isinstance(newflags, list):

+             newflags = [newflags]

  

-         o = list(zip(old[name], oldflags, old[name[:-1]+'VERSION']))

-         n = list(zip(new[name], newflags, new[name[:-1]+'VERSION']))

+         o = list(zip(old[name], oldflags, old[name[:-1] + 'VERSION']))

+         n = list(zip(new[name], newflags, new[name[:-1] + 'VERSION']))

  

-         if name == 'PROVIDES': # filter our self provide

+         if name == 'PROVIDES':  # filter out our self-provide

              oldNV = (old['name'], rpm.RPMSENSE_EQUAL,

                       "%s-%s" % (old['version'], old['release']))

              newNV = (new['name'], rpm.RPMSENSE_EQUAL,
@@ -204,7 +206,7 @@ 

          self.new_data[name] = sorted(n)

  

          for oldentry in o:

-             if not oldentry in n:

+             if oldentry not in n:

                  if name == 'REQUIRES' and oldentry[1] & self.PREREQ_FLAG:

                      tagname = 'PREREQ'

                  else:
@@ -213,7 +215,7 @@ 

                             (self.REMOVED, tagname, oldentry[0],

                              self.sense2str(oldentry[1]), oldentry[2]))

          for newentry in n:

-             if not newentry in o:

+             if newentry not in o:

                  if name == 'REQUIRES' and newentry[1] & self.PREREQ_FLAG:

                      tagname = 'PREREQ'

                  else:

file modified
+2
@@ -19,8 +19,10 @@ 

  # Authors:

  #       Mike McLean <mikem@redhat.com>

  

+ 

  class ServerError(Exception):

      """Base class for our server-side-only exceptions"""

  

+ 

  class ServerRedirect(ServerError):

      """Used to handle redirects"""

file modified
+106 -73
@@ -51,10 +51,11 @@ 

                  logger.warning('Found deleted mountpoint: %s' % path)

              mplist.append(path)

      fo.close()

-     #reverse sort so deeper dirs come first

+     # reverse sort so deeper dirs come first

      mplist.sort(reverse=True)

      return mplist

  

+ 

  def umount_all(topdir):

      "Unmount every mount under topdir"

      logger = logging.getLogger("koji.build")
@@ -64,11 +65,12 @@ 

          rv = os.spawnvp(os.P_WAIT, cmd[0], cmd)

          if rv != 0:

              raise koji.GenericError('umount failed (exit code %r) for %s' % (rv, path))

-     #check mounts again

+     # check mounts again

      remain = scan_mounts(topdir)

      if remain:

          raise koji.GenericError("Unmounting incomplete: %r" % remain)

  

+ 

  def safe_rmtree(path, unmount=False, strict=True):

      logger = logging.getLogger("koji.build")

      if unmount:
@@ -77,7 +79,7 @@ 

          logger.debug("Removing: %s" % path)

          try:

              os.remove(path)

-         except:

+         except Exception:

              if strict:

                  raise

              else:
@@ -103,6 +105,7 @@ 

      """Raised to shutdown the server"""

      pass

  

+ 

  class ServerRestart(Exception):

      """Raised to restart the server"""

      pass
@@ -115,8 +118,7 @@ 

      """

  

      # check for new style

-     if (len(params) == 1 and isinstance(params[0], dict)

-                 and '__method__' in params[0]):

+     if len(params) == 1 and isinstance(params[0], dict) and '__method__' in params[0]:

          ret = params[0].copy()

          del ret['__method__']

          return ret
@@ -144,121 +146,135 @@ 

  LEGACY_SIGNATURES = {

      # key is method name, value is list of possible signatures

      # signatures are like getargspec -- args, varargs, keywords, defaults

-     'chainbuild' : [

+     'chainbuild': [

          [['srcs', 'target', 'opts'], None, None, (None,)],

      ],

-     'waitrepo' : [

+     'waitrepo': [

          [['tag', 'newer_than', 'nvrs'], None, None, (None, None)],

      ],

-     'createLiveMedia' : [

-         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],

+     'createLiveMedia': [

+         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',

+           'opts'],

+          None, None, (None,)],

      ],

-     'createAppliance' : [

-         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],

+     'createAppliance': [

+         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',

+           'opts'],

+          None, None, (None,)],

      ],

-     'livecd' : [

+     'livecd': [

          [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],

      ],

-     'buildNotification' : [

+     'buildNotification': [

          [['recipients', 'build', 'target', 'weburl'], None, None, None],

      ],

-     'buildMaven' : [

+     'buildMaven': [

          [['url', 'build_tag', 'opts'], None, None, (None,)],

      ],

-     'build' : [

+     'build': [

          [['src', 'target', 'opts'], None, None, (None,)],

      ],

-     'buildSRPMFromSCM' : [

+     'buildSRPMFromSCM': [

          [['url', 'build_tag', 'opts'], None, None, (None,)],

      ],

-     'rebuildSRPM' : [

+     'rebuildSRPM': [

          [['srpm', 'build_tag', 'opts'], None, None, (None,)],

      ],

-     'createrepo' : [

+     'createrepo': [

          [['repo_id', 'arch', 'oldrepo'], None, None, None],

      ],

-     'livemedia' : [

+     'livemedia': [

          [['name', 'version', 'arches', 'target', 'ksfile', 'opts'], None, None, (None,)],

      ],

-     'indirectionimage' : [

+     'indirectionimage': [

          [['opts'], None, None, None],

      ],

-     'wrapperRPM' : [

+     'wrapperRPM': [

          [['spec_url', 'build_target', 'build', 'task', 'opts'], None, None, (None,)],

      ],

-     'createLiveCD' : [

-         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile', 'opts'], None, None, (None,)],

+     'createLiveCD': [

+         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'ksfile',

+           'opts'],

+          None, None, (None,)],

      ],

-     'appliance' : [

+     'appliance': [

          [['name', 'version', 'arch', 'target', 'ksfile', 'opts'], None, None, (None,)],

      ],

-     'image' : [

+     'image': [

          [['name', 'version', 'arches', 'target', 'inst_tree', 'opts'], None, None, (None,)],

      ],

-     'tagBuild' : [

-         [['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'], None, None, (False, None, False)],

+     'tagBuild': [

+         [['tag_id', 'build_id', 'force', 'fromtag', 'ignore_success'],

+          None, None, (False, None, False)],

      ],

-     'chainmaven' : [

+     'chainmaven': [

          [['builds', 'target', 'opts'], None, None, (None,)],

      ],

-     'newRepo' : [

-         [['tag', 'event', 'src', 'debuginfo', 'separate_src'], None, None, (None, False, False, False)],

+     'newRepo': [

+         [['tag', 'event', 'src', 'debuginfo', 'separate_src'],

+          None, None, (None, False, False, False)],

      ],

-     'createImage' : [

-         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info', 'inst_tree', 'opts'], None, None, (None,)],

+     'createImage': [

+         [['name', 'version', 'release', 'arch', 'target_info', 'build_tag', 'repo_info',

+           'inst_tree', 'opts'],

+          None, None, (None,)],

      ],

-     'tagNotification' : [

-         [['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info', 'ignore_success', 'failure_msg'], None, None, (None, '')],

+     'tagNotification': [

+         [['recipients', 'is_successful', 'tag_info', 'from_info', 'build_info', 'user_info',

+           'ignore_success', 'failure_msg'],

+          None, None, (None, '')],

      ],

-     'buildArch' : [

+     'buildArch': [

          [['pkg', 'root', 'arch', 'keep_srpm', 'opts'], None, None, (None,)],

      ],

-     'maven' : [

+     'maven': [

          [['url', 'target', 'opts'], None, None, (None,)],

      ],

-     'waittest' : [

+     'waittest': [

          [['count', 'seconds'], None, None, (10,)],

      ],

-     'default' : [

+     'default': [

          [[], 'args', 'opts', None],

      ],

-     'shutdown' : [

+     'shutdown': [

          [[], None, None, None],

      ],

-     'restartVerify' : [

+     'restartVerify': [

          [['task_id', 'host'], None, None, None],

      ],

-     'someMethod' : [

+     'someMethod': [

          [[], 'args', None, None],

      ],

-     'restart' : [

+     'restart': [

          [['host'], None, None, None],

      ],

-     'fork' : [

+     'fork': [

          [['n', 'm'], None, None, (5, 37)],

      ],

-     'sleep' : [

+     'sleep': [

          [['n'], None, None, None],

      ],

-     'dependantTask' : [

+     'dependantTask': [

          [['wait_list', 'task_list'], None, None, None],

      ],

-     'subtask' : [

+     'subtask': [

          [['n'], None, None, (4,)],

      ],

-     'restartHosts' : [

+     'restartHosts': [

          [['options'], None, None, (None,)],

      ],

-     'runroot' : [

-         [['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch', 'weight', 'upload_logs', 'new_chroot'], None, None, (False, [], [], None, False, None, None, False)],

+     'runroot': [

+         [['root', 'arch', 'command', 'keep', 'packages', 'mounts', 'repo_id', 'skip_setarch',

+           'weight', 'upload_logs', 'new_chroot'],

+          None, None, (False, [], [], None, False, None, None, False)],

      ],

-     'distRepo' : [

+     'distRepo': [

          [['tag', 'repo_id', 'keys', 'task_opts'], None, None, None],

      ],

-     'createdistrepo' : [

+     'createdistrepo': [

          [['tag', 'repo_id', 'arch', 'keys', 'opts'], None, None, None],

      ],

-     'saveFailedTree' : [

+     'saveFailedTree': [

          [['buildrootID', 'full'], None, None, (False,)],

      ],

  }
@@ -278,7 +294,7 @@ 

      Foreground = False

  

      def __init__(self, id, method, params, session, options, workdir=None):

-         self.id = id   #task id

+         self.id = id  # task id

          if method not in self.Methods:

              raise koji.GenericError('method "%s" is not supported' % method)

          self.method = method
@@ -340,10 +356,10 @@ 

          if self.workdir is None:

              return

          safe_rmtree(self.workdir, unmount=False, strict=True)

-         #os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])

+         # os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])

  

      def wait(self, subtasks=None, all=False, failany=False, canfail=None,

-                 timeout=None):

+              timeout=None):

          """Wait on subtasks

  

          subtasks is a list of integers (or an integer). If more than one subtask
@@ -385,7 +401,7 @@ 

          while True:

              finished, unfinished = self.session.host.taskWait(self.id)

              if len(unfinished) == 0:

-                 #all done

+                 # all done

                  break

              elif len(finished) > 0:

                  if all:
@@ -397,7 +413,9 @@ 

                                  self.session.getTaskResult(task)

                                  checked.add(task)

                              except (koji.GenericError, six.moves.xmlrpc_client.Fault):

-                                 self.logger.info("task %s failed or was canceled, cancelling unfinished tasks" % task)

+                                 self.logger.info(

+                                     "task %s failed or was canceled, cancelling unfinished tasks" %

+                                     task)

                                  self.session.cancelTaskChildren(self.id)

                                  # reraise the original error now, rather than waiting for

                                  # an error in taskWaitResults()
@@ -417,7 +435,7 @@ 

                      self.logger.info('Subtasks timed out')

                      self.session.cancelTaskChildren(self.id)

                      raise koji.GenericError('Subtasks timed out after %.1f '

-                                 'seconds' % duration)

+                                             'seconds' % duration)

              else:

                  # signal handler set by TaskManager.forkTask

                  self.logger.debug("Pausing...")
@@ -429,8 +447,7 @@ 

          if all:

              finished = subtasks

          return dict(self.session.host.taskWaitResults(self.id, finished,

-                                                     canfail=canfail))

- 

+                                                       canfail=canfail))

  

      def getUploadDir(self):

          return koji.pathinfo.taskrelpath(self.id)
@@ -561,7 +578,7 @@ 

              repo_info = self.session.getRepo(tag)

              taginfo = self.session.getTag(tag, strict=True)

              if not repo_info:

-                 #make sure there is a target

+                 # make sure there is a target

                  targets = self.session.getBuildTargets(buildTagID=taginfo['id'])

                  if not targets:

                      raise koji.BuildError('no repo (and no target) for tag %s' % taginfo['name'])
@@ -579,7 +596,6 @@ 

              repo_info = self.wait(task_id)[task_id]

          return repo_info

  

- 

      def run_callbacks(self, plugin, *args, **kwargs):

          if 'taskinfo' not in kwargs:

              try:
@@ -595,6 +611,7 @@ 

  class FakeTask(BaseTaskHandler):

      Methods = ['someMethod']

      Foreground = True

+ 

      def handler(self, *args):

          self.logger.info("This is a fake task.  Args: " + str(args))

          return 42
@@ -603,17 +620,21 @@ 

  class SleepTask(BaseTaskHandler):

      Methods = ['sleep']

      _taskWeight = 0.25

+ 

      def handler(self, n):

          self.logger.info("Sleeping for %s seconds" % n)

          time.sleep(n)

          self.logger.info("Finished sleeping")

  

+ 

  class ForkTask(BaseTaskHandler):

      Methods = ['fork']

+ 

      def handler(self, n=5, m=37):

          for i in range(n):

              os.spawnvp(os.P_NOWAIT, 'sleep', ['sleep', str(m)])

  

+ 

  class WaitTestTask(BaseTaskHandler):

      """

      Tests self.wait()
@@ -624,6 +645,7 @@ 

      """

      Methods = ['waittest']

      _taskWeight = 0.1

+ 

      def handler(self, count, seconds=10):

          tasks = []

          for i in range(count):
@@ -638,10 +660,11 @@ 

  class SubtaskTask(BaseTaskHandler):

      Methods = ['subtask']

      _taskWeight = 0.1

+ 

      def handler(self, n=4):

          if n > 0:

              task_id = self.session.host.subtask(method='subtask',

-                                                 arglist=[n-1],

+                                                 arglist=[n - 1],

                                                  label='foo',

                                                  parent=self.id)

              self.wait(task_id)
@@ -657,6 +680,7 @@ 

      """Used when no matching method is found"""

      Methods = ['default']

      _taskWeight = 0.1

+ 

      def handler(self, *args, **opts):

          raise koji.GenericError("Invalid method: %s" % self.method)

  
@@ -665,8 +689,9 @@ 

      Methods = ['shutdown']

      _taskWeight = 0.0

      Foreground = True

+ 

      def handler(self):

-         #note: this is a foreground task

+         # note: this is a foreground task

          raise ServerExit

  

  
@@ -676,8 +701,9 @@ 

      Methods = ['restart']

      _taskWeight = 0.1

      Foreground = True

+ 

      def handler(self, host):

-         #note: this is a foreground task

+         # note: this is a foreground task

          if host['id'] != self.session.host.getID():

              raise koji.GenericError("Host mismatch")

          self.manager.restart_pending = True
@@ -690,8 +716,9 @@ 

      Methods = ['restartVerify']

      _taskWeight = 0.1

      Foreground = True

+ 

      def handler(self, task_id, host):

-         #note: this is a foreground task

+         # note: this is a foreground task

          tinfo = self.session.getTaskInfo(task_id)

          state = koji.TASK_STATES[tinfo['state']]

          if state != 'CLOSED':
@@ -708,6 +735,7 @@ 

  

      Methods = ['restartHosts']

      _taskWeight = 0.1

+ 

      def handler(self, options=None):

          if options is None:

              options = {}
@@ -715,14 +743,14 @@ 

          hostquery = {'enabled': True}

          if 'channel' in options:

              chan = self.session.getChannel(options['channel'], strict=True)

-             hostquery['channelID']= chan['id']

+             hostquery['channelID'] = chan['id']

          if 'arches' in options:

              hostquery['arches'] = options['arches']

          hosts = self.session.listHosts(**hostquery)

          if not hosts:

              raise koji.GenericError("No matching hosts")

  

-         timeout = options.get('timeout', 3600*24)

+         timeout = options.get('timeout', 3600 * 24)

  

          # fire off the subtasks

          this_host = self.session.host.getID()
@@ -730,8 +758,10 @@ 

          my_tasks = None

          for host in hosts:

              # note: currently task assignments bypass channel restrictions

-             task1 = self.subtask('restart', [host], assign=host['id'], label="restart %i" % host['id'])

-             task2 = self.subtask('restartVerify', [task1, host], assign=host['id'], label="sleep %i" % host['id'])

+             task1 = self.subtask('restart', [host],

+                                  assign=host['id'], label="restart %i" % host['id'])

+             task2 = self.subtask('restartVerify', [task1, host],

+                                  assign=host['id'], label="sleep %i" % host['id'])

              subtasks.append(task1)

              subtasks.append(task2)

              if host['id'] == this_host:
@@ -754,7 +784,7 @@ 

  class DependantTask(BaseTaskHandler):

  

      Methods = ['dependantTask']

-     #mostly just waiting on other tasks

+     # mostly just waiting on other tasks

      _taskWeight = 0.2

  

      def handler(self, wait_list, task_list):
@@ -777,13 +807,16 @@ 

  

          subtasks = []

          for task in task_list:

-             # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows for things like 'priority=15'

-             task_id = self.session.host.subtask(method=task[0], arglist=task[1], parent=self.id, **((len(task) > 2 and task[2]) or {}))

+             # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows

+             # for things like 'priority=15'

+             task_id = self.session.host.subtask(method=task[0], arglist=task[1], parent=self.id,

+                                                 **((len(task) > 2 and task[2]) or {}))

              if task_id:

                  subtasks.append(task_id)

          if subtasks:

              self.wait(subtasks, all=True)

  

+ 

  class MultiPlatformTask(BaseTaskHandler):

      def buildWrapperRPM(self, spec_url, build_task_id, build_target, build, repo_id, **opts):

          task = self.session.getTaskInfo(build_task_id)

file modified
+18 -15
@@ -52,8 +52,10 @@ 

          warnings.simplefilter('always', DeprecationWarning)

          warnings.warn(message, DeprecationWarning)

  

+ 

  def _changelogDate(cldate):

-     return time.strftime('%a %b %d %Y', time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S'))

+     return time.strftime('%a %b %d %Y',

+                          time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S'))

  

  

  def formatChangelog(entries):
@@ -65,10 +67,11 @@ 

  %s

  

  """ % (_changelogDate(entry['date']),

-        koji._fix_print(entry['author']),

-        koji._fix_print(entry['text']))

+             koji._fix_print(entry['author']),

+             koji._fix_print(entry['text']))

      return result

  

+ 

  DATE_RE = re.compile(r'(\d+)-(\d+)-(\d+)')

  TIME_RE = re.compile(r'(\d+):(\d+):(\d+)')

  
@@ -92,7 +95,7 @@ 

      if result:

          time = [int(r) for r in result.groups()]

      return calendar.timegm(

-             datetime.datetime(*(date + time)).timetuple())

+         datetime.datetime(*(date + time)).timetuple())

  

  

  def checkForBuilds(session, tag, builds, event, latest=False):
@@ -189,7 +192,7 @@ 

      ret = {}

      for key in keys:

          if strict or key in dict_:

-             #for strict we skip the has_key check and let the dict generate the KeyError

+             # for strict we skip the has_key check and let the dict generate the KeyError

              ret[key] = dict_[key]

      return ret

  
@@ -291,7 +294,7 @@ 

      for n, arg in enumerate(f_args):

          if arg not in data:

              raise koji.ParameterError('missing required argument %r (#%i)'

-                                         % (arg, n))

+                                       % (arg, n))

      return data

  

  
@@ -498,7 +501,6 @@ 

      os.symlink(dst, src)

  

  

- 

  def joinpath(path, *paths):

      """A wrapper around os.path.join that limits directory traversal"""

  
@@ -532,8 +534,8 @@ 

      if repo:

          rinfo = session.repoInfo(repo)

          if rinfo:

-             return {'id' : rinfo['create_event'],

-                     'ts' : rinfo['create_ts']}

+             return {'id': rinfo['create_event'],

+                     'ts': rinfo['create_ts']}

      return None

  

  
@@ -639,13 +641,13 @@ 

  

  class adler32_constructor(object):

  

-     #mimicing the hashlib constructors

+     # mimicking the hashlib constructors

      def __init__(self, arg=''):

          if six.PY3 and isinstance(arg, str):

              arg = bytes(arg, 'utf-8')

          self._value = adler32(arg) & 0xffffffff

-         #the bitwise and works around a bug in some versions of python

-         #see: https://bugs.python.org/issue1202

+         # the bitwise and works around a bug in some versions of python

+         # see: https://bugs.python.org/issue1202

  

      def update(self, arg):

          if six.PY3 and isinstance(arg, str):
@@ -664,7 +666,7 @@ 

          return dup

  

      digest_size = 4

-     block_size = 1      #I think

+     block_size = 1  # I think

  

  

  def tsort(parts):
@@ -696,7 +698,7 @@ 

      """

      MULTILINE = ['properties', 'envs']

      MULTIVALUE = ['goals', 'profiles', 'packages',

-                    'jvm_options', 'maven_options', 'buildrequires']

+                   'jvm_options', 'maven_options', 'buildrequires']

  

      def __init__(self, conf, section):

          self._conf = conf
@@ -812,7 +814,8 @@ 

          else:

              raise ValueError("Section %s does not exist in: %s" % (section, ', '.join(confs)))

      elif len(builds) > 1:

-         raise ValueError("Multiple sections in: %s, you must specify the section" % ', '.join(confs))

+         raise ValueError(

+             "Multiple sections in: %s, you must specify the section" % ', '.join(confs))

      return builds

  

  

file modified
+3 -4
@@ -52,8 +52,7 @@ 

  

  

  if six.PY2:

-     ExtendedMarshaller.dispatch[long] = ExtendedMarshaller.dump_int

- 

+     ExtendedMarshaller.dispatch[long] = ExtendedMarshaller.dump_int  # noqa: F821

  

  

  def dumps(params, methodname=None, methodresponse=None, encoding=None,
@@ -100,7 +99,7 @@ 

              "<methodName>", methodname, "</methodName>\n",

              data,

              "</methodCall>\n"

-             )

+         )

      elif methodresponse:

          # a method response, or a fault structure

          parts = (
@@ -108,7 +107,7 @@ 

              "<methodResponse>\n",

              data,

              "</methodResponse>\n"

-             )

+         )

      else:

          return data  # return as is

      return ''.join(parts)

file modified
+58 -42
@@ -47,17 +47,18 @@ 

                  options.append(o)

          rel_path = path[len(mount_data['mountpoint']):]

          rel_path = rel_path[1:] if rel_path.startswith('/') else rel_path

-         res = (os.path.join(mount_data['path'], rel_path), path, mount_data['fstype'], ','.join(options))

+         res = (os.path.join(mount_data['path'], rel_path), path, mount_data['fstype'],

+                ','.join(options))

          return res

  

      def _read_config(self):

          cp = koji.read_config_files(CONFIG_FILE)

          self.config = {

-            'default_mounts': [],

-            'safe_roots': [],

-            'path_subs': [],

-            'paths': [],

-            'internal_dev_setup': None,

+             'default_mounts': [],

+             'safe_roots': [],

+             'path_subs': [],

+             'paths': [],

+             'internal_dev_setup': None,

          }

  

          # main options
@@ -82,7 +83,7 @@ 

  

          # path section are in form 'path%d' while order is important as some

          # paths can be mounted inside other mountpoints

-         path_sections = [p for p in cp.sections() if re.match('path\d+', p)]

+         path_sections = [p for p in cp.sections() if re.match(r'path\d+', p)]

          for section_name in sorted(path_sections, key=lambda x: int(x[4:])):

              try:

                  self.config['paths'].append({
@@ -94,11 +95,15 @@ 

              except six.moves.configparser.NoOptionError:

                  raise koji.GenericError("bad config: missing options in %s section" % section_name)

  

-         for path in self.config['default_mounts'] + self.config['safe_roots'] + [x[0] for x in self.config['path_subs']]:

+         for path in self.config['default_mounts'] + self.config['safe_roots'] + \

+                 [x[0] for x in self.config['path_subs']]:

              if not path.startswith('/'):

-                 raise koji.GenericError("bad config: all paths (default_mounts, safe_roots, path_subs) needs to be absolute: %s" % path)

+                 raise koji.GenericError(

+                     "bad config: all paths (default_mounts, safe_roots, path_subs) needs to be "

+                     "absolute: %s" % path)

  

-     def handler(self, root, arch, command, keep=False, packages=[], mounts=[], repo_id=None, skip_setarch=False, weight=None, upload_logs=None, new_chroot=None):

+     def handler(self, root, arch, command, keep=False, packages=[], mounts=[], repo_id=None,

+                 skip_setarch=False, weight=None, upload_logs=None, new_chroot=None):

          """Create a buildroot and run a command (as root) inside of it

  

          Command may be a string or a list.
@@ -118,9 +123,10 @@ 

          if weight is not None:

              weight = max(weight, 0.5)

              self.session.host.setTaskWeight(self.id, weight)

-         #noarch is funny

+ 

+         # noarch is funny

          if arch == "noarch":

-             #we need a buildroot arch. Pick one that:

+             # we need a buildroot arch. Pick one that:

              #  a) this host can handle

              #  b) the build tag can support

              #  c) is canonical
@@ -130,46 +136,50 @@ 

              tag_arches = self.session.getBuildConfig(root)['arches']

              if not tag_arches:

                  raise koji.BuildError("No arch list for tag: %s" % root)

-             #index canonical host arches

+             # index canonical host arches

              host_arches = set([koji.canonArch(a) for a in host_arches.split()])

-             #pick the first suitable match from tag's archlist

+             # pick the first suitable match from tag's archlist

              for br_arch in tag_arches.split():

                  br_arch = koji.canonArch(br_arch)

                  if br_arch in host_arches:

-                     #we're done

+                     # we're done

                      break

              else:

-                 #no overlap

-                 raise koji.BuildError("host does not match tag arches: %s (%s)" % (root, tag_arches))

+                 # no overlap

+                 raise koji.BuildError(

+                     "host does not match tag arches: %s (%s)" % (root, tag_arches))

          else:

              br_arch = arch

          if repo_id:

              repo_info = self.session.repoInfo(repo_id, strict=True)

              if repo_info['tag_name'] != root:

-                 raise koji.BuildError("build tag (%s) does not match repo tag (%s)" % (root, repo_info['tag_name']))

+                 raise koji.BuildError(

+                     "build tag (%s) does not match repo tag (%s)" % (root, repo_info['tag_name']))

              if repo_info['state'] not in (koji.REPO_STATES['READY'], koji.REPO_STATES['EXPIRED']):

-                 raise koji.BuildError("repos in the %s state may not be used by runroot" % koji.REPO_STATES[repo_info['state']])

+                 raise koji.BuildError(

+                     "repos in the %s state may not be used by runroot" %

+                     koji.REPO_STATES[repo_info['state']])

          else:

              repo_info = self.session.getRepo(root)

          if not repo_info:

-             #wait for it

+             # wait for it

              task_id = self.session.host.subtask(method='waitrepo',

-                                            arglist=[root, None, None],

-                                            parent=self.id)

+                                                 arglist=[root, None, None],

+                                                 parent=self.id)

              repo_info = self.wait(task_id)[task_id]

          broot = BuildRoot(self.session, self.options, root, br_arch, self.id,

-                 repo_id=repo_info['id'], setup_dns=True,

-                 internal_dev_setup=self.config['internal_dev_setup'])

+                           repo_id=repo_info['id'], setup_dns=True,

+                           internal_dev_setup=self.config['internal_dev_setup'])

          broot.workdir = self.workdir

          broot.init()

          rootdir = broot.rootdir()

-         #workaround for rpm oddness

+         # workaround for rpm oddness

          os.system('rm -f "%s"/var/lib/rpm/__db.*' % rootdir)

-         #update buildroot state (so that updateBuildRootList() will work)

+         # update buildroot state (so that updateBuildRootList() will work)

          self.session.host.setBuildRootState(broot.id, 'BUILDING')

          try:

              if packages:

-                 #pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')

+                 # pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')

                  pkgcmd = ['--install'] + packages

                  status = broot.mock(pkgcmd)

                  self.session.host.updateBuildRootList(broot.id, broot.getPackageList())
@@ -179,26 +189,30 @@ 

              if isinstance(command, str):

                  cmdstr = command

              else:

-                 #we were passed an arglist

-                 #we still have to run this through the shell (for redirection)

-                 #but we can preserve the list structure precisely with careful escaping

+                 # we were passed an arglist

+                 # we still have to run this through the shell (for redirection)

+                 # but we can preserve the list structure precisely with careful escaping

                  cmdstr = ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])

              # A nasty hack to put command output into its own file until mock can be

              # patched to do something more reasonable than stuff everything into build.log

-             cmdargs = ['/bin/sh', '-c', "{ %s; } < /dev/null 2>&1 | /usr/bin/tee /builddir/runroot.log; exit ${PIPESTATUS[0]}" % cmdstr]

+             cmdargs = ['/bin/sh', '-c',

+                        "{ %s; } < /dev/null 2>&1 | /usr/bin/tee /builddir/runroot.log; exit "

+                        "${PIPESTATUS[0]}" % cmdstr]

  

              # always mount /mnt/redhat (read-only)

              # always mount /mnt/iso (read-only)

              # also need /dev bind mount

-             self.do_mounts(rootdir, [self._get_path_params(x) for x in self.config['default_mounts']])

+             self.do_mounts(rootdir,

+                            [self._get_path_params(x) for x in self.config['default_mounts']])

              self.do_extra_mounts(rootdir, mounts)

              mock_cmd = ['chroot']

              if new_chroot:

                  mock_cmd.append('--new-chroot')

-             elif new_chroot is False: # None -> no option added

+             elif new_chroot is False:  # None -> no option added

                  mock_cmd.append('--old-chroot')

              if skip_setarch:

-                 #we can't really skip it, but we can set it to the current one instead of of the chroot one

+                 # we can't really skip it, but we can set it to the current one instead of the

+                 # chroot one

                  myarch = platform.uname()[5]

                  mock_cmd.extend(['--arch', myarch])

              mock_cmd.append('--')
@@ -235,9 +249,9 @@ 

                  if mount.startswith(safe_root):

                      break

              else:

-                 #no match

+                 # no match

                  raise koji.GenericError("read-write mount point is not safe: %s" % mount)

-             #normpath should have removed any .. dirs, but just in case...

+             # normpath should have removed any .. dirs, but just in case...

              if mount.find('/../') != -1:

                  raise koji.GenericError("read-write mount point is not safe: %s" % mount)

  
@@ -266,7 +280,7 @@ 

                  else:

                      opts = opts.split(',')

                  if 'bind' in opts:

-                     #make sure dir exists

+                     # make sure dir exists

                      if not os.path.isdir(dev):

                          error = koji.GenericError("No such directory or mount: %s" % dev)

                          break
@@ -278,10 +292,11 @@ 

                  cmd = ['mount', '-t', type, '-o', opts, dev, mpoint]

                  self.logger.info("Mount command: %r" % cmd)

                  koji.ensuredir(mpoint)

-                 status = log_output(self.session, cmd[0], cmd, logfile, uploadpath, logerror=True, append=True)

+                 status = log_output(self.session, cmd[0], cmd, logfile, uploadpath,

+                                     logerror=True, append=True)

                  if not isSuccess(status):

-                     error = koji.GenericError("Unable to mount %s: %s" \

-                             % (mpoint, parseStatus(status, cmd)))

+                     error = koji.GenericError("Unable to mount %s: %s"

+                                               % (mpoint, parseStatus(status, cmd)))

                      break

                  fslog.write("%s\n" % mpoint)

                  fslog.flush()
@@ -297,7 +312,7 @@ 

              with open(fn, 'r') as fslog:

                  for line in fslog.readlines():

                      mounts.add(line.strip())

-         #also, check /proc/mounts just in case

+         # also, check /proc/mounts just in case

          mounts |= set(scan_mounts(rootdir))

          mounts = sorted(mounts)

          # deeper directories first
@@ -305,7 +320,8 @@ 

          failed = []

          self.logger.info("Unmounting (runroot): %s" % mounts)

          for dir in mounts:

-             proc = subprocess.Popen(["umount", "-l", dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

+             proc = subprocess.Popen(["umount", "-l", dir],

+                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)

              if proc.wait() != 0:

                  output = proc.stdout.read()

                  output += proc.stderr.read()

file modified
+25 -17
@@ -6,8 +6,13 @@ 

  

  import koji

  from koji.plugin import export_cli

- from koji_cli.lib import (_, activate_session, bytes_to_stdout,

-                           list_task_output_all_volumes, watch_tasks)

+ from koji_cli.lib import (

+     _,

+     activate_session,

+     bytes_to_stdout,

+     list_task_output_all_volumes,

+     watch_tasks

+ )

  

  

  @export_cli
@@ -17,24 +22,27 @@ 

      usage += _("\n(Specify the --help global option for a list of other help options)")

      parser = OptionParser(usage=usage)

      parser.disable_interspersed_args()

-     parser.add_option("-p", "--package", action="append", default=[], help=_("make sure this package is in the chroot"))

-     parser.add_option("-m", "--mount", action="append", default=[], help=_("mount this directory read-write in the chroot"))

+     parser.add_option("-p", "--package", action="append", default=[],

+                       help=_("make sure this package is in the chroot"))

+     parser.add_option("-m", "--mount", action="append", default=[],

+                       help=_("mount this directory read-write in the chroot"))

      parser.add_option("--skip-setarch", action="store_true", default=False,

-             help=_("bypass normal setarch in the chroot"))

+                       help=_("bypass normal setarch in the chroot"))

      parser.add_option("-w", "--weight", type='int', help=_("set task weight"))

      parser.add_option("--channel-override", help=_("use a non-standard channel"))

      parser.add_option("--task-id", action="store_true", default=False,

-             help=_("Print the ID of the runroot task"))

+                       help=_("Print the ID of the runroot task"))

      parser.add_option("--use-shell", action="store_true", default=False,

-             help=_("Run command through a shell, otherwise uses exec"))

+                       help=_("Run command through a shell, otherwise uses exec"))

      parser.add_option("--new-chroot", action="store_true", default=None,

-             help=_("Run command with the --new-chroot (systemd-nspawn) option to mock"))

+                       help=_("Run command with the --new-chroot (systemd-nspawn) option to mock"))

      parser.add_option("--old-chroot", action="store_false", default=None, dest='new_chroot',

-             help=_("Run command with the --old-chroot (systemd-nspawn) option to mock"))

+                       help=_("Run command with the --old-chroot (systemd-nspawn) option to mock"))

      parser.add_option("--repo-id", type="int", help=_("ID of the repo to use"))

      parser.add_option("--nowait", action="store_false", dest="wait",

-             default=True, help=_("Do not wait on task"))

-     parser.add_option("--watch", action="store_true", help=_("Watch task instead of printing runroot.log"))

+                       default=True, help=_("Do not wait on task"))

+     parser.add_option("--watch", action="store_true",

+                       help=_("Watch task instead of printing runroot.log"))

      parser.add_option("--quiet", action="store_true", default=options.quiet,

                        help=_("Do not print the task information"))

  
@@ -53,12 +61,12 @@ 

      else:

          command = args[2:]

      try:

-         kwargs = { 'channel':       opts.channel_override,

-                    'packages':      opts.package,

-                    'mounts':        opts.mount,

-                    'repo_id':       opts.repo_id,

-                    'skip_setarch':  opts.skip_setarch,

-                    'weight':        opts.weight }

+         kwargs = {'channel': opts.channel_override,

+                   'packages': opts.package,

+                   'mounts': opts.mount,

+                   'repo_id': opts.repo_id,

+                   'skip_setarch': opts.skip_setarch,

+                   'weight': opts.weight}

          # Only pass this kwarg if it is true - this prevents confusing older

          # builders with a different function signature

          if opts.new_chroot is not None:

@@ -14,13 +14,14 @@ 

      usage += _("\n(Specify the --help global option for a list of other help options)")

      parser = OptionParser(usage=usage)

      parser.add_option("-f", "--full", action="store_true", default=False,

-             help=_("Download whole tree, if not specified, only builddir will be downloaded"))

+                       help=_("Download whole tree, if not specified, "

+                              "only builddir will be downloaded"))

      parser.add_option("-t", "--task", action="store_const", dest="mode",

-             const="task", default="task",

-             help=_("Treat ID as a task ID (the default)"))

+                       const="task", default="task",

+                       help=_("Treat ID as a task ID (the default)"))

      parser.add_option("-r", "--buildroot", action="store_const", dest="mode",

-             const="buildroot",

-             help=_("Treat ID as a buildroot ID"))

+                       const="buildroot",

+                       help=_("Treat ID as a buildroot ID"))

      parser.add_option("--quiet", action="store_true", default=options.quiet,

                        help=_("Do not print the task information"))

      parser.add_option("--nowait", action="store_true",
@@ -63,10 +64,11 @@ 

      if not opts.quiet:

          print(_("Created task %s for buildroot %s") % (task_id, br_id))

          print("Task info: %s/taskinfo?taskID=%s"

-                 % (options.weburl, task_id))

+               % (options.weburl, task_id))

  

      if opts.nowait:

          return

      else:

          session.logout()

-         return watch_tasks(session, [task_id], quiet=opts.quiet, poll_interval=options.poll_interval)

+         return watch_tasks(session, [task_id],

+                            quiet=opts.quiet, poll_interval=options.poll_interval)

file modified
+3 -3
@@ -9,8 +9,8 @@ 

  

  import koji

  from koji.plugin import export_cli

- from koji_cli.lib import _, activate_session

  from koji_cli.commands import anon_handle_wait_repo

+ from koji_cli.lib import _, activate_session

  

  

  @export_cli
@@ -40,7 +40,7 @@ 

          parser.error(_("Policy violation"))

  

      if not opts.quiet:

-         print (tag["name"])

+         print(tag["name"])

  

      if opts.wait:

          args = ["--target", tag["name"]]
@@ -88,4 +88,4 @@ 

          user = opts.user

  

      for tag in session.listSideTags(basetag=opts.basetag, user=user):

-         print (tag["name"])

+         print(tag["name"])

file modified
+12
@@ -22,6 +22,7 @@ 

  CONFIG_FILE = '/etc/koji-hub/plugins/protonmsg.conf'

  CONFIG = None

  

+ 

  class TimeoutHandler(MessagingHandler):

      def __init__(self, url, msgs, conf, *args, **kws):

          super(TimeoutHandler, self).__init__(*args, **kws)
@@ -151,6 +152,7 @@ 

      body = json.dumps(data, default=json_serialize)

      msgs.append((address, props, body))

  

+ 

  @convert_datetime

  @callback('postPackageListChange')

  def prep_package_list_change(cbtype, *args, **kws):
@@ -162,6 +164,7 @@ 

               'user': kws['user']['name']}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postTaskStateChange')

  def prep_task_state_change(cbtype, *args, **kws):
@@ -177,6 +180,7 @@ 

               'new': kws['new']}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postBuildStateChange')

  def prep_build_state_change(cbtype, *args, **kws):
@@ -196,6 +200,7 @@ 

               'new': new}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postImport')

  def prep_import(cbtype, *args, **kws):
@@ -207,6 +212,7 @@ 

               'release': kws['build']['release']}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postRPMSign')

  def prep_rpm_sign(cbtype, *args, **kws):
@@ -224,6 +230,7 @@ 

               'rpm_arch': kws['rpm']['arch']}

      queue_msg(address, props, kws)

  

+ 

  def _prep_tag_msg(address, cbtype, kws):

      build = kws['build']

      props = {'type': cbtype[4:],
@@ -234,16 +241,19 @@ 

               'user': kws['user']['name']}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postTag')

  def prep_tag(cbtype, *args, **kws):

      _prep_tag_msg('build.tag', cbtype, kws)

  

+ 

  @convert_datetime

  @callback('postUntag')

  def prep_untag(cbtype, *args, **kws):

      _prep_tag_msg('build.untag', cbtype, kws)

  

+ 

  @convert_datetime

  @callback('postRepoInit')

  def prep_repo_init(cbtype, *args, **kws):
@@ -253,6 +263,7 @@ 

               'repo_id': kws['repo_id']}

      queue_msg(address, props, kws)

  

+ 

  @convert_datetime

  @callback('postRepoDone')

  def prep_repo_done(cbtype, *args, **kws):
@@ -263,6 +274,7 @@ 

               'expire': kws['expire']}

      queue_msg(address, props, kws)

  

+ 

  @ignore_error

  @convert_datetime

  @callback('postCommit')

file modified
+3 -3
@@ -65,9 +65,9 @@ 

                              stdout=devnull, stderr=devnull,

                              close_fds=True)

      if rpm2cpio.wait() != 0 or cpio.wait() != 0:

-         raise koji.CallbackError('error extracting files from %s, ' \

-               'rpm2cpio returned %s, cpio returned %s' % \

-               (filepath, rpm2cpio.wait(), cpio.wait()))

+         raise koji.CallbackError('error extracting files from %s, '

+                                  'rpm2cpio returned %s, cpio returned %s' %

+                                  (filepath, rpm2cpio.wait(), cpio.wait()))

      devnull.close()

  

  

file modified
+9 -9
@@ -1,4 +1,4 @@ 

- #koji hub plugin

+ # koji hub plugin

  # There is a kojid plugin that goes with this hub plugin. The kojid builder

  # plugin has a config file.  This hub plugin has no config file.

  
@@ -9,12 +9,11 @@ 

  import sys

  

  import koji

- # XXX - have to import kojihub for make_task

- sys.path.insert(0, '/usr/share/koji-hub/')

- import kojihub

  from koji.context import context

  from koji.plugin import export

- 

+ # XXX - have to import kojihub for make_task

+ sys.path.insert(0, '/usr/share/koji-hub/')

+ import kojihub  # noqa: E402

  

  __all__ = ('runroot',)
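Note: the import has to stay below the sys.path manipulation, so the warning being silenced
is E402 ("module level import not at top of file"), not F402; the pattern in isolation:

    import sys
    sys.path.insert(0, '/usr/share/koji-hub/')  # kojihub lives outside site-packages
    import kojihub  # noqa: E402  (module level import not at top of file)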

  
@@ -28,6 +27,7 @@ 

              ret[koji.canonArch(a)] = 1

      return ret

  

+ 

  @export

  def runroot(tagInfo, arch, command, channel=None, **opts):

      """ Create a runroot task """
@@ -41,11 +41,11 @@ 

  

      tag = kojihub.get_tag(tagInfo, strict=True)

      if arch == 'noarch':

-         #not all arches can generate a proper buildroot for all tags

+         # not all arches can generate a proper buildroot for all tags

          if not tag['arches']:

              raise koji.GenericError('no arches defined for tag %s' % tag['name'])

  

-         #get all known arches for the system

+         # get all known arches for the system

          fullarches = kojihub.get_all_arches()

  

          tagarches = tag['arches'].split()
@@ -56,8 +56,8 @@ 

              chanarches = get_channel_arches(taskopts['channel'])

              choices = [x for x in tagarches if x in chanarches]

              if not choices:

-                 raise koji.GenericError('no common arches for tag/channel: %s/%s' \

-                             % (tagInfo, taskopts['channel']))

+                 raise koji.GenericError('no common arches for tag/channel: %s/%s'

+                                         % (tagInfo, taskopts['channel']))

              taskopts['arch'] = koji.canonArch(random.choice(choices))

  

      args = koji.encode_args(tagInfo, arch, command, **opts)

@@ -3,12 +3,10 @@ 

  import sys

  

  import koji

- sys.path.insert(0, '/usr/share/koji-hub/')

- import kojihub

  from koji.context import context

  from koji.plugin import export

- 

- 

+ sys.path.insert(0, '/usr/share/koji-hub/')

+ import kojihub  # noqa: E402

  

  __all__ = ('saveFailedTree',)

  
@@ -40,10 +38,12 @@ 

      taskID = brinfo['task_id']

      task_info = kojihub.Task(taskID).getInfo()

      if task_info['state'] != koji.TASK_STATES['FAILED']:

-         raise koji.PreBuildError("Task %s has not failed. Only failed tasks can upload their buildroots." % taskID)

+         raise koji.PreBuildError(

+             "Task %s has not failed. Only failed tasks can upload their buildroots." % taskID)

      elif allowed_methods != '*' and task_info['method'] not in allowed_methods:

-         raise koji.PreBuildError("Only %s tasks can upload their buildroots (Task %s is %s)." % \

-                (', '.join(allowed_methods), task_info['id'], task_info['method']))

+         raise koji.PreBuildError(

+             "Only %s tasks can upload their buildroots (Task %s is %s)." %

+             (', '.join(allowed_methods), task_info['id'], task_info['method']))

      elif task_info["owner"] != context.session.user_id and not context.session.hasPerm('admin'):

          raise koji.ActionNotAllowed("Only owner of failed task or 'admin' can run this task.")

      elif not kojihub.get_host(task_info['host_id'])['enabled']:

file modified
+13 -14
@@ -3,27 +3,26 @@ 

  # SPDX-License-Identifier: GPL-2.0-or-later

  import sys

  

- from koji.context import context

- from koji.plugin import export, callback

  import koji

- 

- CONFIG_FILE = "/etc/koji-hub/plugins/sidetag.conf"

- CONFIG = None

- 

+ from koji.context import context

+ from koji.plugin import callback, export

  sys.path.insert(0, "/usr/share/koji-hub/")

- from kojihub import (

+ from kojihub import (  # noqa: E402

+     QueryProcessor,

+     _create_build_target,

+     _create_tag,

+     _delete_build_target,

+     _delete_tag,

      assert_policy,

+     get_build_target,

      get_tag,

      get_user,

-     get_build_target,

-     _create_tag,

-     _create_build_target,

-     _delete_tag,

-     _delete_build_target,

-     QueryProcessor,

-     nextval,

+     nextval

  )

  

+ CONFIG_FILE = "/etc/koji-hub/plugins/sidetag.conf"

+ CONFIG = None

+ 

  

  @export

  def createSideTag(basetag):
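Note: flake8-import-order with the pep8 style wants standard-library imports first, then
application imports, alphabetized within each group, which is what the reshuffle above
enforces; a minimal sketch:

    import sys                               # 1) standard library

    import koji                              # 2) application packages, alphabetical
    from koji.context import context
    from koji.plugin import callback, export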

file modified
+9 -8
@@ -16,9 +16,9 @@ 

          'requests',

          'requests-kerberos',

          'six',

-         #'libcomps',

-         #'rpm-py-installer', # it is optional feature

-         #'rpm',

+         # 'libcomps',

+         # 'rpm-py-installer',  # it is an optional feature

+         # 'rpm',

      ]

      if sys.version_info[0] < 3:

          # optional auth library for older hubs
@@ -33,6 +33,7 @@ 

  

      return requires

  

+ 

  setup(

      name="koji",

      version="1.20.0",
@@ -41,8 +42,8 @@ 

                   " interface."),

      license="LGPLv2 and GPLv2+",

      url="http://pagure.io/koji/",

-     author = 'Koji developers',

-     author_email = 'koji-devel@lists.fedorahosted.org',

+     author='Koji developers',

+     author_email='koji-devel@lists.fedorahosted.org',

      classifiers=[

          "Development Status :: 5 - Production/Stable",

          "Environment :: Console",
@@ -62,9 +63,9 @@ 

          'koji_cli_plugins': 'plugins/cli',

      },

      # doesn't make sense, as we have only example config

-     #data_files=[

-     #    ('/etc', ['cli/koji.conf']),

-     #],

+     # data_files=[

+     #     ('/etc', ['cli/koji.conf']),

+     # ],

      scripts=[

          'cli/koji',

          'util/koji-gc',

@@ -0,0 +1,6 @@ 

+ flake8

+ flake8-import-order

+ mock<=2.0.0

+ requests-mock

+ coverage

+ nose
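Note: with these dev requirements installed the checks should be reproducible locally; a
hedged example (flake8 picks the select/ignore/max-line-length settings up from the
project config on its own):

    pip install flake8 flake8-import-order
    flake8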

@@ -251,7 +251,7 @@ 

    --quiet         Do not print the header information

    --paths         Show the file paths

    --sigs          Show signatures

-   --type=TYPE     Show builds of the given type only.  Currently supported

+   --type=TYPE     Show builds of the given type only. Currently supported

                    types: maven, win, image

    --event=EVENT#  query at event

    --ts=TIMESTAMP  query at last event before timestamp

file modified
+118 -96
@@ -7,8 +7,9 @@ 

  #       Mike McLean <mikem@redhat.com>

  

  from __future__ import absolute_import

- import fcntl

+ 

  import datetime

+ import fcntl

  import fnmatch

  import optparse

  import os
@@ -39,6 +40,7 @@ 

      """Stub function for translation"""

      return args

  

+ 

  def get_options():

      """process options from command line and config file"""

  
@@ -80,7 +82,7 @@ 

      parser.add_option("--smtp-user", dest="smtp_user", metavar="USER",

                        help=_("specify smtp username for notifications"))

      parser.add_option("--smtp-pass", dest="smtp_pass", metavar="PASSWORD",

-                       help=optparse.SUPPRESS_HELP) # do not allow passwords on a command line

+                       help=optparse.SUPPRESS_HELP)  # do not allow passwords on a command line

      parser.add_option("--no-mail", action='store_false', default=True, dest="mail",

                        help=_("don't send notifications"))

      parser.add_option("--send-mail", action='store_true', dest="mail",
@@ -92,7 +94,7 @@ 

      parser.add_option("--email-template", default="/etc/koji-gc/email.tpl",

                        help=_("notification template"))

      parser.add_option("--action", help=_("action(s) to take"))

-     parser.add_option("--delay", metavar="INTERVAL", default = '5 days',

+     parser.add_option("--delay", metavar="INTERVAL", default='5 days',

                        help="time before eligible builds are placed in trashcan")

      parser.add_option("--grace-period", default='4 weeks', metavar="INTERVAL",

                        help="time that builds are held in trashcan")
@@ -122,7 +124,7 @@ 

                                              "recommended."))

      parser.add_option("--exit-on-lock", action="store_true",

                        help=_("quit if --lock-file exists, don't wait"))

-     #parse once to get the config file

+     # parse once to get the config file

      (options, args) = parser.parse_args()

  

      defaults = parser.get_default_values()
@@ -138,7 +140,7 @@ 

          ['krbservice', None, 'string'],

          ['krb_rdns', None, 'boolean'],

          ['krb_canon_host', None, 'boolean'],

-             ['krb_server_realm', None, 'string'],

+         ['krb_server_realm', None, 'string'],

          ['runas', None, 'string'],

          ['user', None, 'string'],

          ['password', None, 'string'],
@@ -149,8 +151,8 @@ 

          ['server', None, 'string'],

          ['weburl', None, 'string'],

          ['smtp_host', None, 'string'],

-             ['smtp_user', None, 'string'],

-             ['smtp_pass', None, 'string'],

+         ['smtp_user', None, 'string'],

+         ['smtp_pass', None, 'string'],

          ['from_addr', None, 'string'],

          ['email_template', None, 'string'],

          ['email_domain', None, 'string'],
@@ -161,9 +163,9 @@ 

          ['trashcan_tag', None, 'string'],

          ['no_ssl_verify', None, 'boolean'],

          ['timeout', None, 'integer'],

-             ['lock_file', None, 'string'],

-             ['exit_on_lock', None, 'boolean'],

-         ]

+         ['lock_file', None, 'string'],

+         ['exit_on_lock', None, 'boolean'],

+     ]

      for name, alias, type in cfgmap:

          if alias is None:

              alias = ('main', name)
@@ -176,27 +178,27 @@ 

                  setattr(defaults, name, config.getboolean(*alias))

              else:

                  setattr(defaults, name, config.get(*alias))

-     #parse again with defaults

+     # parse again with defaults

      (options, args) = parser.parse_args(values=defaults)

      options.config = config

  

-     #figure out actions

+     # figure out actions

      actions = ('prune', 'trash', 'delete', 'salvage')

      if options.action:

-         options.action = options.action.lower().replace(',',' ').split()

+         options.action = options.action.lower().replace(',', ' ').split()

          for x in options.action:

              if x not in actions:

                  parser.error(_("Invalid action: %s") % x)

      else:

          options.action = ('delete', 'prune', 'trash')

  

-     #split patterns for unprotected keys

+     # split patterns for unprotected keys

      if options.unprotected_keys:

-         options.unprotected_key_patterns = options.unprotected_keys.replace(',',' ').split()

+         options.unprotected_key_patterns = options.unprotected_keys.replace(',', ' ').split()

      else:

          options.unprotected_key_patterns = []

  

-     #parse key aliases

+     # parse key aliases

      options.key_aliases = {}

      try:

          if config.has_option('main', 'key_aliases'):
@@ -207,9 +209,9 @@ 

                  options.key_aliases[parts[0].upper()] = parts[1]

      except ValueError as e:

          print(e)

-         parser.error(_("Invalid key alias data in config: %s") % config.get('main','key_aliases'))

+         parser.error(_("Invalid key alias data in config: %s") % config.get('main', 'key_aliases'))

  

-     #parse time intervals

+     # parse time intervals

      for key in ('delay', 'grace_period'):

          try:

              value = getattr(options, key)
@@ -237,6 +239,7 @@ 

  

      return options, args

  

+ 

  def check_tag(name):

      """Check tag name against options and determine if we should process it

  
@@ -251,12 +254,13 @@ 

          for pattern in options.tag_filter:

              if fnmatch.fnmatch(name, pattern):

                  return True

-         #doesn't match any pattern in filter

+         # doesn't match any pattern in filter

          return False

      else:

-         #not ignored and no filter specified

+         # not ignored and no filter specified

          return True

  

+ 

  def check_package(name):

      """Check package name against options and determine if we should process it

  
@@ -266,32 +270,36 @@ 

          for pattern in options.pkg_filter:

              if fnmatch.fnmatch(name, pattern):

                  return True

-         #doesn't match any pattern in filter

+         # doesn't match any pattern in filter

          return False

      else:

-         #no filter specified

+         # no filter specified

          return True

  

+ 

  time_units = {

-     'second' : 1,

-     'minute' : 60,

-     'hour' : 3600,

-     'day' : 86400,

-     'week' : 604800,

+     'second': 1,

+     'minute': 60,

+     'hour': 3600,

+     'day': 86400,

+     'week': 604800,

  }

  time_unit_aliases = [

-     #[unit, alias, alias, ...]

+     # [unit, alias, alias, ...]

      ['week', 'weeks', 'wk', 'wks'],

      ['hour', 'hours', 'hr', 'hrs'],

      ['day', 'days'],

      ['minute', 'minutes', 'min', 'mins'],

      ['second', 'seconds', 'sec', 'secs', 's'],

  ]

+ 

+ 

  def parse_duration(str):

      """Parse time duration from string, returns duration in seconds"""

      ret = 0

      n = None

      unit = None

+ 

      def parse_num(s):

          try:

              return int(s)
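Note: for reference, the duration grammar this parser accepts (expected results derived
from the unit tables above):

    parse_duration('5 days')         # -> 432000
    parse_duration('4 weeks')        # -> 2419200
    parse_duration('1 hour 30 min')  # -> 5400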
@@ -307,7 +315,7 @@ 

              n = parse_num(x)

              if n is not None:

                  continue

-             #perhaps the unit is appended w/o a space

+             # perhaps the unit is appended w/o a space

              for names in time_unit_aliases:

                  for name in names:

                      if x.endswith(name):
@@ -337,23 +345,28 @@ 

          unit = None

      return ret

  

+ 

  def error(msg=None, code=1):

      if msg:

          sys.stderr.write(msg + "\n")

          sys.stderr.flush()

      sys.exit(code)

  

+ 

  def warn(msg):

      sys.stderr.write(msg + "\n")

      sys.stderr.flush()

  

+ 

  def ensure_connection(session):

      try:

          ret = session.getAPIVersion()

      except requests.exceptions.ConnectionError:

          error(_("Error: Unable to connect to server"))

      if ret != koji.API_VERSION:

-         warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))

+         warn(_("WARNING: The server is at API version %d and the client is at %d" %

+                (ret, koji.API_VERSION)))

+ 

  

  def has_krb_creds():

      if krbV is None:
@@ -366,22 +379,24 @@ 

      except krbV.Krb5Error:

          return False

  

+ 

  def activate_session(session):

      """Test and login the session is applicable"""

      global options

      if options.noauth:

-         #skip authentication

+         # skip authentication

          pass

      elif options.cert is not None and os.path.isfile(options.cert):

          # authenticate using SSL client cert

          session.ssl_login(options.cert, None, options.serverca, proxyuser=options.runas)

      elif options.user:

-         #authenticate using user/password

+         # authenticate using user/password

          session.login()

      elif has_krb_creds() or (options.keytab and options.principal):

          try:

              if options.keytab and options.principal:

-                 session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)

+                 session.krb_login(principal=options.principal, keytab=options.keytab,

+                                   proxyuser=options.runas)

              else:

                  session.krb_login(proxyuser=options.runas)

          except krbV.Krb5Error as e:
@@ -394,6 +409,7 @@ 

      if options.debug:

          print("successfully connected to hub")

  

+ 

  def send_warning_notice(owner_name, builds):

      if not options.mail:

          return
@@ -404,14 +420,14 @@ 

      with open(options.email_template, 'r') as f:

          tpl = Template(f.read())

  

-     fmt="""\

+     fmt = """\

  Build: %%(name)s-%%(version)s-%%(release)s

  %s/buildinfo?buildID=%%(id)i""" % options.weburl

      middle = '\n\n'.join([fmt % b for b in builds])

  

      msg = MIMEText.MIMEText(tpl.safe_substitute(

-         owner = owner_name,

-         builds = middle,

+         owner=owner_name,

+         builds=middle,

      ))

  

      if len(builds) == 1:
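Note: the template is a string.Template, so a minimal email.tpl would look like this
(placeholder names taken from the safe_substitute() call above):

    Hello $owner,

    the following builds have been marked for deletion:

    $builds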
@@ -419,7 +435,7 @@ 

      else:

          msg['Subject'] = "%i builds marked for deletion" % len(builds)

      msg['From'] = options.from_addr

-     msg['To'] = "%s@%s" % (owner_name, options.email_domain)  #XXX!

+     msg['To'] = "%s@%s" % (owner_name, options.email_domain)  # XXX!

      msg['X-Koji-Builder'] = owner_name

      if options.test:

          if options.debug:
@@ -435,7 +451,7 @@ 

                  s.login(options.smtp_user, options.smtp_pass)

              s.sendmail(msg['From'], msg['To'], msg.as_string())

              s.quit()

-         except:

+         except Exception:

              print("FAILED: Sending warning notice to %s" % msg['To'])

  

  
@@ -451,7 +467,7 @@ 

      print("...got %i builds" % len(untagged))

      min_age = options.delay

      trashcan_tag = options.trashcan_tag

-     #Step 1: place unreferenced builds into trashcan

+     # Step 1: place unreferenced builds into trashcan

      i = 0

      N = len(untagged)

      to_trash = []
@@ -476,7 +492,7 @@ 

      for binfo, [refs] in six.moves.zip(continuing, mcall.call_all()):

          i += 1

          nvr = binfo['nvr']

-         #XXX - this is more data than we need

+         # XXX - this is more data than we need

          #      also, this call takes waaaay longer than it should

          if refs.get('tags'):

              # must have been tagged just now
@@ -485,12 +501,13 @@ 

          if refs.get('rpms'):

              if options.debug:

                  print("[%i/%i] Build has %i rpm references: %s" % (i, N, len(refs['rpms']), nvr))

-                 #pprint.pprint(refs['rpms'])

+                 # pprint.pprint(refs['rpms'])

              continue

          if refs.get('archives'):

              if options.debug:

-                 print("[%i/%i] Build has %i archive references: %s" % (i, N, len(refs['archives']), nvr))

-                 #pprint.pprint(refs['archives'])

+                 print("[%i/%i] Build has %i archive references: %s" %

+                       (i, N, len(refs['archives']), nvr))

+                 # pprint.pprint(refs['archives'])

              continue

          if refs.get('component_of'):

              if options.debug:
@@ -498,22 +515,22 @@ 

              continue

          ts = refs['last_used']

          if ts:

-             #work around server bug

+             # work around server bug

              if isinstance(ts, list):

                  ts = ts[0]

-             #XXX - should really check time server side

+             # XXX - should really check time server side

              if options.debug:

                  print("[%i/%i] Build has been used in a buildroot: %s" % (i, N, nvr))

                  print("Last_used: %s" % datetime.datetime.fromtimestamp(ts).isoformat())

              age = time.time() - ts

              if age < min_age:

                  continue

-         #see how long build has been untagged

+         # see how long build has been untagged

          history = session.queryHistory(build=binfo['id'])['tag_listing']

          age = None

          binfo2 = None

          if not history:

-             #never tagged, we'll have to use the build create time

+             # never tagged, we'll have to use the build create time

              binfo2 = session.getBuild(binfo['id'])

              ts = binfo2.get('creation_ts')

              if ts is None:
@@ -527,10 +544,10 @@ 

              else:

                  age = time.time() - ts

          else:

-             history = [(h['revoke_event'],h) for h in history]

+             history = [(h['revoke_event'], h) for h in history]

              last = max(history)[1]

              if not last['revoke_event']:

-                 #this might happen if the build was tagged just now

+                 # this might happen if the build was tagged just now

                  print("[%i/%i] Warning: build not untagged: %s" % (i, N, nvr))

                  continue

              age = time.time() - last['revoke_ts']
@@ -538,7 +555,7 @@ 

              if options.debug:

                  print("[%i/%i] Build untagged only recently: %s" % (i, N, nvr))

              continue

-         #check build signatures

+         # check build signatures

          keys = get_build_sigs(binfo['id'], cache=True)

          if keys and options.debug:

              print("Build: %s, Keys: %s" % (nvr, keys))
@@ -546,23 +563,21 @@ 

              print("Skipping build %s. Keys: %s" % (nvr, keys))

              continue

  

-         #ok, go ahead add it to the list

+         # ok, go ahead add it to the list

          if binfo2 is None:

              binfo2 = session.getBuild(binfo['id'])

          print("[%i/%i] Adding build to trash list: %s" % (i, N, nvr))

          to_trash.append(binfo2)

  

-     #process to_trash

-     #group by owner so we can reduce the number of notices

+     # process to_trash

+     # group by owner so we can reduce the number of notices

      by_owner = {}

      for binfo in to_trash:

          by_owner.setdefault(binfo['owner_name'], []).append(binfo)

-     owners = to_list(by_owner.keys())

-     owners.sort()

+     owners = sorted(to_list(by_owner.keys()))

      mcall = koji.MultiCallSession(session, batch=1000)

      for owner_name in owners:

-         builds = [(b['nvr'], b) for b in by_owner[owner_name]]

-         builds.sort()

+         builds = sorted([(b['nvr'], b) for b in by_owner[owner_name]])

          send_warning_notice(owner_name, [x[1] for x in builds])

          for nvr, binfo in builds:

              if options.test:
@@ -570,14 +585,14 @@ 

              else:

                  if options.debug:

                      print("Moving to trashcan: %s" % nvr)

-                 #figure out package owner

+                 # figure out package owner

                  count = {}

                  for pkg in session.listPackages(pkgID=binfo['name']):

                      count.setdefault(pkg['owner_id'], 0)

                      count[pkg['owner_id']] += 1

                  if not count:

                      print("Warning: no owner for %s, using build owner" % nvr)

-                     #best we can do currently

+                     # best we can do currently

                      owner = binfo['owner_id']

                  else:

                      owner = max([(n, k) for k, n in six.iteritems(count)])[1]
@@ -586,6 +601,7 @@ 

      # run all packageListAdd/tagBuildBypass finally

      mcall.call_all()

  

+ 

  def protected_sig(keys):

      """Check list of keys and see if any are protected

  
@@ -596,7 +612,7 @@ 

          if not key:

              continue

          if not sigmatch(key, options.unprotected_key_patterns):

-             #this key is protected

+             # this key is protected

              return True

      return False

  
@@ -611,6 +627,7 @@ 

      run this action only."""

      return handle_delete(just_salvage=True)

  

+ 

  def salvage_build(binfo):

      """Removes trashcan tag from a build and prints a message"""

      if options.test:
@@ -620,6 +637,7 @@ 

              print("Untagging from trashcan: %(nvr)s" % binfo)

          session.untagBuildBypass(options.trashcan_tag, binfo['id'], force=True)

  

+ 

  def handle_delete(just_salvage=False):

      """Delete builds that have been in the trashcan for long enough

  
@@ -629,10 +647,9 @@ 

      """

      print("Getting list of builds in trash...")

      trashcan_tag = options.trashcan_tag

-     trash = [(b['nvr'], b) for b in session.listTagged(trashcan_tag)]

-     trash.sort()

+     trash = sorted([(b['nvr'], b) for b in session.listTagged(trashcan_tag)])

      print("...got %i builds" % len(trash))

-     #XXX - it would be better if there were more appropriate server calls for this

+     # XXX - it would be better if there were more appropriate server calls for this

      grace_period = options.grace_period

      import time

  
@@ -684,12 +701,12 @@ 

      for (nvr, binfo), [history] in zip(trash, mcall.call_all()):

          current = [x for x in history if x['active']]

          if not current:

-             #untagged just now?

+             # untagged just now?

              print("Warning: history missing for %s" % nvr)

              pprint.pprint(binfo)

              pprint.pprint(history)

              continue

-         assert len(current) == 1   #see db constraint

+         assert len(current) == 1  # see db constraint

          current = current[0]

          age = time.time() - current['create_ts']

          if age < grace_period:
@@ -704,14 +721,14 @@ 

          if options.test:

              print("Would have deleted build from trashcan: %s" % binfo['nvr'])

          else:

-             print("Deleting build: %s"  % binfo['nvr'])

-             mcall.untagBuildBypass(trashcan_tag,  binfo['id'])

+             print("Deleting build: %s" % binfo['nvr'])

+             mcall.untagBuildBypass(trashcan_tag, binfo['id'])

              mcall.deleteBuild(binfo['id'])

  

      for binfo, result in six.moves.zip(continuing, mcall.call_all()):

          if isinstance(result, dict):

              print("Warning: deletion failed: %s" % result['faultString'])

-             #TODO - log details for delete failures

+             # TODO - log details for delete failures

  

  

  class TagPruneTest(koji.policy.MatchTest):
@@ -792,6 +809,7 @@ 

      fo.close()

      return ret

  

+ 

  def scan_policies(str):

      """Read tag gc policies from a string

  
@@ -801,8 +819,10 @@ 

      tests = koji.policy.findSimpleTests(globals())

      return koji.policy.SimpleRuleSet(str.splitlines(), tests)

  

+ 

  build_sig_cache = {}

  

+ 

  def get_build_sigs(build, cache=False):

      if cache and build in build_sig_cache:

          return build_sig_cache[build]
@@ -813,7 +833,7 @@ 

          ret = build_sig_cache[build] = []

          return ret

      else:

-         #TODO - multicall helps, but it might be good to have a more robust server-side call

+         # TODO - multicall helps, but it might be good to have a more robust server-side call

          session.multicall = True

          for rpminfo in rpms:

              session.queryRPMSigs(rpm_id=rpminfo['id'])
@@ -824,23 +844,24 @@ 

      ret = build_sig_cache[build] = to_list(keys.keys())

      return ret

  

+ 

  def handle_prune():

      """Untag old builds according to policy

  

      If purge is True, will also attempt to delete the pruned builds afterwards

      """

-     #read policy

+     # read policy

      if not options.config or not options.config.has_option('prune', 'policy'):

          print("Skipping prune step. No policies available.")

          return

-     #policies = read_policies(options.policy_file)

+     # policies = read_policies(options.policy_file)

      policies = scan_policies(options.config.get('prune', 'policy'))

      for action in policies.all_actions():

          if action not in ("keep", "untag", "skip"):

              raise Exception("Invalid action: %s" % action)

      if options.debug:

          pprint.pprint(policies.ruleset)

-     #get tags

+     # get tags

      tags = session.listTags(perms=False, queryOpts={'order': 'name'})

      untagged = {}

      build_ids = {}
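Note: the ruleset comes from the [prune] section of the config file and every action must
be keep, untag, or skip; an illustrative policy (the test names are hypothetical, the real
ones come from the *Test classes that findSimpleTests() collects):

    [prune]
    policy =
        tag *-trashcan :: skip
        all :: keep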
@@ -851,7 +872,7 @@ 

                  print("Skipping trashcan tag: %s" % tagname)

              continue

          if not check_tag(tagname):

-             #if options.debug:

+             # if options.debug:

              #    print("skipping tag due to filter: %s" % tagname)

              continue

          bypass = False
@@ -869,7 +890,7 @@ 

                  continue

          if options.debug:

              print("Pruning tag: %s" % tagname)

-         #get builds

+         # get builds

          history = session.queryHistory(tag=tagname, active=True)['tag_listing']

          if not history:

              if options.debug:
@@ -881,30 +902,29 @@ 

                  pkghist.setdefault(h['name'] + '-' + h['version'], []).append(h)

              else:

                  pkghist.setdefault(h['name'], []).append(h)

-         pkgs = to_list(pkghist.keys())

-         pkgs.sort()

+         pkgs = sorted(to_list(pkghist.keys()))

          for pkg in pkgs:

              if not check_package(pkg):

-                 #if options.debug:

+                 # if options.debug:

                  #    print("skipping package due to filter: %s" % pkg)

                  continue

              if options.debug:

                  print(pkg)

              hist = pkghist[pkg]

-             #these are the *active* history entries for tag/pkg

+             # these are the *active* history entries for tag/pkg

              skipped = 0

              for order, entry in enumerate(hist):

                  # get sig data

                  nvr = "%(name)s-%(version)s-%(release)s" % entry

                  data = {

-                     'tagname' : tagname,

-                     'pkgname' : pkg,

+                     'tagname': tagname,

+                     'pkgname': pkg,

                      'order': order - skipped,

-                     'ts' : entry['create_ts'],

-                     'nvr' : nvr,

-                     }

+                     'ts': entry['create_ts'],

+                     'nvr': nvr,

+                 }

                  data = LazyDict(data)

-                 data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache':True})

+                 data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache': True})

                  data['volname'] = LazyValue(lambda x: session.getBuild(x).get('volume_name'),

                                              (entry['build_id'],), cache=True)

                  build_ids[nvr] = entry['build_id']
@@ -922,9 +942,10 @@ 

                          print("Would have untagged %s from %s" % (nvr, tagname))

                          untagged.setdefault(nvr, {})[tagname] = 1

                      else:

-                         print("Untagging build %s from %s"  % (nvr, tagname))

+                         print("Untagging build %s from %s" % (nvr, tagname))

                          try:

-                             session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)

+                             session.untagBuildBypass(taginfo['id'], entry['build_id'],

+                                                      force=bypass)

                              untagged.setdefault(nvr, {})[tagname] = 1

                          except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:

                              print("Warning: untag operation failed: %s" % e)
@@ -934,21 +955,21 @@ 

          print("Attempting to purge %i builds" % len(untagged))

          for nvr in untagged:

              build_id = build_ids[nvr]

-             tags = [t['name'] for t in  session.listTags(build_id, perms=False)]

+             tags = [t['name'] for t in session.listTags(build_id, perms=False)]

              if options.test:

-                 #filted out the tags we would have dropped above

+                 # filter out the tags we would have dropped above

                  tags = [t for t in tags if t not in untagged[nvr]]

              if tags:

-                 #still tagged somewhere

+                 # still tagged somewhere

                  print("Skipping %s, still tagged: %s" % (nvr, tags))

                  continue

-             #check cached sigs first to save a little time

+             # check cached sigs first to save a little time

              if build_id in build_sig_cache:

                  keys = build_sig_cache[build_id]

                  if protected_sig(keys):

                      print("Skipping %s, signatures: %s" % (nvr, keys))

                      continue

-             #recheck signatures in case build was signed during run

+             # recheck signatures in case build was signed during run

              keys = get_build_sigs(build_id, cache=False)

              if protected_sig(keys):

                  print("Skipping %s, signatures: %s" % (nvr, keys))
@@ -957,14 +978,15 @@ 

              if options.test:

                  print("Would have deleted build: %s" % nvr)

              else:

-                 print("Deleting untagged build: %s"  % nvr)

+                 print("Deleting untagged build: %s" % nvr)

                  try:

                      session.deleteBuild(build_id, strict=False)

                  except (six.moves.xmlrpc_client.Fault, koji.GenericError) as e:

                      print("Warning: deletion failed: %s" % e)

-                     #server issue

+                     # server issue

                      pass

  

+ 

  if __name__ == "__main__":

  

      options, args = get_options()
@@ -988,7 +1010,7 @@ 

                  if options.exit_on_lock:

                      try:

                          session.logout()

-                     except:

+                     except Exception:

                          pass

                      sys.exit(1)

              os.close(lock_fd)
@@ -1010,7 +1032,7 @@ 

          pass

      except SystemExit:

          rv = 1

-     #except:

+     # except:

      #    if options.debug:

      #        raise

      #    else:
@@ -1019,7 +1041,7 @@ 

      #        print("%s: %s" % (exctype, value))

      try:

          session.logout()

-     except:

+     except Exception:

          pass

      if not options.skip_main:

          sys.exit(rv)

file modified
+261 -260
@@ -51,7 +51,7 @@ 

  

  # koji.fp.o keeps stalling, probably network errors...

  # better to time out than to stall

- socket.setdefaulttimeout(180)  #XXX - too short?

+ socket.setdefaulttimeout(180)  # XXX - too short?

  

  logfile = None

  
@@ -60,16 +60,19 @@ 

      """Stub function for translation"""

      return args

  

+ 

  def log(str):

      global logfile

      print("%s" % str)

      if logfile is not None:

          os.write(logfile, "%s\n" % str)

  

+ 

  class SubOption(object):

      """A simple container to help with tracking ConfigParser data"""

      pass

  

+ 

  def get_options():

      """process options from command line and config file"""

  
@@ -82,7 +85,7 @@ 

      parser.add_option("--krbservice", help=_("the service name of the"

                                               " principal being used by the hub"))

      parser.add_option("--runas", metavar="USER",

-              help=_("run as the specified user (requires special privileges)"))

+                       help=_("run as the specified user (requires special privileges)"))

      parser.add_option("--user", help=_("specify user"))

      parser.add_option("--password", help=_("specify password"))

      parser.add_option("--krb-rdns", action="store_true", default=False,
@@ -142,13 +145,15 @@ 

      parser.add_option("--rules-ignorelist",

                        help=_("Rules: list of packages to ignore"))

      parser.add_option("--rules-excludelist",

-                       help=_("Rules: list of packages to are excluded using ExcludeArch or ExclusiveArch"))

+                       help=_("Rules: list of packages to are excluded using ExcludeArch or "

+                              "ExclusiveArch"))

      parser.add_option("--rules-includelist",

                        help=_("Rules: list of packages to always include"))

      parser.add_option("--rules-protectlist",

                        help=_("Rules: list of package names to never replace"))

      parser.add_option("--tag-build", action="store_true", default=False,

-                       help=_("tag successful builds into the tag we are building, default is to not tag"))

+                       help=_("tag successful builds into the tag we are building, default is to "

+                              "not tag"))

      parser.add_option("--logfile",

                        help=_("file where everything gets logged"))

      parser.add_option("--arches",
@@ -156,14 +161,14 @@ 

      parser.add_option("--priority", type="int", default=5,

                        help=_("priority to set for submitted builds"))

  

-     #parse once to get the config file

+     # parse once to get the config file

      (options, args) = parser.parse_args()

  

      defaults = parser.get_default_values()

      cf = getattr(options, 'config_file', '/etc/koji-shadow/koji-shadow.conf')

      config = koji.read_config_files(cf)

  

-     #allow config file to update defaults

+     # allow config file to update defaults

      for opt in parser.option_list:

          if not opt.dest:

              continue
@@ -182,50 +187,37 @@ 

              else:

                  log(config.get(*alias))

                  setattr(defaults, name, config.get(*alias))

-     #config file options without a cmdline equivalent

-     otheropts = [

-         #name, type, default

-         ['keytab', None, 'string'],

-         ['principal', None, 'string'],

-         ['runas', None, 'string'],

-         ['user', None, 'string'],

-         ['password', None, 'string'],

-         ['noauth', None, 'boolean'],

-         ['server', None, 'string'],

-         ['remote', None, 'string'],

-         ['max_jobs', None, 'int'],

-         ['serverca', None, 'string'],

-         ['auth_cert', None, 'string'],

-         ['arches', None, 'string'],

-         ]

- 

- 

-     #parse again with updated defaults

+ 

+     # parse again with updated defaults

      (options, args) = parser.parse_args(values=defaults)

      options.config = config

  

      return options, args

  

+ 

  time_units = {

-     'second' : 1,

-     'minute' : 60,

-     'hour' : 3600,

-     'day' : 86400,

-     'week' : 604800,

+     'second': 1,

+     'minute': 60,

+     'hour': 3600,

+     'day': 86400,

+     'week': 604800,

  }

  time_unit_aliases = [

-     #[unit, alias, alias, ...]

+     # [unit, alias, alias, ...]

      ['week', 'weeks', 'wk', 'wks'],

      ['hour', 'hours', 'hr', 'hrs'],

      ['day', 'days'],

      ['minute', 'minutes', 'min', 'mins'],

      ['second', 'seconds', 'sec', 'secs', 's'],

  ]

+ 

+ 

  def parse_duration(str):

      """Parse time duration from string, returns duration in seconds"""

      ret = 0

      n = None

      unit = None

+ 

      def parse_num(s):

          try:

              return int(s)
@@ -241,7 +233,7 @@ 

              n = parse_num(x)

              if n is not None:

                  continue

-             #perhaps the unit is appended w/o a space

+             # perhaps the unit is appended w/o a space

              for names in time_unit_aliases:

                  for name in names:

                      if x.endswith(name):
@@ -271,16 +263,19 @@ 

          unit = None

      return ret

  

+ 

  def error(msg=None, code=1):

      if msg:

          sys.stderr.write(msg + "\n")

          sys.stderr.flush()

      sys.exit(code)

  

+ 

  def warn(msg):

      sys.stderr.write(msg + "\n")

      sys.stderr.flush()

  

+ 

  def ensure_connection(session):

      try:

          ret = session.getAPIVersion()
@@ -290,28 +285,31 @@ 

          warn(_("WARNING: The server is at API version %d and the client is at "

                 "%d" % (ret, koji.API_VERSION)))

  

+ 

  def activate_session(session):

      """Test and login the session is applicable"""

      global options

  

      if options.noauth:

-         #skip authentication

+         # skip authentication

          pass

      elif options.auth_cert and options.serverca:

          # convert to absolute paths

-         options.auth_cert  = os.path.expanduser(options.auth_cert)

-         options.serverca  = os.path.expanduser(options.serverca)

+         options.auth_cert = os.path.expanduser(options.auth_cert)

+         options.serverca = os.path.expanduser(options.serverca)

  

          if os.path.isfile(options.auth_cert):

              # authenticate using SSL client cert

-             session.ssl_login(cert=options.auth_cert, serverca=options.serverca, proxyuser=options.runas)

+             session.ssl_login(cert=options.auth_cert, serverca=options.serverca,

+                               proxyuser=options.runas)

      elif options.user:

-         #authenticate using user/password

+         # authenticate using user/password

          session.login()

      elif krbV:

          try:

              if options.keytab and options.principal:

-                 session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)

+                 session.krb_login(principal=options.principal, keytab=options.keytab,

+                                   proxyuser=options.runas)

              else:

                  session.krb_login(proxyuser=options.runas)

          except krbV.Krb5Error as e:
@@ -324,6 +322,7 @@ 

      if options.debug:

          log("successfully connected to hub")

  

+ 

  def _unique_path(prefix):

      """Create a unique path fragment by appending a path component

      to prefix.  The path component will consist of a string of letter and numbers
@@ -333,7 +332,7 @@ 

      # For some reason repr(time.time()) includes 4 or 5

      # more digits of precision than str(time.time())

      return '%s/%r.%s' % (prefix, time.time(),

-                       ''.join([random.choice(string.ascii_letters) for i in range(8)]))

+                          ''.join([random.choice(string.ascii_letters) for i in range(8)]))

  

  

  class LocalBuild(object):
@@ -364,9 +363,9 @@ 

          self.order = 0

          self.substitute = None

          if child is not None:

-             #children tracks the builds that were built using this one

+             # children tracks the builds that were built using this one

              self.children[child] = 1

-         #see if we have it

+         # see if we have it

          self.rebuilt = False

          self.updateState()

          if self.state == 'missing':
@@ -375,7 +374,7 @@ 

                  if rinfo['arch'] == 'src':

                      self.srpm = rinfo

              self.getExtraArches()

-             self.getDeps() #sets deps, br_tag, base, order, (maybe state)

+             self.getDeps()  # sets deps, br_tag, base, order, (maybe state)

  

      def updateState(self):

          """Update state from local hub
@@ -391,7 +390,7 @@ 

                      self.rebuilt = True

                  return

              elif state in ('FAILED', 'CANCELED'):

-                 #treat these as having no build

+                 # treat these as having no build

                  pass

              elif state == 'BUILDING' and ours['task_id']:

                  self.setState("pending")
@@ -409,14 +408,14 @@ 

          noarch = False

          for rpminfo in self.rpms:

              if rpminfo['arch'] == 'noarch':

-                 #note that we've seen a noarch rpm

+                 # note that we've seen a noarch rpm

                  noarch = True

              elif rpminfo['arch'] != 'src':

                  return False

          return noarch

  

      def setState(self, state):

-         #log("%s -> %s" % (self.nvr, state))

+         # log("%s -> %s" % (self.nvr, state))

          if state == self.state:

              return

          if self.state is not None and self.tracker:
@@ -428,11 +427,11 @@ 

      def getSource(self):

          """Get source from remote"""

          if options.remote_topurl and self.srpm:

-             #download srpm from remote

+             # download srpm from remote

              pathinfo = koji.PathInfo(options.remote_topurl)

              url = "%s/%s" % (pathinfo.build(self.info), pathinfo.rpm(self.srpm))

              log("Downloading %s" % url)

-             #XXX - this is not really the right place for this

+             # XXX - this is not really the right place for this

              fsrc = urllib2.urlopen(url)

              fn = "%s/%s.src.rpm" % (options.workpath, self.nvr)

              koji.ensuredir(os.path.dirname(fn))
@@ -444,7 +443,7 @@ 

              session.uploadWrapper(fn, serverdir, blocksize=65536)

              src = "%s/%s" % (serverdir, os.path.basename(fn))

              return src

-         #otherwise use SCM url

+         # otherwise use SCM url

          task_id = self.info['task_id']

          if task_id:

              tinfo = remote.getTaskInfo(task_id)
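Note: the download URL a few lines up is composed with koji.PathInfo; roughly (the topurl
value is whatever remote_topurl is configured to):

    pathinfo = koji.PathInfo(options.remote_topurl)
    url = '%s/%s' % (pathinfo.build(self.info), pathinfo.rpm(self.srpm))
    # -> <topurl>/packages/<name>/<version>/<release>/src/<nvr>.src.rpm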
@@ -452,12 +451,12 @@ 

                  try:

                      request = remote.getTaskRequest(task_id)

                      src = request[0]

-                     #XXX - Move SCM class out of kojid and use it to check for scm url

+                     # XXX - Move SCM class out of kojid and use it to check for scm url

                      if src.startswith('cvs:'):

                          return src

-                 except:

+                 except Exception:

                      pass

-         #otherwise fail

+         # otherwise fail

          return None

  

      def addChild(self, child):
@@ -492,28 +491,28 @@ 

              return

          buildroots.sort()

          self.order = buildroots[-1]

-         seen = {}       #used to avoid scanning the same buildroot twice

-         builds = {}     #track which builds we need for a rebuild

-         bases = {}      #track base install for buildroots

-         tags = {}       #track buildroot tag(s)

+         seen = {}  # used to avoid scanning the same buildroot twice

+         builds = {}  # track which builds we need for a rebuild

+         bases = {}  # track base install for buildroots

+         tags = {}  # track buildroot tag(s)

          remote.multicall = True

          unpack = []

          for br_id in buildroots:

              if br_id in seen:

                  continue

              seen[br_id] = 1

-             #br_info = remote.getBuildroot(br_id, strict=True)

+             # br_info = remote.getBuildroot(br_id, strict=True)

              remote.getBuildroot(br_id, strict=True)

              unpack.append(('br_info', br_id))

-             #tags.setdefault(br_info['tag_name'], 0)

-             #tags[br_info['tag_name']] += 1

-             #print(".")

+             # tags.setdefault(br_info['tag_name'], 0)

+             # tags[br_info['tag_name']] += 1

+             # print(".")

              remote.listRPMs(componentBuildrootID=br_id)

              unpack.append(('rpmlist', br_id))

-             #for rinfo in remote.listRPMs(componentBuildrootID=br_id):

-             #    builds[rinfo['build_id']] = 1

-             #    if not rinfo['is_update']:

-             #        bases.setdefault(rinfo['name'], {})[br_id] = 1

+             # for rinfo in remote.listRPMs(componentBuildrootID=br_id):

+             #     builds[rinfo['build_id']] = 1

+             #     if not rinfo['is_update']:

+             #         bases.setdefault(rinfo['name'], {})[br_id] = 1

          for (dtype, br_id), data in zip(unpack, remote.multiCall()):

              if dtype == 'br_info':

                  [br_info] = data
@@ -533,25 +532,27 @@ 

          #        repo and others the new one.

          base = []

          for name, brlist in six.iteritems(bases):

-             #We want to determine for each name if that package was present

-             #in /all/ the buildroots or just some.

-             #Because brlist is constructed only from elements of buildroots, we

-             #can simply check the length

+             # We want to determine for each name if that package was present

+             # in /all/ the buildroots or just some.

+             # Because brlist is constructed only from elements of buildroots, we

+             # can simply check the length

              assert len(brlist) <= len(buildroots)

              if len(brlist) == len(buildroots):

-                 #each buildroot had this as a base package

+                 # each buildroot had this as a base package

                  base.append(name)

          if len(tags) > 1:

-             log("Warning: found multiple buildroot tags for %s: %s" % (self.nvr, to_list(tags.keys())))

+             log("Warning: found multiple buildroot tags for %s: %s" %

+                 (self.nvr, to_list(tags.keys())))

              counts = sorted([(n, tag) for tag, n in six.iteritems(tags)])

              tag = counts[-1][1]

          else:

              tag = to_list(tags.keys())[0]

+         # due to bugs in the tools used, the mainline koji instance could store empty

+         # buildroot infos for builds

+         # builds

          if len(builds) == 0:

              self.setState("noroot")

          self.deps = builds

-         self.revised_deps = None #BuildTracker will set this later

+         self.revised_deps = None  # BuildTracker will set this later

          self.br_tag = tag

          self.base = base

  
@@ -564,7 +565,7 @@ 

          self.state_idx = {}

          self.nvr_idx = {}

          for state in ('common', 'pending', 'missing', 'broken', 'brokendeps',

-                      'noroot', 'blocked', 'grey'):

+                       'noroot', 'blocked', 'grey'):

              self.state_idx.setdefault(state, {})

          self.scanRules()

  
@@ -603,18 +604,18 @@ 

          self.ignorelist = self.ignorelist + self.excludelist

  

          if options.config.has_option('rules', 'substitutions'):

-             #At present this is a simple multi-line format

-             #one substitution per line

-             #format:

+             # At present this is a simple multi-line format

+             # one substitution per line

+             # format:

              #  missing-build  build-to-substitute

-             #TODO: allow more robust substitutions

+             # TODO: allow more robust substitutions

              for line in options.config.get('rules', 'substitutions').splitlines():

                  line = line.strip()

                  if line[:1] == "#":

-                     #skip comment

+                     # skip comment

                      continue

                  if not line:

-                     #blank

+                     # blank

                      continue

                  data = line.split()

                  if len(data) != 2:
@@ -650,17 +651,18 @@ 

          """find out which build is newer"""

          rc = rpm.labelCompare(nvr1, nvr2)

          if rc == 1:

-             #first evr wins

+             # first evr wins

              return 1

          elif rc == 0:

-             #same evr

+             # same evr

              return 0

          else:

-             #second evr wins

+             # second evr wins

              return -1

  

      def newerBuild(self, build, tag):

-         #XXX:  secondary arches need a policy to say if we have newer build localy it will be the substitute

+         # XXX: secondary arches need a policy to say that if we have a newer build locally,

+         # it will be the substitute

          localBuilds = session.listTagged(tag, inherit=True, package=str(build.name))

          newer = None

          parentevr = (str(build.epoch), build.version, build.release)
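Note: rpm.labelCompare() above works on (epoch, version, release) string tuples, hence the
str() casts around epoch; for example:

    import rpm
    rpm.labelCompare(('0', '1.2', '1'), ('0', '1.10', '1'))  # -> -1, second EVR is newer
    rpm.labelCompare(('1', '1.0', '1'), ('0', '9.9', '9'))   # ->  1, epoch wins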
@@ -669,14 +671,16 @@ 

              latestevr = (str(b['epoch']), b['version'], b['release'])

              newestRPM = self.rpmvercmp(parentevr, latestevr)

              if options.debug:

-                 log("remote evr: %s  \nlocal evr: %s \nResult: %s" % (parentevr, latestevr, newestRPM))

+                 log("remote evr: %s  \nlocal evr: %s \nResult: %s" %

+                     (parentevr, latestevr, newestRPM))

              if newestRPM == -1:

                  newer = b

              else:

                  break

-         #the local is newer

+         # the local is newer

          if newer is not None:

-             info = session.getBuild("%s-%s-%s" % (str(newer['name']), newer['version'], newer['release']))

+             info = session.getBuild("%s-%s-%s" %

+                                     (str(newer['name']), newer['version'], newer['release']))

              if info:

                  build = LocalBuild(info)

                  self.substitute_idx[parentnvr] = build
@@ -686,16 +690,16 @@ 

      def getSubstitute(self, nvr):

          build = self.substitute_idx.get(nvr)

          if not build:

-             #see if remote has it

+             # see if remote has it

              info = remote.getBuild(nvr)

              if info:

-                 #see if we're already tracking it

+                 # see if we're already tracking it

                  build = self.builds.get(info['id'])

                  if not build:

                      build = TrackedBuild(info['id'], tracker=self)

              else:

-                 #remote doesn't have it

-                 #see if we have it locally

+                 # remote doesn't have it

+                 # see if we have it locally

                  info = session.getBuild(nvr)

                  if info:

                      build = LocalBuild(info)
@@ -706,13 +710,13 @@ 

  

      def scanBuild(self, build_id, from_build=None, depth=0, tag=None):

          """Recursively scan a build and its dependencies"""

-         #print build_id

+         # print build_id

          build = self.builds.get(build_id)

          if build:

-             #already scanned

+             # already scanned

              if from_build:

                  build.addChild(from_build.id)

-             #There are situations where, we'll need to go forward anyway:

+             # There are situations where we'll need to go forward anyway:

              # - if we were greylisted before, and depth > 0 now

              # - if we're being substituted and depth is 0

              if not (depth > 0 and build.state == 'grey') \
@@ -731,52 +735,53 @@ 

          head = " " * depth

          for ignored in self.ignorelist:

              if (build.name == ignored) or fnmatch.fnmatch(build.name, ignored):

-                 log ("%sIgnored Build: %s%s" % (head, build.nvr, tail))

+                 log("%sIgnored Build: %s%s" % (head, build.nvr, tail))

                  build.setState('ignore')

                  return build

          check = self.checkFilter(build, grey=None)

          if check is None:

-             #greylisted builds are ok as deps, but not primary builds

+             # greylisted builds are ok as deps, but not primary builds

              if depth == 0:

-                 log ("%sGreylisted build %s%s" % (head, build.nvr, tail))

+                 log("%sGreylisted build %s%s" % (head, build.nvr, tail))

                  build.setState('grey')

                  return build

-             #get rid of 'grey' state (filter will not be checked again)

+             # get rid of 'grey' state (filter will not be checked again)

              build.updateState()

          elif not check:

-             log ("%sBlocked build %s%s" % (head, build.nvr, tail))

+             log("%sBlocked build %s%s" % (head, build.nvr, tail))

              build.setState('blocked')

              return build

-         #make sure we dont have the build name protected

+         # make sure we don't have the build name protected

          if build.name not in self.protectlist:

-             #check to see if a substition applies

+             # check to see if a substitution applies

              replace = self.substitutions.get(build.nvr)

              if replace:

                  build.substitute = replace

                  if depth > 0:

-                     log ("%sDep replaced: %s->%s" % (head, build.nvr, replace))

+                     log("%sDep replaced: %s->%s" % (head, build.nvr, replace))

                      return build

-             if options.prefer_new and (depth > 0) and (tag is not None) and not (build.state == "common"):

+             if options.prefer_new and (depth > 0) and (tag is not None) and \

+                     not (build.state == "common"):

                  latestBuild = self.newerBuild(build, tag)

-                 if latestBuild != None:

+                 if latestBuild is not None:

                      build.substitute = latestBuild.nvr

-                     log ("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr))

+                     log("%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr))

                      return build

          else:

-             log ("%sProtected Build: %s" % (head, build.nvr))

+             log("%sProtected Build: %s" % (head, build.nvr))

          if build.state == "common":

-             #we're good

+             # we're good

              if build.rebuilt:

-                 log ("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail))

+                 log("%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail))

              else:

-                 log ("%sCommon build %s%s" % (head, build.nvr, tail))

+                 log("%sCommon build %s%s" % (head, build.nvr, tail))

          elif build.state == 'pending':

-             log ("%sRebuild in progress: %s%s" % (head, build.nvr, tail))

+             log("%sRebuild in progress: %s%s" % (head, build.nvr, tail))

          elif build.state == "broken":

-             #The build already exists locally, but is somehow invalid.

-             #We should not replace it automatically. An admin can reset it

-             #if that is the correct thing. A substitution might also be in order

-             log ("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail))

+             # The build already exists locally, but is somehow invalid.

+             # We should not replace it automatically. An admin can reset it

+             # if that is the correct thing. A substitution might also be in order

+             log("%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail))

          #

          #  !! Cases where importing a noarch is /not/ ok must occur

          #     before this point
@@ -784,30 +789,30 @@ 

          elif (options.import_noarch or options.import_noarch_only) and build.isNoarch():

              self.importBuild(build, tag)

          elif options.import_noarch_only and not build.isNoarch():

-             log ("%sSkipping archful build: %s" % (head, build.nvr))

+             log("%sSkipping archful build: %s" % (head, build.nvr))

          elif build.state == "noroot":

-             #Can't rebuild it, this is what substitutions are for

-             log ("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail))

+             # Can't rebuild it, this is what substitutions are for

+             log("%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail))

          elif build.state == 'brokendeps':

-             #should not be possible at this point

-             log ("Error: build reports brokendeps state before dep scan")

+             # should not be possible at this point

+             log("Error: build reports brokendeps state before dep scan")

          elif build.state == "missing":

-             #scan its deps

-             log ("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail))

+             # scan its deps

+             log("%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail))

              newdeps = []

              # include extra local builds as deps.

              if self.includelist:

                  for dep in self.includelist:

                      info = session.getBuild(dep)

                      if info:

-                         log ("%s Adding local Dep %s%s" % (head, dep, tail))

+                         log("%s Adding local Dep %s%s" % (head, dep, tail))

                          extradep = LocalBuild(info)

                          newdeps.append(extradep)

                      else:

-                         log ("%s Warning: could not find build for %s" % (head, dep))

-             #don't actually set build.revised_deps until we finish the dep scan

+                         log("%s Warning: could not find build for %s" % (head, dep))

+             # don't actually set build.revised_deps until we finish the dep scan

              for dep_id in build.deps:

-                 dep = self.scanBuild(dep_id, from_build=build, depth=depth+1, tag=tag)

+                 dep = self.scanBuild(dep_id, from_build=build, depth=depth + 1, tag=tag)

                  if dep.name in self.ignorelist:

                      # we are not done dep solving yet.  but we dont want this dep in our buildroot

                      continue
@@ -815,18 +820,18 @@ 

                      if dep.substitute:

                          dep2 = self.getSubstitute(dep.substitute)

                          if isinstance(dep2, TrackedBuild):

-                             self.scanBuild(dep2.id, from_build=build, depth=depth+1, tag=tag)

+                             self.scanBuild(dep2.id, from_build=build, depth=depth + 1, tag=tag)

                          elif dep2 is None:

-                             #dep is missing on both local and remote

-                             log ("%sSubstitute dep unavailable: %s" % (head, dep2.nvr))

-                             #no point in continuing

+                             # dep is missing on both local and remote

+                             log("%sSubstitute dep unavailable: %s" % (head, dep2.nvr))

+                             # no point in continuing

                              break

-                         #otherwise dep2 should be LocalBuild instance

+                         # otherwise dep2 should be LocalBuild instance

                          newdeps.append(dep2)

                      elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):

-                         #no point in continuing

+                         # no point in continuing

                          build.setState('brokendeps')

-                         log ("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state))

+                         log("%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state))

                          newdeps = None

                          break

                      else:
@@ -836,7 +841,7 @@ 

                      self.rebuild_order += 1

                      build.order = self.rebuild_order

              build.revised_deps = newdeps

-         #scanning takes a long time, might as well start builds if we can

+         # scanning takes a long time, might as well start builds if we can

          self.checkJobs(tag)

          self.rebuildMissing()

          if len(self.builds) % 50 == 0:
@@ -854,24 +859,24 @@ 

                      if options.first_one:

                          return

                  except (socket.timeout, socket.error):

-                     log ("retry")

+                     log("retry")

                      continue

                  break

              else:

-                 log ("Error: unable to scan %(name)s-%(version)s-%(release)s" % build)

+                 log("Error: unable to scan %(name)s-%(version)s-%(release)s" % build)

                  continue

  

      def _importURL(self, url, fn):

          """Import an rpm directly from a url"""

          serverdir = _unique_path('koji-shadow')

          if options.link_imports:

-             #bit of a hack, but faster than uploading

+             # bit of a hack, but faster than uploading

              dst = "%s/%s/%s" % (koji.pathinfo.work(), serverdir, fn)

              old_umask = os.umask(0o02)

              try:

                  koji.ensuredir(os.path.dirname(dst))

-                 os.chown(os.path.dirname(dst), 48, 48) #XXX - hack

-                 log ("Downloading %s to %s" % (url, dst))

+                 os.chown(os.path.dirname(dst), 48, 48)  # XXX - hack

+                 log("Downloading %s to %s" % (url, dst))

                  fsrc = urllib2.urlopen(url)

                  fdst = open(fn, 'w')

                  shutil.copyfileobj(fsrc, fdst)
@@ -880,28 +885,29 @@ 

              finally:

                  os.umask(old_umask)

          else:

-             #TODO - would be possible, using uploadFile directly, to upload without writing locally.

-             #for now, though, just use uploadWrapper

+             # TODO - it would be possible, using uploadFile directly,

+             #        to upload without writing locally.

+             # for now, though, just use uploadWrapper

              koji.ensuredir(options.workpath)

              dst = "%s/%s" % (options.workpath, fn)

-             log ("Downloading %s to %s..." % (url, dst))

+             log("Downloading %s to %s..." % (url, dst))

              fsrc = urllib2.urlopen(url)

              fdst = open(dst, 'w')

              shutil.copyfileobj(fsrc, fdst)

              fsrc.close()

              fdst.close()

-             log ("Uploading %s..." % dst)

+             log("Uploading %s..." % dst)

              session.uploadWrapper(dst, serverdir, blocksize=65536)

          session.importRPM(serverdir, fn)

  

      def importBuild(self, build, tag=None):

          '''import a build from remote hub'''

          if not build.srpm:

-             log ("No srpm for build %s, skipping import" % build.nvr)

-             #TODO - support no-src imports here

+             log("No srpm for build %s, skipping import" % build.nvr)

+             # TODO - support no-src imports here

              return False

          if not options.remote_topurl:

-             log ("Skipping import of %s, remote_topurl not specified" % build.nvr)

+             log("Skipping import of %s, remote_topurl not specified" % build.nvr)

              return False

          pathinfo = koji.PathInfo(options.remote_topurl)

          build_url = pathinfo.build(build.info)
@@ -910,53 +916,48 @@ 

          self._importURL(url, fname)

          for rpminfo in build.rpms:

              if rpminfo['arch'] == 'src':

-                 #already imported above

+                 # already imported above

                  continue

              relpath = pathinfo.rpm(rpminfo)

              url = "%s/%s" % (build_url, relpath)

              fname = os.path.basename(relpath)

              self._importURL(url, fname)

          build.updateState()

-         if options.tag_build and not tag == None:

+         if options.tag_build and tag is not None:

              self.tagSuccessful(build.nvr, tag)

          return True

  

-     def scan(self):

-         """Scan based on config file"""

-         to_scan = []

-         alltags = remote.listTags()

- 

      def rebuild(self, build):

          """Rebuild a remote build using closest possible buildroot"""

-         #first check that we can

+         # first check that we can

          if build.state != 'missing':

-             log ("Can't rebuild %s. state=%s" % (build.nvr, build.state))

+             log("Can't rebuild %s. state=%s" % (build.nvr, build.state))

              return

-         #deps = []

-         #for build_id in build.deps:

-         #    dep = self.builds.get(build_id)

-         #    if not dep:

-         #        log ("Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr))

-         #        return

-         #    if dep.state != 'common':

-         #        log ("Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state))

-         #        return

-         #    deps.append(dep)

+         # deps = []

+         # for build_id in build.deps:

+         #     dep = self.builds.get(build_id)

+         #     if not dep:

+         #         log ("Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr))

+         #         return

+         #     if dep.state != 'common':

+         #         log ("Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state))

+         #         return

+         #     deps.append(dep)

          deps = build.revised_deps

          if deps is None:

-             log ("Can't rebuild %s" % build.nvr)

+             log("Can't rebuild %s" % build.nvr)

              return

          if options.test:

-             log ("Skipping rebuild of %s (test mode)" % build.nvr)

+             log("Skipping rebuild of %s (test mode)" % build.nvr)

              return

-         #check/create tag

+         # check/create tag

          our_tag = "SHADOWBUILD-%s" % build.br_tag

          taginfo = session.getTag(our_tag)

          parents = None

          if not taginfo:

-             #XXX - not sure what is best here

-             #how do we pick arches?  for now just hardcoded

-             #XXX this call for perms is stupid, but it's all we've got

+             # XXX - not sure what is best here

+             # how do we pick arches?  for now just hardcoded

+             # XXX this call for perms is stupid, but it's all we've got

              perm_id = None

              for data in session.getAllPerms():

                  if data['name'] == 'admin':
@@ -964,9 +965,9 @@ 

                      break

              session.createTag(our_tag, perm=perm_id, arches=options.arches)

              taginfo = session.getTag(our_tag, strict=True)

-             #we don't need a target, we trigger our own repo creation and

-             #pass that repo_id to the build call

-             #session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])

+             # we don't need a target, we trigger our own repo creation and

+             # pass that repo_id to the build call

+             # session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])

              # duplicate also extra information for a tag (eg. packagemanager setting)

              rtaginfo = remote.getTag(build.br_tag)

              if 'extra' in rtaginfo:
@@ -976,8 +977,8 @@ 

          else:

              parents = session.getInheritanceData(taginfo['id'])

              if parents:

-                 log ("Warning: shadow build tag has inheritance")

-         #check package list

+                 log("Warning: shadow build tag has inheritance")

+         # check package list

          pkgs = {}

          for pkg in session.listPackages(tagID=taginfo['id']):

              pkgs[pkg['package_name']] = pkg
@@ -985,45 +986,44 @@ 

          for dep in deps:

              name = dep.info['name']

              if name not in pkgs:

-                 #guess owner

+                 # guess owner

                  owners = {}

                  for pkg in session.listPackages(pkgID=name):

                      owners.setdefault(pkg['owner_id'], []).append(pkg)

                  if owners:

-                     order = [(len(v), k) for k, v in six.iteritems(owners)]

-                     order.sort()

+                     order = sorted([(len(v), k) for k, v in six.iteritems(owners)])

                      owner = order[-1][1]

                  else:

-                     #just use ourselves

+                     # just use ourselves

                      owner = session.getLoggedInUser()['id']

                  missing_pkgs.append((name, owner))

-         #check build list

+         # check build list

          cur_builds = {}

          for binfo in session.listTagged(taginfo['id']):

-             #index by name in tagging order (latest first)

+             # index by name in tagging order (latest first)

              cur_builds.setdefault(binfo['name'], []).append(binfo)

          to_untag = []

          to_tag = []

          for dep in deps:

-             #XXX - assuming here that there is only one dep per 'name'

+             # XXX - assuming here that there is only one dep per 'name'

              #      may want to check that this is true

              cur_order = cur_builds.get(dep.info['name'], [])

              tagged = False

              for binfo in cur_order:

                  if binfo['nvr'] == dep.nvr:

                      tagged = True

-                     #may not be latest now, but it will be after we do all the untagging

+                     # may not be latest now, but it will be after we do all the untagging

                  else:

                      # note that the untagging keeps older builds from piling up. In a sense

                      # we're gc-pruning this tag ourselves every pass.

                      to_untag.append(binfo)

              if not tagged:

                  to_tag.append(dep)

-         #TODO - "add-on" packages

+         # TODO - "add-on" packages

          #       for handling arch-specific deps that may not show up on remote

          #       e.g. elilo or similar

          #       these extra packages should be added to tag, but not the build group

-         #TODO - local extra builds

+         # TODO - local extra builds

          #       a configurable mechanism to add specific local builds to the buildroot

          drop_groups = []

          build_group = None
@@ -1032,24 +1032,24 @@ 

                  build_group = group

              else:

                  # we should have no other groups but build

-                 log ("Warning: found stray group: %s" % group)

+                 log("Warning: found stray group: %s" % group)

                  drop_groups.append(group['name'])

          if build_group:

-             #fix build group package list based on base of build to shadow

+             # fix build group package list based on base of build to shadow

              needed = dict([(n, 1) for n in build.base])

              current = dict([(p['package'], 1) for p in build_group['packagelist']])

              add_pkgs = [n for n in needed if n not in current]

              drop_pkgs = [n for n in current if n not in needed]

-             #no group deps needed/allowed

+             # no group deps needed/allowed

              drop_deps = [(g['name'], 1) for g in build_group['grouplist']]

              if drop_deps:

-                 log ("Warning: build group had deps: %r" % build_group)

+                 log("Warning: build group had deps: %r" % build_group)

          else:

              add_pkgs = build.base

              drop_pkgs = []

              drop_deps = []

-         #update package list, tagged packages, and groups in one multicall/transaction

-         #(avoid useless repo regens)

+         # update package list, tagged packages, and groups in one multicall/transaction

+         # (avoid useless repo regens)

          session.multicall = True

          for name, owner in missing_pkgs:

              session.packageListAdd(taginfo['id'], name, owner=owner)
@@ -1057,48 +1057,49 @@ 

              session.untagBuildBypass(taginfo['id'], binfo['id'])

          for dep in to_tag:

              session.tagBuildBypass(taginfo['id'], dep.nvr)

-             #shouldn't need force here

-         #set groups data

+             # shouldn't need force here

+         # set groups data

          if not build_group:

              # build group not present. add it

              session.groupListAdd(taginfo['id'], 'build', force=True)

-             #using force in case group is blocked. This shouldn't be the case, but...

+             # using force in case group is blocked. This shouldn't be the case, but...

          for pkg_name in drop_pkgs:

-             #in principal, our tag should not have inheritance, so the remove call is the right thing

+             # in principle, our tag should not have inheritance,

+             # so the remove call is the right thing

              session.groupPackageListRemove(taginfo['id'], 'build', pkg_name)

          for pkg_name in add_pkgs:

              session.groupPackageListAdd(taginfo['id'], 'build', pkg_name)

-             #we never add any blocks, so forcing shouldn't be required

-         #TODO - adjust extra_arches for package to build

-         #get event id to facilitate waiting on repo

+             # we never add any blocks, so forcing shouldn't be required

+         # TODO - adjust extra_arches for package to build

+         # get event id to facilitate waiting on repo

          #       not sure if getLastEvent is good enough

          #       short of adding a new call, perhaps use getLastEvent together with event of

          #       current latest repo for tag

          session.getLastEvent()

          results = session.multiCall(strict=True)

          event_id = results[-1][0]['id']

-         #TODO - verify / check results ?

+         # TODO - verify / check results ?

          task_id = session.newRepo(our_tag, event=event_id)

-         #TODO - upload src

+         # TODO - upload src

          #       [?] use remote SCM url (if avail)?

          src = build.getSource()

          if not src:

-             log ("Couldn't get source for %s" % build.nvr)

+             log("Couldn't get source for %s" % build.nvr)

              return None

-         #wait for repo task

-         log ("Waiting on newRepo task %i" % task_id)

+         # wait for repo task

+         log("Waiting on newRepo task %i" % task_id)

          while True:

              tinfo = session.getTaskInfo(task_id)

              tstate = koji.TASK_STATES[tinfo['state']]

              if tstate == 'CLOSED':

                  break

              elif tstate in ('CANCELED', 'FAILED'):

-                 log ("Error: failed to generate repo")

+                 log("Error: failed to generate repo")

                  return None

-             #add a timeout?

-         #TODO       ...and verify repo

+             # add a timeout?

+         # TODO       ...and verify repo

          repo_id, event_id = session.getTaskResult(task_id)

-         #kick off build

+         # kick off build

          task_id = session.build(src, None, opts={'repo_id': repo_id}, priority=options.priority)

          return task_id
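
A note on the hunk above: the cluster of session calls in rebuild() relies on koji's multicall
batching. Setting session.multicall = True queues subsequent calls instead of sending them, and
session.multiCall(strict=True) executes the queue in a single XML-RPC round trip, returning one
[result] entry per queued call (strict raises on the first fault). A minimal sketch of the idiom
with placeholder arguments (tag_id, name, owner, nvr), not lines taken from the patch:

    session.multicall = True
    session.packageListAdd(tag_id, name, owner=owner)  # queued; no result until multiCall runs
    session.tagBuildBypass(tag_id, nvr)                # queued
    session.getLastEvent()                             # deliberately queued last
    results = session.multiCall(strict=True)           # one round trip for the whole batch
    event_id = results[-1][0]['id']                    # unwrap the getLastEvent result

This is also why the event id can be read off the tail of results: the hub evaluates the queued
calls in order, so getLastEvent reflects all the tag/package changes made earlier in the batch.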

  
@@ -1112,34 +1113,34 @@ 

              log("%s: %i (+%i replaced)" % (state, len(not_replaced), n_replaced))

              if not_replaced and len(not_replaced) < 8:

                  log(' '.join([b.nvr for b in not_replaced]))

-         #generate a report of the most frequent problem deps

+         # generate a report of the most frequent problem deps

          problem_counts = {}

          for build in self.state_idx['brokendeps'].values():

              for dep_id in build.deps:

                  dep = self.builds.get(dep_id)

                  if not dep:

-                     #unscanned

-                     #possible because we short circuit the earlier scan on problems

-                     #we don't really know if this one is a problem or not, so just

-                     #skip it.

+                     # unscanned

+                     # possible because we short circuit the earlier scan on problems

+                     # we don't really know if this one is a problem or not, so just

+                     # skip it.

                      continue

                  if dep.state in ('common', 'pending', 'missing'):

-                     #not a problem

+                     # not a problem

                      continue

                  nvr = dep.nvr

                  if dep.substitute:

                      dep2 = self.getSubstitute(dep.substitute)

                      if dep2:

-                         #we have a substitution, so not a problem

+                         # we have a substitution, so not a problem

                          continue

-                     #otherwise the substitution is the problem

+                     # otherwise the substitution is the problem

                      nvr = dep.substitute

                  problem_counts.setdefault(nvr, 0)

                  problem_counts[nvr] += 1

          order = [(c, nvr) for (nvr, c) in six.iteritems(problem_counts)]

          if order:

              order.sort(reverse=True)

-             #print top 5 problems

+             # print top 5 problems

              log("-- top problems --")

              for (c, nvr) in order[:5]:

                  log(" %s (%i)" % (nvr, c))
@@ -1149,65 +1150,65 @@ 

          states = sorted(self.state_idx.keys())

          parts = ["%s: %i" % (s, len(self.state_idx[s])) for s in states]

          parts.append("total: %i" % N)

-         log (' '.join(parts))

+         log(' '.join(parts))

  

      def _print_builds(self, mylist):

          """small helper function for output"""

          for build in mylist:

-             log ("    %s (%s)" % (build.nvr, build.state))

+             log("    %s (%s)" % (build.nvr, build.state))

  

      def checkJobs(self, tag=None):

          """Check outstanding jobs. Return true if anything changes"""

          ret = False

          for build_id, build in self.state_idx['pending'].items():

-             #check pending builds

+             # check pending builds

              if not build.task_id:

-                 log ("No task id recorded for %s" % build.nvr)

+                 log("No task id recorded for %s" % build.nvr)

                  build.updateState()

                  ret = True

              info = session.getTaskInfo(build.task_id)

              if not info:

-                 log ("No such task: %i (build %s)" % (build.task_id, build.nvr))

+                 log("No such task: %i (build %s)" % (build.task_id, build.nvr))

                  build.updateState()

                  ret = True

                  continue

              state = koji.TASK_STATES[info['state']]

              if state in ('CANCELED', 'FAILED'):

-                 log ("Task %i is %s (build %s)" % (build.task_id, state, build.nvr))

-                 #we have to set the state to broken manually (updateState will mark

-                 #a failed build as missing)

+                 log("Task %i is %s (build %s)" % (build.task_id, state, build.nvr))

+                 # we have to set the state to broken manually (updateState will mark

+                 # a failed build as missing)

                  build.setState('broken')

                  ret = True

              elif state == 'CLOSED':

-                 log ("Task %i complete (build %s)" % (build.task_id, build.nvr))

-                 if options.tag_build and not tag == None:

+                 log("Task %i complete (build %s)" % (build.task_id, build.nvr))

+                 if options.tag_build and tag is not None:

                      self.tagSuccessful(build.nvr, tag)

                  build.updateState()

                  ret = True

                  if build.state != 'common':

-                     log ("Task %i finished, but %s still missing" \

-                             % (build.task_id, build.nvr))

+                     log("Task %i finished, but %s still missing"

+                         % (build.task_id, build.nvr))

          return ret

  

      def checkBuildDeps(self, build):

-         #check deps

+         # check deps

          if build.revised_deps is None:

-             #log ("No revised deplist yet for %s" % build.nvr)

+             # log("No revised deplist yet for %s" % build.nvr)

              return False

          problem = [x for x in build.revised_deps

-                          if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')]

+                    if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')]

          if problem:

-             log ("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem)))

+             log("Can't rebuild %s, missing %i deps" % (build.nvr, len(problem)))

              build.setState('brokendeps')

              self._print_builds(problem)

              return False

          not_common = [x for x in build.revised_deps

-                          if x.state not in ('common', 'local')]

+                       if x.state not in ('common', 'local')]

          if not_common:

-             #could be missing or still building or whatever

-             #log ("Still missing %i revised deps for %s" % (len(not_common), build.nvr))

+             # could be missing or still building or whatever

+             # log("Still missing %i revised deps for %s" % (len(not_common), build.nvr))

              return False

-         #otherwise, we should be good to rebuild

+         # otherwise, we should be good to rebuild

          return True

  

      def rebuildMissing(self):
@@ -1217,21 +1218,20 @@ 

          ret = False

          if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:

              return ret

-         missing = [(b.order, b.id, b) for b in six.itervalues(self.state_idx['missing'])]

-         missing.sort()

+         missing = sorted([(b.order, b.id, b) for b in six.itervalues(self.state_idx['missing'])])

          for order, build_id, build in missing:

              if not self.checkBuildDeps(build):

                  continue

-             #otherwise, we should be good to rebuild

-             log ("rebuild: %s" % build.nvr)

+             # otherwise, we should be good to rebuild

+             log("rebuild: %s" % build.nvr)

              task_id = self.rebuild(build)

              ret = True

              if options.test:

-                 #pretend build is available

+                 # pretend build is available

                  build.setState('common')

              elif not task_id:

-                 #something went wrong setting up the rebuild

-                 log ("Did not get a task for %s" % build.nvr)

+                 # something went wrong setting up the rebuild

+                 log("Did not get a task for %s" % build.nvr)

                  build.setState('broken')

              else:

                  # build might not show up as 'BUILDING' immediately, so we
@@ -1240,21 +1240,21 @@ 

                  build.setState('pending')

              if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:

                  if options.debug:

-                     log ("Maximum number of jobs reached.")

+                     log("Maximum number of jobs reached.")

                  break

          return ret

  

      def runRebuilds(self, tag=None):

          """Rebuild missing builds"""

-         log ("Determining rebuild order")

-         #using self.state_idx to track build states

-         #make sure state_idx has at least these states

+         log("Determining rebuild order")

+         # using self.state_idx to track build states

+         # make sure state_idx has at least these states

          initial_avail = len(self.state_idx['common'])

          self.report_brief()

          while True:

              if (not self.state_idx['missing'] and not self.state_idx['pending']) or \

                 (options.prefer_new and not self.state_idx['pending']):

-                 #we're done

+                 # we're done

                  break

              changed1 = self.checkJobs(tag)

              changed2 = self.rebuildMissing()
@@ -1262,16 +1262,16 @@ 

                  time.sleep(30)

                  continue

              self.report_brief()

-         log ("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail))

+         log("Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail))

  

      def tagSuccessful(self, nvr, tag):

          """tag completed builds into final tags"""

-         #TODO: check if there are other reasons why tagging may fail and handle them

+         # TODO: check if there are other reasons why tagging may fail and handle them

          try:

              session.tagBuildBypass(tag, nvr)

-             log ("tagged %s to %s" % (nvr, tag))

+             log("tagged %s to %s" % (nvr, tag))

          except koji.TagError:

-             log ("NOTICE: %s already tagged in %s" % (nvr, tag))

+             log("NOTICE: %s already tagged in %s" % (nvr, tag))

  

  

  def main(args):
@@ -1285,22 +1285,23 @@ 

      if options.logfile:

          filename = options.logfile

          try:

-             logfile = os.open(filename,os.O_CREAT|os.O_RDWR|os.O_APPEND, 0o777)

-         except:

+             logfile = os.open(filename, os.O_CREAT | os.O_RDWR | os.O_APPEND, 0o777)

+         except Exception:

              logfile = None

      if logfile is not None:

-         log ("logging to %s" % filename)

-         os.write(logfile, "\n\n========================================================================\n")

+         log("logging to %s" % filename)

+         os.write(logfile,

+                  "\n\n========================================================================\n")

  

      if options.build:

          binfo = remote.getBuild(options.build, strict=True)

          tracker.scanBuild(binfo['id'], tag=tag)

      else:

          if tag is None:

-             log ("Tag is required")

+             log("Tag is required")

              return

          else:

-             log ("Working on tag %s" % (tag))

+             log("Working on tag %s" % (tag))

              tracker.scanTag(tag)

      tracker.report()

      tracker.runRebuilds(tag)
@@ -1316,8 +1317,8 @@ 

      session = koji.ClientSession(options.server, session_opts)

      if not options.noauth:

          activate_session(session)

-     #XXX - sane auth

-     #XXX - config!

+     # XXX - sane auth

+     # XXX - config!

      remote_opts = {'anon_retry': True}

      for k in ('debug_xmlrpc', 'debug'):

          remote_opts[k] = getattr(options, k)
@@ -1331,7 +1332,7 @@ 

          pass

      except SystemExit:

          rv = 1

-     #except:

+     # except:

      #    if options.debug:

      #        raise

      #    else:
@@ -1340,6 +1341,6 @@ 

      #        log ("%s: %s" % (exctype, value))

      try:

          session.logout()

-     except:

+     except Exception:

          pass

      sys.exit(rv)
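
For reviewers skimming this file: beyond the comment-spacing churn, the two recurring mechanical
fixes are the None-comparison and bare-except rules (E711, E722) that the expanded check set now
enforces. A before/after sketch of both patterns, recreated for illustration rather than copied
verbatim from the hunks:

    # Before: E711 (comparison to None) and E722 (bare 'except')
    if options.tag_build and not tag == None:
        self.tagSuccessful(build.nvr, tag)
    try:
        session.logout()
    except:
        pass

    # After: identity test for None and a named exception class
    if options.tag_build and tag is not None:
        self.tagSuccessful(build.nvr, tag)
    try:
        session.logout()
    except Exception:
        pass

The identity test avoids surprises from objects that override __eq__, and except Exception still
catches ordinary errors while letting SystemExit and KeyboardInterrupt propagate.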

file modified
+18 -15
@@ -53,20 +53,17 @@ 

                        help=_("show xmlrpc debug output"))

      parser.add_option("-t", "--test", action="store_true",

                        help=_("test mode, no tag is deleted"))

- 

-     parser.add_option("--no-empty", action="store_false", dest="clean_empty",

-                      default=True, help=_("don't run emptiness check"))

-     parser.add_option("--empty-delay", action="store", metavar="DAYS",

-                      default=1, type=int,

+     parser.add_option("--no-empty", action="store_false", dest="clean_empty", default=True,

+                       help=_("don't run emptiness check"))

+     parser.add_option("--empty-delay", action="store", metavar="DAYS", default=1, type=int,

                        help=_("delete empty tags older than DAYS"))

-     parser.add_option("--no-old", action="store_false", dest="clean_old",

-                      default=True, help=_("don't run old check"))

-     parser.add_option("--old-delay", action="store", metavar="DAYS",

-                      default=30, type=int,

+     parser.add_option("--no-old", action="store_false", dest="clean_old", default=True,

+                       help=_("don't run old check"))

+     parser.add_option("--old-delay", action="store", metavar="DAYS", default=30, type=int,

                        help=_("delete older tags than timestamp"))

      parser.add_option("--ignore-tags", metavar="PATTERN", action="append",

                        help=_("Ignore tags matching PATTERN when pruning"))

-     #parse once to get the config file

+     # parse once to get the config file

      (options, args) = parser.parse_args()

  

      defaults = parser.get_default_values()
@@ -115,7 +112,7 @@ 

                      setattr(defaults, name, config.getboolean(*alias))

                  else:

                      setattr(defaults, name, config.get(*alias))

-     #parse again with defaults

+     # parse again with defaults

      (options, args) = parser.parse_args(values=defaults)

      options.config = config

  
@@ -139,25 +136,29 @@ 

      except xmlrpc.client.ProtocolError:

          error(_("Unable to connect to server"))

      if ret != koji.API_VERSION:

-         warn(_("The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))

+         warn(_("The server is at API version %d and the client is at %d" %

+                (ret, koji.API_VERSION)))

  

  

  def activate_session(session):

      """Test and login the session is applicable"""

      global options

      if options.noauth:

-         #skip authentication

+         # skip authentication

          pass

      elif options.cert is not None and os.path.isfile(options.cert):

          # authenticate using SSL client cert

          session.ssl_login(options.cert, None, options.serverca, proxyuser=options.runas)

      elif options.user:

-         #authenticate using user/password

+         # authenticate using user/password

          session.login()

      elif options.keytab and options.principal:

          try:

              if options.keytab and options.principal:

-                 session.gssapi_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)

+                 session.gssapi_login(

+                     principal=options.principal,

+                     keytab=options.keytab,

+                     proxyuser=options.runas)

              else:

                  session.gssapi_login(proxyuser=options.runas)

          except Exception as e:
@@ -250,12 +251,14 @@ 

      delete_tags(deleted)

      return passed

  

+ 

  def main(args):

      activate_session(session)

      sidetags = get_all()

      sidetags = clean_empty(sidetags)

      sidetags = clean_old(sidetags)

  

+ 

  if __name__ == "__main__":

      options, args = get_options()

      session_opts = koji.grab_session_options(options)
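
The two bare '+' lines in this file's diff are blank-line fixes rather than content changes: with
the pycodestyle E checks enabled, two blank lines are expected before a top-level def (E302) and
after one when module-level code follows (E305). A small sketch of the rule, with stand-in bodies:

    def clean_old(sidetags):
        ...
    # a single blank line before the next def would trigger E302


    def main(args):
        ...
    # and a single blank line here would trigger E305


    if __name__ == "__main__":
        main(sys.argv[1:])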

file modified
+10 -7
@@ -36,7 +36,8 @@ 

  

  

  def clean_notification_tasks(cursor, vacuum, test, age):

-     q = " FROM task WHERE method = 'build' AND completion_time < NOW() - '%s days'::interval" % int(age)

+     q = " FROM task WHERE method = 'build' AND completion_time < NOW() - '%s days'::interval" % \

+         int(age)

      if options.verbose:

          cursor.execute("SELECT COUNT(*) " + q)

          rows = cursor.fetchall()[0][0]
@@ -75,7 +76,7 @@ 

              if opts['scratch']:

                  cursor.execute("INSERT INTO temp_scratch_tasks VALUES (%s)", (task_id,))

                  ids.append(task_id)

-         except:

+         except Exception:

              continue

  

      parents = ids
@@ -95,7 +96,8 @@ 

          return

  

      # delete standard buildroots

-     cursor.execute("DELETE FROM standard_buildroot WHERE task_id IN (SELECT task_id FROM temp_scratch_tasks)")

+     cursor.execute(

+         "DELETE FROM standard_buildroot WHERE task_id IN (SELECT task_id FROM temp_scratch_tasks)")

  

      # delete tasks finally

      cursor.execute("DELETE FROM task WHERE id IN (SELECT task_id FROM temp_scratch_tasks)")
@@ -106,7 +108,8 @@ 

  

  

  def clean_buildroots(cursor, vacuum, test):

-     q = " FROM buildroot WHERE cg_id IS NULL AND id NOT IN (SELECT buildroot_id FROM standard_buildroot)"

+     q = " FROM buildroot " \

+         "WHERE cg_id IS NULL AND id NOT IN (SELECT buildroot_id FROM standard_buildroot)"

  

      if options.verbose:

          cursor.execute("SELECT COUNT(*) " + q)
@@ -169,7 +172,7 @@ 

      config.read(options.conf)

  

      cfgmap = [

-         #option, type, default

+         # option, type, default

          ['DBName', 'string', None],

          ['DBUser', 'string', None],

          ['DBHost', 'string', None],
@@ -193,7 +196,6 @@ 

      if opts['DBHost'] is None:

          opts['DBHost'] = opts['DBhost']

  

- 

      koji.db.provideDBopts(database=opts["DBName"],

                            user=opts["DBUser"],

                            password=opts.get("DBPass", None),
@@ -207,7 +209,8 @@ 

      clean_sessions(cursor, options.vacuum, options.test, options.sessions_age)

      clean_reservations(cursor, options.vacuum, options.test, options.reservations_age)

      if options.tag_notifications:

-         clean_notification_tasks(cursor, options.vacuum, options.test, age=options.tag_notifications_age)

+         clean_notification_tasks(cursor, options.vacuum, options.test,

+                                  age=options.tag_notifications_age)

      if options.scratch:

          clean_scratch_tasks(cursor, options.vacuum, options.test, age=options.scratch_age)

      if options.buildroots:
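
Almost everything in the sweep-db diff is a line-length wrap under the 99-column limit this change
set adopts (E501). Long SQL fragments are split with implicit string-literal concatenation, so the
query text sent to the database is byte-for-byte the same; only the physical lines change. The
clean_buildroots hunk above is the clearest example:

    # Before: one physical line over 99 characters (E501)
    q = " FROM buildroot WHERE cg_id IS NULL AND id NOT IN (SELECT buildroot_id FROM standard_buildroot)"

    # After: adjacent string literals are concatenated at compile time
    q = " FROM buildroot " \
        "WHERE cg_id IS NULL AND id NOT IN (SELECT buildroot_id FROM standard_buildroot)"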

file modified
+95 -86
@@ -43,14 +43,15 @@ 

  

  tag_cache = {}

  

+ 

  def getTag(session, tag, event=None):

      """A caching version of the hub call"""

      cache = tag_cache

      now = time.time()

      if (tag, event) in cache:

-         ts, info = cache[(tag,event)]

+         ts, info = cache[(tag, event)]

          if now - ts < 600:

-             #use the cache

+             # use the cache

              return info

      info = session.getTag(tag, event=event)

      if info:
@@ -83,8 +84,8 @@ 

          self.first_seen = time.time()

          if self.current:

              order = self.session.getFullInheritance(self.tag_id, event=self.event_id)

-             #order may contain same tag more than once

-             tags = {self.tag_id : 1}

+             # order may contain same tag more than once

+             tags = {self.tag_id: 1}

              for x in order:

                  tags[x['parent_id']] = 1

              self.taglist = to_list(tags.keys())
@@ -156,13 +157,13 @@ 

              - timestamp really, really old

          """

          timeout = 36000

-         #XXX - config

+         # XXX - config

          if self.state != koji.REPO_INIT:

              return False

          age = time.time() - max(self.event_ts, self.first_seen)

-         #the first_seen timestamp is also factored in because a repo can be

-         #created from an older event and should not be expired based solely on

-         #that event's timestamp.

+         # the first_seen timestamp is also factored in because a repo can be

+         # created from an older event and should not be expired based solely on

+         # that event's timestamp.

          return age > timeout

  

      def tryDelete(self):
@@ -177,8 +178,8 @@ 

              lifetime = self.options.deleted_repo_lifetime

              # (should really be called expired_repo_lifetime)

          try:

-             #also check dir age. We do this because a repo can be created from an older event

-             #and should not be removed based solely on that event's timestamp.

+             # also check dir age. We do this because a repo can be created from an older event

+             # and should not be removed based solely on that event's timestamp.

              mtime = os.stat(path).st_mtime

          except OSError as e:

              if e.errno == 2:
@@ -200,7 +201,7 @@ 

          if self.state != koji.REPO_EXPIRED:

              raise koji.GenericError("Repo not expired")

          if self.session.repoDelete(self.repo_id) > 0:

-             #cannot delete, we are referenced by a buildroot

+             # cannot delete, we are referenced by a buildroot

              self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id)

              return False

          self.logger.info("Deleted repo %s" % self.repo_id)
@@ -268,7 +269,8 @@ 

          self._local.session = value

  

      def printState(self):

-         self.logger.debug('Tracking %i repos, %i child processes', len(self.repos), len(self.delete_pids))

+         self.logger.debug('Tracking %i repos, %i child processes',

+                           len(self.repos), len(self.delete_pids))

          for tag_id, task_id in six.iteritems(self.tasks):

              self.logger.debug("Tracking task %s for tag %s", task_id, tag_id)

          for pid, desc in six.iteritems(self.delete_pids):
@@ -299,9 +301,9 @@ 

              (childpid, status) = os.waitpid(pid, os.WNOHANG)

          except OSError as e:

              if e.errno != errno.ECHILD:

-                 #should not happen

+                 # should not happen

                  raise

-             #otherwise assume the process is gone

+             # otherwise assume the process is gone

              self.logger.info("%s: %s" % (prefix, e))

              return True

          if childpid != 0:
@@ -320,7 +322,7 @@ 

              try:

                  rmtree(path)

                  status = 0

-             except Exception:

+             except BaseException:

                  logger.error(''.join(traceback.format_exception(*sys.exc_info())))

                  logging.shutdown()

          finally:
@@ -345,18 +347,19 @@ 

              repo_id = data['id']

              repo = self.repos.get(repo_id)

              if repo:

-                 #we're already tracking it

+                 # we're already tracking it

                  if repo.state != data['state']:

-                     self.logger.info('State changed for repo %s: %s -> %s'

-                                        %(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))

+                     self.logger.info(

+                         'State changed for repo %s: %s -> %s',

+                         repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']])

                      repo.state = data['state']

              else:

                  self.logger.info('Found repo %s, state=%s'

-                                    %(repo_id, koji.REPO_STATES[data['state']]))

+                                  % (repo_id, koji.REPO_STATES[data['state']]))

                  repo = ManagedRepo(self, data)

                  self.repos[repo_id] = repo

              if not getTag(self.session, repo.tag_id) and not repo.expired():

-                 self.logger.info('Tag %d for repo %d disappeared, expiring.' % (repo.tag_id, repo_id))

+                 self.logger.info('Tag %d for repo %d disappeared, expiring.', repo.tag_id, repo_id)

                  repo.expire()

          if len(self.repos) > len(repodata):

              # This shouldn't normally happen, but might if someone else calls
@@ -383,7 +386,7 @@ 

                  repo.current = False

                  if repo.expire_ts is None:

                      repo.expire_ts = time.time()

-                 #also no point in further checking

+                 # also no point in further checking

                  continue

              to_check.append(repo)

          if self.logger.isEnabledFor(logging.DEBUG):
@@ -407,7 +410,7 @@ 

              while True:

                  self.checkCurrentRepos()

                  time.sleep(self.options.sleeptime)

-         except:

+         except Exception:

              self.logger.exception('Error in currency checker thread')

              raise

          finally:
@@ -422,7 +425,7 @@ 

              while True:

                  self.regenRepos()

                  time.sleep(self.options.sleeptime)

-         except:

+         except Exception:

              self.logger.exception('Error in regen thread')

              raise

          finally:
@@ -441,7 +444,7 @@ 

  

          Also, warn about any oddities"""

          if self.delete_pids:

-             #skip

+             # skip

              return

          if not os.path.exists(topdir):

              self.logger.debug("%s doesn't exist, skipping", topdir)
@@ -466,14 +469,14 @@ 

                      self.logger.debug("%s/%s not an int, skipping", tagdir, repo_id)

                      continue

                  if repo_id in self.repos:

-                     #we're already managing it, no need to deal with it here

+                     # we're already managing it, no need to deal with it here

                      continue

                  repodir = "%s/%s" % (tagdir, repo_id)

                  try:

                      # lstat because it could be link to another volume

                      dirstat = os.lstat(repodir)

                  except OSError:

-                     #just in case something deletes the repo out from under us

+                     # just in case something deletes the repo out from under us

                      self.logger.debug("%s deleted already?!", repodir)

                      continue

                  symlink = False
@@ -488,22 +491,25 @@ 

                      if not self.options.ignore_stray_repos:

                          age = time.time() - dir_ts

                          self.logger.debug("did not expect %s; age: %s",

-                                 repodir, age)

+                                           repodir, age)

                          if age > max_age:

-                             self.logger.info("Removing unexpected directory (no such repo): %s", repodir)

+                             self.logger.info(

+                                 "Removing unexpected directory (no such repo): %s", repodir)

                              if symlink:

                                  os.unlink(repodir)

                              else:

                                  self.rmtree(repodir)

                      continue

                  if rinfo['tag_name'] != tag:

-                     self.logger.warn("Tag name mismatch (rename?): %s vs %s", tag, rinfo['tag_name'])

+                     self.logger.warn(

+                         "Tag name mismatch (rename?): %s vs %s", tag, rinfo['tag_name'])

                      continue

                  if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):

                      age = time.time() - max(rinfo['create_ts'], dir_ts)

                      self.logger.debug("potential removal candidate: %s; age: %s" % (repodir, age))

                      if age > max_age:

-                         logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))

+                         logger.info("Removing stray repo (state=%s): %s",

+                                     koji.REPO_STATES[rinfo['state']], repodir)

                          if symlink:

                              os.unlink(repodir)

                          else:
@@ -513,19 +519,19 @@ 

          stats = self.tag_use_stats.get(tag_id)

          now = time.time()

          if stats and now - stats['ts'] < 3600:

-             #use the cache

+             # use the cache

              return stats

          data = self.session.listBuildroots(tagID=tag_id,

-                                            queryOpts={'order': '-create_event_id', 'limit' : 100})

-         #XXX magic number (limit)

+                                            queryOpts={'order': '-create_event_id', 'limit': 100})

+         # XXX magic number (limit)

          if data:

              tag_name = data[0]['tag_name']

          else:

              tag_name = "#%i" % tag_id

          stats = {'data': data, 'ts': now, 'tag_name': tag_name}

          recent = [x for x in data if now - x['create_ts'] < 3600 * 24]

-         #XXX magic number

-         stats ['n_recent'] = len(recent)

+         # XXX magic number

+         stats['n_recent'] = len(recent)

          self.tag_use_stats[tag_id] = stats

          self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))

          return stats
@@ -544,8 +550,7 @@ 

  

          stats = self.tagUseStats(entry['taginfo']['id'])

          # normalize use count

-         max_n = max([t.get('n_recent', 0) for t in self.needed_tags.values()]

-                         or [1])

+         max_n = max([t.get('n_recent', 0) for t in self.needed_tags.values()] or [1])

          if max_n == 0:

              # no recent use or missing data

              max_n = 1
@@ -559,7 +564,7 @@ 

              age = 0

          entry['score'] = age * adj

          self.logger.debug("Needed tag %s got score %.2f",

-                     entry['taginfo']['name'], entry['score'])

+                           entry['taginfo']['name'], entry['score'])

          # so a day old unused repo gets about the same regen score as a

          # 2.4-hour-old, very popular repo
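
Working the comment's numbers: assuming the adjustment factor adj (computed just above, outside this hunk) runs from 1 for an unused tag to 10 for the most heavily used one, the two cases come out even:

    # age is in seconds; adj assumed to span 1 (unused) .. 10 (most used)
    day_old_unused = 86400 * 1     # 24 h old, adj 1  -> score 86400
    fresh_popular = 8640 * 10      # 2.4 h old, adj 10 -> score 86400
    assert day_old_unused == fresh_popular
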

  
@@ -572,7 +577,7 @@ 

          self.logger.debug("Current tasks: %r" % self.tasks)

          if self.other_tasks:

              self.logger.debug("Found %i untracked newRepo tasks",

-                         len(self.other_tasks))

+                               len(self.other_tasks))

          self.logger.debug("Updating repos")

  

          self.readCurrentRepos()
@@ -593,7 +598,7 @@ 

              if n_deletes >= self.options.delete_batch_size:

                  break

              if repo.expired():

-                 #try to delete

+                 # try to delete

                  if repo.tryDelete():

                      n_deletes += 1

                      del self.repos[repo.repo_id]
@@ -621,11 +626,12 @@ 

              tstate = koji.TASK_STATES[tinfo['state']]

              tag_id = self.tasks[task_id]['tag_id']

              if tstate == 'CLOSED':

-                 self.logger.info("Finished: newRepo task %s for tag %s" % (task_id, tag_id))

+                 self.logger.info("Finished: newRepo task %s for tag %s", task_id, tag_id)

                  self.recent_tasks[task_id] = time.time()

                  del self.tasks[task_id]

              elif tstate in ('CANCELED', 'FAILED'):

-                 self.logger.info("Problem: newRepo task %s for tag %s is %s" % (task_id, tag_id, tstate))

+                 self.logger.info(

+                     "Problem: newRepo task %s for tag %s is %s", task_id, tag_id, tstate)

                  self.recent_tasks[task_id] = time.time()

                  del self.tasks[task_id]

              else:
@@ -633,8 +639,9 @@ 

              # TODO: implement a timeout

  

          # also check other newRepo tasks

-         repo_tasks = self.session.listTasks(opts={'method':'newRepo',

-                             'state':([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})

+         repo_tasks = self.session.listTasks(opts={'method': 'newRepo',

+                                                   'state': ([koji.TASK_STATES[s]

+                                                              for s in ('FREE', 'OPEN')])})

          others = [t for t in repo_tasks if t['id'] not in self.tasks]

          for tinfo in others:

              if tinfo['id'] not in self.other_tasks:
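
Several hunks in this file also move %-interpolation out of the message string and into the logger arguments. A short sketch (hypothetical logger setup) of the difference:

    import logging

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger("example")

    task_id, tag_id = 123, 456
    # Eager: the message is built even though INFO is disabled here.
    logger.info("Finished: newRepo task %s for tag %s" % (task_id, tag_id))
    # Lazy: formatting is deferred until a handler actually emits the record.
    logger.info("Finished: newRepo task %s for tag %s", task_id, tag_id)
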
@@ -651,8 +658,8 @@ 

          self.build_tags = set([

              t['build_tag'] for t in self.session.getBuildTargets()

              if not koji.util.multi_fnmatch(t['build_tag_name'], ignore)

-             ])

-         #index repos by tag

+         ])

+         # index repos by tag

          tag_repos = {}

          for repo in to_list(self.repos.values()):

              tag_repos.setdefault(repo.tag_id, []).append(repo)
@@ -672,7 +679,7 @@ 

                  if covered:

                      # no longer needed

                      self.logger.info("Tag %(name)s has a current or in "

-                             "progress repo", entry['taginfo'])

+                                      "progress repo", entry['taginfo'])

                      del self.needed_tags[tag_id]

                  # if not covered, we already know

                  continue
@@ -699,10 +706,10 @@ 

                  ts = time.time()

  

              entry = {

-                     'taginfo': taginfo,

-                     'expire_ts': ts,

-                     'needed_since' : time.time(),

-                     }

+                 'taginfo': taginfo,

+                 'expire_ts': ts,

+                 'needed_since': time.time(),

+             }

              self.setTagScore(entry)

              self.needed_tags[tag_id] = entry

  
@@ -711,7 +718,7 @@ 

              entry = self.needed_tags.get(tag_id)

              if tag_id not in self.build_tags:

                  self.logger.info("Tag %(name)s is no longer a build tag",

-                         entry['taginfo'])

+                                  entry['taginfo'])

                  del self.needed_tags[tag_id]

          for tag_id, repolist in tag_repos.items():

              if tag_id not in self.build_tags:
@@ -724,7 +731,6 @@ 

              self.logger.info('Needed tags count went from %i to %i', n_need,

                               len(self.needed_tags))

  

- 

      def regenRepos(self):

          """Trigger newRepo tasks for needed tags"""

  
@@ -749,8 +755,7 @@ 

              if running_tasks >= self.options.max_repo_tasks:

                  self.logger.info("Maximum number of repo tasks reached")

                  return

-             elif (len(self.tasks) + len(self.other_tasks)

-                             >= self.options.repo_tasks_limit):

+             elif len(self.tasks) + len(self.other_tasks) >= self.options.repo_tasks_limit:

                  self.logger.info("Repo task limit reached")

                  return

              tagname = tag['taginfo']['name']
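
The folded condition above now fits under the new max_line_length of 99. Where a wrap is still needed, this config (W504 ignored, W503 still selected) wants the binary operator left at the end of the broken line. A toy sketch with made-up values:

    tasks, other_tasks, limit = [1], [2, 3], 10  # hypothetical values

    # Preferred: a single line when it fits in 99 columns.
    if len(tasks) + len(other_tasks) >= limit:
        print("Repo task limit reached")

    # When wrapping, keep the operator at the end of the line; breaking
    # *before* it would trip W503, which this config leaves enabled.
    if (len(tasks) +
            len(other_tasks) >= limit):
        print("Repo task limit reached")
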
@@ -765,7 +770,7 @@ 

                  else:

                      # should not happen

                      logger.warning('Needed tag refers to unknown task. '

-                             '%s -> %i', tagname, task_id)

+                                    '%s -> %i', tagname, task_id)

                      # we'll advance and create a new task

              taskopts = {}

              if koji.util.multi_fnmatch(tagname, debuginfo_pat):
@@ -788,14 +793,14 @@ 

              else:

                  time_expired = "%.1f" % (time.time() - expire_ts)

              self.logger.info("Created newRepo task %s for tag %s (%s), "

-                     "expired for %s sec", task_id, tag['taginfo']['id'],

-                     tag['taginfo']['name'], time_expired)

+                              "expired for %s sec", task_id, tag['taginfo']['id'],

+                              tag['taginfo']['name'], time_expired)

              self.tasks[task_id] = {

-                     'id': task_id,

-                     'taskinfo': self.session.getTaskInfo(task_id),

-                     'tag_id': tag['taginfo']['id'],

-                     'maven': maven,

-                     }

+                 'id': task_id,

+                 'taskinfo': self.session.getTaskInfo(task_id),

+                 'tag_id': tag['taginfo']['id'],

+                 'maven': maven,

+             }

              tag['task_id'] = task_id

          if running_tasks_maven >= self.options.max_repo_tasks_maven:

              self.logger.info("Maximum number of maven repo tasks reached")
@@ -804,7 +809,7 @@ 

  def start_currency_checker(session, repomgr):

      subsession = session.subsession()

      thread = threading.Thread(name='currencyChecker',

-                         target=repomgr.currencyChecker, args=(subsession,))

+                               target=repomgr.currencyChecker, args=(subsession,))

      thread.setDaemon(True)

      thread.start()

      return thread
@@ -813,7 +818,7 @@ 

  def start_regen_loop(session, repomgr):

      subsession = session.subsession()

      thread = threading.Thread(name='regenLoop',

-                         target=repomgr.regenLoop, args=(subsession,))

+                               target=repomgr.regenLoop, args=(subsession,))

      thread.setDaemon(True)

      thread.start()

      return thread
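
Both helpers follow the same shape: each daemon thread gets its own koji subsession rather than sharing the main one. Condensed into a hypothetical generic wrapper:

    import threading

    def start_worker(session, target, name):
        """Run target(subsession) in a daemon thread (illustrative only)."""
        subsession = session.subsession()   # one session per thread
        thread = threading.Thread(name=name, target=target,
                                  args=(subsession,))
        thread.setDaemon(True)  # spelled thread.daemon = True in newer code
        thread.start()
        return thread
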
@@ -822,9 +827,10 @@ 

  def main(options, session):

      repomgr = RepoManager(options, session)

      repomgr.readCurrentRepos()

+ 

      def shutdown(*args):

          raise SystemExit

-     signal.signal(signal.SIGTERM,shutdown)

+     signal.signal(signal.SIGTERM, shutdown)

      curr_chk_thread = start_currency_checker(session, repomgr)

      regen_thread = start_regen_loop(session, repomgr)

      # TODO also move rmtree jobs to threads
@@ -850,7 +856,7 @@ 

          except SystemExit:

              logger.warn("Shutting down")

              break

-         except:

+         except Exception:

              # log the exception and continue

              logger.error(''.join(traceback.format_exception(*sys.exc_info())))

          try:
@@ -864,6 +870,7 @@ 

      finally:

          session.logout()

  

+ 

  def get_options():

      """process options from command line and config file"""

      # parse command line args
@@ -926,15 +933,15 @@ 

                  'offline_retry_interval': 120,

                  'no_ssl_verify': False,

                  'max_delete_processes': 4,

-                 'max_repo_tasks' : 4,

-                 'max_repo_tasks_maven' : 2,

-                 'repo_tasks_limit' : 10,

-                 'delete_batch_size' : 3,

-                 'deleted_repo_lifetime': 7*24*3600,

-                 #XXX should really be called expired_repo_lifetime

-                 'dist_repo_lifetime': 7*24*3600,

+                 'max_repo_tasks': 4,

+                 'max_repo_tasks_maven': 2,

+                 'repo_tasks_limit': 10,

+                 'delete_batch_size': 3,

+                 'deleted_repo_lifetime': 7 * 24 * 3600,

+                 # XXX should really be called expired_repo_lifetime

+                 'dist_repo_lifetime': 7 * 24 * 3600,

                  'recent_tasks_lifetime': 600,

-                 'sleeptime' : 15,

+                 'sleeptime': 15,

                  'cert': None,

                  'ca': '',  # FIXME: unused, remove in next major release

                  'serverca': None,
@@ -945,10 +952,10 @@ 

                      'max_delete_processes', 'max_repo_tasks_maven',

                      'delete_batch_size', 'dist_repo_lifetime', 'sleeptime',

                      'recent_tasks_lifetime')

-         str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',

-                     'cert', 'ca', 'serverca', 'debuginfo_tags',

+         str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab',

+                     'krbservice', 'cert', 'ca', 'serverca', 'debuginfo_tags',

                      'source_tags', 'separate_source_tags', 'ignore_tags')  # FIXME: remove ca here

-         bool_opts = ('verbose','debug','ignore_stray_repos', 'offline_retry',

+         bool_opts = ('verbose', 'debug', 'ignore_stray_repos', 'offline_retry',

                       'krb_rdns', 'krb_canon_host', 'no_ssl_verify')

          for name in config.options(section):

              if name in int_opts:
@@ -962,7 +969,7 @@ 

      for name, value in defaults.items():

          if getattr(options, name, None) is None:

              setattr(options, name, value)

-     if options.logfile in ('','None','none'):

+     if options.logfile in ('', 'None', 'none'):

          options.logfile = None

      # special handling for cert defaults

      cert_defaults = {
@@ -976,6 +983,7 @@ 

                  setattr(options, name, fn)

      return options

  

+ 

  def quit(msg=None, code=1):

      if msg:

          logging.getLogger("koji.repo").error(msg)
@@ -983,10 +991,11 @@ 

          sys.stderr.flush()

      sys.exit(code)

  

- if  __name__ == "__main__":

+ 

+ if __name__ == "__main__":

  

      options = get_options()

-     topdir = getattr(options,'topdir',None)

+     topdir = getattr(options, 'topdir', None)

      pathinfo = koji.PathInfo(topdir)

      if options.show_config:

          pprint.pprint(options.__dict__)
@@ -996,14 +1005,14 @@ 

              try:

                  logfile = open(options.logfile, "w")

                  logfile.close()

-             except:

+             except Exception:

                  sys.stderr.write("Cannot create logfile: %s\n" % options.logfile)

                  sys.exit(1)

-         if not os.access(options.logfile,os.W_OK):

+         if not os.access(options.logfile, os.W_OK):

              sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)

              sys.exit(1)

      koji.add_file_logger("koji", options.logfile)

-     #note we're setting logging for koji.*

+     # note we're setting logging for koji.*

      logger = logging.getLogger("koji")

      if options.debug:

          logger.setLevel(logging.DEBUG)
@@ -1015,7 +1024,7 @@ 

          logger.setLevel(logging.WARNING)

  

      session_opts = koji.grab_session_options(options)

-     session = koji.ClientSession(options.server,session_opts)

+     session = koji.ClientSession(options.server, session_opts)

      if options.cert is not None and os.path.isfile(options.cert):

          # authenticate using SSL client certificates

          session.ssl_login(options.cert, None, options.serverca)
@@ -1024,7 +1033,7 @@ 

          session.login()

      elif koji.krbV and options.principal and options.keytab:

          session.krb_login(options.principal, options.keytab, options.ccache)

-     #get an exclusive session

+     # get an exclusive session

      try:

          session.exclusiveSession(force=options.force_lock)

      except koji.AuthLockError:

file modified
+3 -3
@@ -1,10 +1,10 @@ 

  #!/bin/bash

  

- awk '/^## INSERT kojikamid dup/ {exit} {print $0}' kojikamid.py

+ awk '/^# INSERT kojikamid dup #/ {exit} {print $0}' kojikamid.py

  

  for fn in ../koji/__init__.py ../koji/daemon.py

  do

-     awk '/^## END kojikamid dup/ {p=0} p {print $0} /^## BEGIN kojikamid dup/ {p=1}' $fn 

+     awk '/^# END kojikamid dup #/ {p=0} p {print $0} /^# BEGIN kojikamid dup #/ {p=1}' $fn

  done

  

- awk 'p {print $0} /^## INSERT kojikamid dup/ {p=1}' kojikamid.py

+ awk 'p {print $0} /^# INSERT kojikamid dup #/ {p=1}' kojikamid.py
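
The dup markers drop their doubled hash and gain a trailing '#'. E266 ("too many leading '#' for block comment") is actually on this PR's ignore list, so the change is normalization rather than necessity; the trailing '#' presumably keeps the awk patterns above from matching an ordinary comment that merely mentions the marker. In the Python sources a marked span looks like this (hypothetical body):

    # BEGIN kojikamid dup #

    def shared_helper():
        """Code in this span is copied verbatim into kojikamid."""
        return "shared"

    # END kojikamid dup #
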

file modified
+74 -48
@@ -54,23 +54,27 @@ 

  

  KOJIKAMID = True

  

- ## INSERT kojikamid dup

+ # INSERT kojikamid dup #

+ 

  

  class fakemodule(object):

      pass

  

- #make parts of the above insert accessible as koji.X

+ 

+ # make parts of the above insert accessible as koji.X

  koji = fakemodule()

- koji.GenericError = GenericError

- koji.BuildError = BuildError

+ koji.GenericError = GenericError  # noqa: F821

+ koji.BuildError = BuildError  # noqa: F821

+ 

  

  def encode_int(n):

      """If n is too large for a 32bit signed, convert it to a string"""

      if n <= 2147483647:

          return n

-     #else

+     # else

      return str(n)

  

+ 

  class WindowsBuild(object):

  

      LEADING_CHAR = re.compile('^[^A-Za-z_]')
@@ -88,9 +92,9 @@ 

          else:

              self.task_opts = {}

          self.workdir = '/tmp/build'

-         ensuredir(self.workdir)

+         ensuredir(self.workdir)  # noqa: F821

          self.buildreq_dir = os.path.join(self.workdir, 'buildreqs')

-         ensuredir(self.buildreq_dir)

+         ensuredir(self.buildreq_dir)  # noqa: F821

          self.source_dir = None

          self.spec_dir = None

          self.patches_dir = None
@@ -148,20 +152,20 @@ 

                  else:

                      self.logger.info('file %s exists', entry)

          if errors:

-             raise BuildError('error validating build environment: %s' % \

-                   ', '.join(errors))

+             raise BuildError('error validating build environment: %s' %  # noqa: F821

+                              ', '.join(errors))

  

      def updateClam(self):

          """update ClamAV virus definitions"""

          ret, output = run(['/bin/freshclam', '--quiet'])

          if ret:

-             raise BuildError('could not update ClamAV database: %s' % output)

+             raise BuildError('could not update ClamAV database: %s' % output)  # noqa: F821

  

      def checkEnv(self):

          """make the environment is fit for building in"""

          for tool in ['/bin/freshclam', '/bin/clamscan', '/bin/patch']:

              if not os.path.isfile(tool):

-                 raise BuildError('%s is missing from the build environment' % tool)

+                 raise BuildError('%s is missing from the build environment' % tool)  # noqa: F821

  

      def zipDir(self, rootdir, filename):

          rootbase = os.path.basename(rootdir)
@@ -178,41 +182,45 @@ 

  

      def checkout(self):

          """Checkout sources, winspec, and patches, and apply patches"""

-         src_scm = SCM(self.source_url)

-         self.source_dir = src_scm.checkout(ensuredir(os.path.join(self.workdir, 'source')))

+         src_scm = SCM(self.source_url)  # noqa: F821

+         self.source_dir = src_scm.checkout(

+             ensuredir(os.path.join(self.workdir, 'source')))  # noqa: F821

          self.zipDir(self.source_dir, os.path.join(self.workdir, 'sources.zip'))

          if 'winspec' in self.task_opts:

-             spec_scm = SCM(self.task_opts['winspec'])

-             self.spec_dir = spec_scm.checkout(ensuredir(os.path.join(self.workdir, 'spec')))

+             spec_scm = SCM(self.task_opts['winspec'])  # noqa: F821

+             self.spec_dir = spec_scm.checkout(

+                 ensuredir(os.path.join(self.workdir, 'spec')))  # noqa: F821

              self.zipDir(self.spec_dir, os.path.join(self.workdir, 'spec.zip'))

          else:

              self.spec_dir = self.source_dir

          if 'patches' in self.task_opts:

-             patch_scm = SCM(self.task_opts['patches'])

-             self.patches_dir = patch_scm.checkout(ensuredir(os.path.join(self.workdir, 'patches')))

+             patch_scm = SCM(self.task_opts['patches'])  # noqa: F821

+             self.patches_dir = patch_scm.checkout(

+                 ensuredir(os.path.join(self.workdir, 'patches')))  # noqa: F821

              self.zipDir(self.patches_dir, os.path.join(self.workdir, 'patches.zip'))

              self.applyPatches(self.source_dir, self.patches_dir)

          self.virusCheck(self.workdir)

  

      def applyPatches(self, sourcedir, patchdir):

          """Apply patches in patchdir to files in sourcedir)"""

-         patches = [patch for patch in os.listdir(patchdir) if \

-                    os.path.isfile(os.path.join(patchdir, patch)) and \

+         patches = [patch for patch in os.listdir(patchdir) if

+                    os.path.isfile(os.path.join(patchdir, patch)) and

                     patch.endswith('.patch')]

          if not patches:

-             raise BuildError('no patches found at %s' % patchdir)

+             raise BuildError('no patches found at %s' % patchdir)  # noqa: F821

          patches.sort()

          for patch in patches:

-             cmd = ['/bin/patch', '--verbose', '-d', sourcedir, '-p1', '-i', os.path.join(patchdir, patch)]

+             cmd = ['/bin/patch', '--verbose', '-d', sourcedir, '-p1', '-i',

+                    os.path.join(patchdir, patch)]

              run(cmd, fatal=True)

  

      def loadConfig(self):

          """Load build configuration from the spec file."""

          specfiles = [spec for spec in os.listdir(self.spec_dir) if spec.endswith('.ini')]

          if len(specfiles) == 0:

-             raise BuildError('No .ini file found')

+             raise BuildError('No .ini file found')  # noqa: F821

          elif len(specfiles) > 1:

-             raise BuildError('Multiple .ini files found')

+             raise BuildError('Multiple .ini files found')  # noqa: F821

  

          if six.PY2:

              conf = SafeConfigParser()
@@ -237,7 +245,8 @@ 

          # absolute paths, or without a path in which case it is searched for

          # on the PATH.

          if conf.has_option('building', 'preinstalled'):

-             self.preinstalled.extend([e.strip() for e in conf.get('building', 'preinstalled').split('\n') if e])

+             self.preinstalled.extend(

+                 [e.strip() for e in conf.get('building', 'preinstalled').split('\n') if e])

  

          # buildrequires and provides are multi-valued (space-separated)

          for br in conf.get('building', 'buildrequires').split():
@@ -306,7 +315,7 @@ 

          """Create the buildroot object on the hub."""

          repo_id = self.task_opts.get('repo_id')

          if not repo_id:

-             raise BuildError('repo_id must be specified')

+             raise BuildError('repo_id must be specified')  # noqa: F821

          self.buildroot_id = self.server.initBuildroot(repo_id, self.platform)

  

      def expireBuildroot(self):
@@ -316,9 +325,9 @@ 

      def fetchFile(self, basedir, buildinfo, fileinfo, brtype):

          """Download the file from buildreq, at filepath, into the basedir"""

          destpath = os.path.join(basedir, fileinfo['localpath'])

-         ensuredir(os.path.dirname(destpath))

+         ensuredir(os.path.dirname(destpath))  # noqa: F821

          if 'checksum_type' in fileinfo:

-             checksum_type = CHECKSUM_TYPES[fileinfo['checksum_type']]

+             checksum_type = CHECKSUM_TYPES[fileinfo['checksum_type']]  # noqa: F821

              if checksum_type == 'sha1':

                  checksum = hashlib.sha1()

              elif checksum_type == 'sha256':
@@ -326,13 +335,14 @@ 

              elif checksum_type == 'md5':

                  checksum = hashlib.md5()

              else:

-                 raise BuildError('Unknown checksum type %s for %s' % (

+                 raise BuildError('Unknown checksum type %s for %s' % (  # noqa: F821

                          checksum_type,

                          os.path.basename(fileinfo['localpath'])))

          with open(destpath, 'w') as destfile:

              offset = 0

              while True:

-                 encoded = self.server.getFile(buildinfo, fileinfo, encode_int(offset), 1048576, brtype)

+                 encoded = self.server.getFile(buildinfo, fileinfo, encode_int(offset), 1048576,

+                                               brtype)

                  if not encoded:

                      break

                  data = base64.b64decode(encoded)
@@ -345,9 +355,11 @@ 

          if 'checksum_type' in fileinfo:

              digest = checksum.hexdigest()

              if fileinfo['checksum'] != digest:

-                 raise BuildError('checksum validation failed for %s, %s (computed) != %s (provided)' % \

-                                  (destpath, digest, fileinfo['checksum']))

-             self.logger.info('Retrieved %s (%s bytes, %s: %s)', destpath, offset, checksum_type, digest)

+                 raise BuildError(  # noqa: F821

+                     'checksum validation failed for %s, %s (computed) != %s (provided)' %

+                     (destpath, digest, fileinfo['checksum']))

+             self.logger.info(

+                 'Retrieved %s (%s bytes, %s: %s)', destpath, offset, checksum_type, digest)

          else:

              self.logger.info('Retrieved %s (%s bytes)', destpath, offset)
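
The loop above pulls base64-encoded 1 MiB chunks and feeds each decoded block to both the output file and the running checksum. The same fetch-and-verify shape as a standalone sketch, with a hypothetical get_chunk(offset, size) standing in for server.getFile:

    import base64
    import hashlib

    def fetch_checked(get_chunk, destpath, expected, algo, chunk=1048576):
        """Download in chunks, hashing as we go; raise on mismatch."""
        checksum = hashlib.new(algo)
        offset = 0
        with open(destpath, 'wb') as destfile:
            while True:
                encoded = get_chunk(offset, chunk)
                if not encoded:
                    break
                data = base64.b64decode(encoded)
                destfile.write(data)
                checksum.update(data)
                offset += len(data)
        if checksum.hexdigest() != expected:
            raise ValueError('checksum validation failed for %s, '
                             '%s (computed) != %s (provided)'
                             % (destpath, checksum.hexdigest(), expected))
        return offset
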

  
@@ -361,7 +373,7 @@ 

              buildinfo = self.server.getLatestBuild(self.build_tag, buildreq,

                                                     self.task_opts.get('repo_id'))

              br_dir = os.path.join(self.buildreq_dir, buildreq, brtype)

-             ensuredir(br_dir)

+             ensuredir(br_dir)  # noqa: F821

              brinfo['dir'] = br_dir

              brfiles = []

              brinfo['files'] = brfiles
@@ -405,7 +417,8 @@ 

  

      def cmdBuild(self):

          """Do the build: run the execute line(s) with cmd.exe"""

-         tmpfd, tmpname = tempfile.mkstemp(prefix='koji-tmp', suffix='.bat', dir='/cygdrive/c/Windows/Temp')

+         tmpfd, tmpname = tempfile.mkstemp(prefix='koji-tmp', suffix='.bat',

+                                           dir='/cygdrive/c/Windows/Temp')

          script = os.fdopen(tmpfd, 'w')

          for attr in ['source_dir', 'spec_dir', 'patches_dir']:

              val = getattr(self, attr)
@@ -438,7 +451,7 @@ 

          cmd = ['cmd.exe', '/C', 'C:\\Windows\\Temp\\' + os.path.basename(tmpname)]

          ret, output = run(cmd, chdir=self.source_dir)

          if ret:

-             raise BuildError('build command failed, see build.log for details')

+             raise BuildError('build command failed, see build.log for details')  # noqa: F821

  

      def bashBuild(self):

          """Do the build: run the execute line(s) with bash"""
@@ -470,7 +483,7 @@ 

          cmd = ['/bin/bash', '-e', '-x', tmpname]

          ret, output = run(cmd, chdir=self.source_dir)

          if ret:

-             raise BuildError('build command failed, see build.log for details')

+             raise BuildError('build command failed, see build.log for details')  # noqa: F821

  

      def checkBuild(self):

          """Verify that the build completed successfully."""
@@ -497,13 +510,13 @@ 

                      errors.append('file %s does not exist' % entry)

          self.virusCheck(self.workdir)

          if errors:

-             raise BuildError('error validating build output: %s' % \

+             raise BuildError('error validating build output: %s' %  # noqa: F821

                    ', '.join(errors))

  

      def virusCheck(self, path):

          """ensure a path is virus free with ClamAV. path should be absolute"""

          if not path.startswith('/'):

-             raise BuildError('Invalid path to scan for viruses: ' + path)

+             raise BuildError('Invalid path to scan for viruses: ' + path)  # noqa: F821

          run(['/bin/clamscan', '--quiet', '--recursive', path], fatal=True)

  

      def gatherResults(self):
@@ -529,6 +542,7 @@ 

          self.expireBuildroot()

          return self.gatherResults()

  

+ 

  def run(cmd, chdir=None, fatal=False, log=True):

      global logfd

      output = ''
@@ -555,9 +569,10 @@ 

              msg += ', see %s for details' % (os.path.basename(logfd.name))

          else:

              msg += ', output: %s' % output

-         raise BuildError(msg)

+         raise BuildError(msg)  # noqa: F821

      return ret, output

  

+ 

  def find_net_info():

      """

      Find the network gateway configured for this VM.
@@ -586,6 +601,7 @@ 

          gateway = None

      return macaddr, gateway

  

+ 

  def upload_file(server, prefix, path):

      """upload a single file to the vmd"""

      logger = logging.getLogger('koji.vm')
@@ -606,6 +622,7 @@ 

      server.verifyChecksum(path, digest, 'md5')

      logger.info('Uploaded %s (%s bytes, md5: %s)', destpath, offset, digest)

  

+ 

  def get_mgmt_server():

      """Get a ServerProxy object we can use to retrieve task info"""

      logger = logging.getLogger('koji.vm')
@@ -617,12 +634,14 @@ 

      logger.debug('found MAC address %s, connecting to %s:%s',

                   macaddr, gateway, MANAGER_PORT)

      server = six.moves.xmlrpc_client.ServerProxy('http://%s:%s/' %

-                                    (gateway, MANAGER_PORT), allow_none=True)

+                                                  (gateway, MANAGER_PORT), allow_none=True)

      # we would set a timeout on the socket here, but that is apparently not

      # supported by python/cygwin/Windows

      task_port = server.getPort(macaddr)

      logger.debug('found task-specific port %s', task_port)

-     return six.moves.xmlrpc_client.ServerProxy('http://%s:%s/' % (gateway, task_port), allow_none=True)

+     return six.moves.xmlrpc_client.ServerProxy('http://%s:%s/' % (gateway, task_port),

+                                                allow_none=True)

+ 

  

  def get_options():

      """handle usage and parse options"""
@@ -632,11 +651,14 @@ 

      """

      parser = OptionParser(usage=usage)

      parser.add_option('-d', '--debug', action='store_true', help='Log debug statements')

-     parser.add_option('-i', '--install', action='store_true', help='Install this daemon as a service', default=False)

-     parser.add_option('-u', '--uninstall', action='store_true', help='Uninstall this daemon if it was installed previously as a service', default=False)

+     parser.add_option('-i', '--install', action='store_true', default=False,

+                       help='Install this daemon as a service')

+     parser.add_option('-u', '--uninstall', action='store_true', default=False,

+                       help='Uninstall this daemon if it was installed previously as a service')

      (options, args) = parser.parse_args()

      return options

  

+ 

  def setup_logging(opts):

      global logfile, logfd

      logger = logging.getLogger('koji.vm')
@@ -651,11 +673,13 @@ 

      logger.addHandler(handler)

      return handler

  

+ 

  def log_local(msg):

      tb = ''.join(traceback.format_exception(*sys.exc_info()))

      sys.stderr.write('%s: %s\n' % (time.ctime(), msg))

      sys.stderr.write(tb)

  

+ 

  def stream_logs(server, handler, builds):

      """Stream logs incrementally to the server.

         The global logfile will always be streamed.
@@ -675,7 +699,7 @@ 

                      try:

                          fd = open(log, 'r')

                          logs[log] = (relpath, fd)

-                     except:

+                     except Exception:

                          log_local('Error opening %s' % log)

                          continue

                  else:
@@ -689,10 +713,11 @@ 

                  del contents

                  try:

                      server.uploadDirect(relpath, offset, size, digest, data)

-                 except:

+                 except Exception:

                      log_local('error uploading %s' % relpath)

          time.sleep(1)

  

+ 

  def fail(server, handler):

      """do the right thing when a build fails"""

      global logfile, logfd
@@ -704,14 +729,14 @@ 

              logfd.flush()

              upload_file(server, os.path.dirname(logfile),

                          os.path.basename(logfile))

-         except:

+         except Exception:

              log_local('error calling upload_file()')

          while True:

              try:

                  # this is the very last thing we do, keep trying as long as we can

                  server.failTask(tb)

                  break

-             except:

+             except Exception:

                  log_local('error calling server.failTask()')

      sys.exit(1)

  
@@ -719,6 +744,7 @@ 

  logfile = '/tmp/build.log'

  logfd = None

  

+ 

  def main():

      prog = os.path.basename(sys.argv[0])

      opts = get_options()
@@ -780,7 +806,7 @@ 

          results['logs'].append(os.path.basename(logfile))

  

          server.closeTask(results)

-     except:

+     except Exception:

          fail(server, handler)

      sys.exit(0)
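
The bare except: clauses throughout this PR become except Exception: (E722). The distinction matters for daemons: a bare except also swallows SystemExit and KeyboardInterrupt, making a clean shutdown hard. Minimal demonstration:

    try:
        raise SystemExit
    except Exception:
        print("not reached: SystemExit does not derive from Exception")
    except BaseException:
        print("reached: only a bare except (or BaseException) catches it")
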

  

file modified
+103 -70
@@ -48,8 +48,14 @@ 

  import koji.util

  from koji.daemon import SCM, TaskManager

  # TaskHandlers are required to be imported, do not remove them

- from koji.tasks import (BaseTaskHandler, MultiPlatformTask, RestartTask,  # noqa: F401

-                         RestartVerifyTask, ServerExit, ServerRestart)

+ from koji.tasks import (  # noqa: F401

+     BaseTaskHandler,

+     MultiPlatformTask,

+     RestartTask,

+     RestartVerifyTask,

+     ServerExit,

+     ServerRestart

+ )

  

  try:

      import krbV
@@ -62,6 +68,8 @@ 

      if err[3] != libvirt.VIR_ERR_ERROR:

          # Don't log libvirt errors: global error handler will do that

          logging.warn("Non-error from libvirt: '%s'", err[2])

+ 

+ 

  libvirt.registerErrorHandler(f=libvirt_callback, ctx=None)

  

  
@@ -92,7 +100,7 @@ 

      parser.add_option("--maxjobs", type='int', help="Specify maxjobs")

      parser.add_option("--sleeptime", type='int', help="Specify the polling interval")

      parser.add_option("--admin-emails", type='str', action="store", metavar="EMAILS",

-                        help="Comma-separated addresses to send error notices to.")

+                       help="Comma-separated addresses to send error notices to.")

      parser.add_option("--workdir", help="Specify workdir")

      parser.add_option("--pluginpath", help="Specify plugin search path")

      parser.add_option("--plugin", action="append", help="Load specified plugin")
@@ -101,7 +109,7 @@ 

  

      if args:

          parser.error("incorrect number of arguments")

-         #not reached

+         # not reached

          assert False  # pragma: no cover

  

      # load local config
@@ -176,7 +184,7 @@ 

              if os.path.exists(fn):

                  setattr(options, name, fn)

  

-     #make sure workdir exists

+     # make sure workdir exists

      if not os.path.exists(options.workdir):

          koji.ensuredir(options.workdir)

  
@@ -185,6 +193,7 @@ 

  

      return options

  

+ 

  def quit(msg=None, code=1):

      if msg:

          logging.getLogger("koji.vm").error(msg)
@@ -192,24 +201,27 @@ 

          sys.stderr.flush()

      sys.exit(code)

  

+ 

  def main(options, session):

      logger = logging.getLogger("koji.vm")

      logger.info('Starting up')

      tm = VMTaskManager(options, session)

      tm.findHandlers(globals())

      if options.plugin:

-         #load plugins

+         # load plugins

          pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))

          for name in options.plugin:

              logger.info('Loading plugin: %s', name)

              tm.scanPlugin(pt.load(name))

+ 

      def shutdown(*args):

          raise SystemExit

+ 

      def restart(*args):

          logger.warn("Initiating graceful restart")

          tm.restart_pending = True

-     signal.signal(signal.SIGTERM,shutdown)

-     signal.signal(signal.SIGUSR1,restart)

+     signal.signal(signal.SIGTERM, shutdown)

+     signal.signal(signal.SIGUSR1, restart)

      taken = False

      tm.cleanupAllVMs()

      while True:
@@ -218,7 +230,7 @@ 

              tm.updateTasks()

              taken = tm.getNextTask()

              tm.cleanupExpiredVMs()

-         except (SystemExit,ServerExit,KeyboardInterrupt):

+         except (SystemExit, ServerExit, KeyboardInterrupt):

              logger.warn("Exiting")

              break

          except ServerRestart:
@@ -229,7 +241,7 @@ 

              break

          except koji.RetryError:

              raise

-         except:

+         except Exception:

              # XXX - this is a little extreme

              # log the exception and continue

              logger.error('Error in main loop', exc_info=True)
@@ -239,7 +251,7 @@ 

                  # The load-balancing code in getNextTask() will prevent a single builder

                  # from getting overloaded.

                  time.sleep(options.sleeptime)

-         except (SystemExit,KeyboardInterrupt):

+         except (SystemExit, KeyboardInterrupt):

              logger.warn("Exiting")

              break

      logger.warn("Shutting down, please wait...")
@@ -257,10 +269,12 @@ 

  

      def __init__(self, addr, port):

          if sys.version_info[:2] <= (2, 4):

-             six.moves.xmlrpc_server.SimpleXMLRPCServer.__init__(self, (addr, port), logRequests=False)

+             six.moves.xmlrpc_server.SimpleXMLRPCServer.__init__(self, (addr, port),

+                                                                 logRequests=False)

          else:

-             six.moves.xmlrpc_server.SimpleXMLRPCServer.__init__(self, (addr, port), logRequests=False,

-                                                            allow_none=True)

+             six.moves.xmlrpc_server.SimpleXMLRPCServer.__init__(self, (addr, port),

+                                                                 logRequests=False,

+                                                                 allow_none=True)

          self.logger = logging.getLogger('koji.vm.DaemonXMLRPCServer')

          self.socket.settimeout(5)

          self.active = True
@@ -281,13 +295,13 @@ 

                          self.close_request(conn)

              except socket.timeout:

                  pass

-             except:

+             except Exception:

                  self.logger.error('Error handling requests', exc_info=True)

  

      if sys.version_info[:2] <= (2, 4):

          # Copy and paste from SimpleXMLRPCServer, with the addition of passing

          # allow_none=True to xmlrpclib.dumps()

-         def _marshaled_dispatch(self, data, dispatch_method = None):

+         def _marshaled_dispatch(self, data, dispatch_method=None):

              params, method = six.moves.xmlrpc_client.loads(data)

              try:

                  if dispatch_method is not None:
@@ -295,14 +309,17 @@ 

                  else:

                      response = self._dispatch(method, params)

                  response = (response,)

-                 response = six.moves.xmlrpc_client.dumps(response, methodresponse=1, allow_none=True)

+                 response = six.moves.xmlrpc_client.dumps(response,

+                                                          methodresponse=1, allow_none=True)

              except six.moves.xmlrpc_client.Fault as fault:

                  response = six.moves.xmlrpc_client.dumps(fault)

-             except:

+             except Exception:

                  # report exception back to server

                  response = six.moves.xmlrpc_client.dumps(

-                     six.moves.xmlrpc_client.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))

-                     )

+                     six.moves.xmlrpc_client.Fault(

+                         1, "%s:%s" %

+                         (sys.exc_info()[0], sys.exc_info()[1]))

+                 )

              return response

  

  
@@ -338,13 +355,13 @@ 

              if not repo_info:

                  raise koji.BuildError('invalid repo ID: %s' % repo_id)

              policy_data = {

-                 'user_id' : task_info['owner'],

-                 'source' : source_url,

-                 'task_id' : self.id,

-                 'build_tag' : build_tag['id'],

-                 'skip_tag' : bool(opts.get('skip_tag')),

+                 'user_id': task_info['owner'],

+                 'source': source_url,

+                 'task_id': self.id,

+                 'build_tag': build_tag['id'],

+                 'skip_tag': bool(opts.get('skip_tag')),

                  'target': target_info['id']

-                 }

+             }

              if not opts.get('skip_tag'):

                  policy_data['tag'] = dest_tag['id']

              self.session.host.assertPolicy('build_from_repo_id', policy_data)
@@ -357,7 +374,9 @@ 

  

          task_opts = koji.util.dslice(opts, ['timeout', 'cpus', 'mem', 'static_mac'], strict=False)

          task_id = self.session.host.subtask(method='vmExec',

-                                             arglist=[name, [source_url, build_tag['name'], subopts], task_opts],

+                                             arglist=[name,

+                                                      [source_url, build_tag['name'], subopts],

+                                                      task_opts],

                                              label=name[:255],

                                              parent=self.id)

          results = self.wait(task_id)[task_id]
@@ -367,15 +386,16 @@ 

          if not opts.get('scratch'):

              build_info = koji.util.dslice(results, ['name', 'version', 'release', 'epoch'])

              build_info['package_name'] = build_info['name']

-             pkg_cfg = self.session.getPackageConfig(dest_tag['id'], build_info['name'], event=event_id)

+             pkg_cfg = self.session.getPackageConfig(dest_tag['id'], build_info['name'],

+                                                     event=event_id)

              if not opts.get('skip_tag'):

                  # Make sure package is on the list for this tag

                  if pkg_cfg is None:

-                     raise koji.BuildError("package %s not in list for tag %s" \

-                             % (build_info['name'], dest_tag['name']))

+                     raise koji.BuildError("package %s not in list for tag %s"

+                                           % (build_info['name'], dest_tag['name']))

                  elif pkg_cfg['blocked']:

-                     raise koji.BuildError("package %s is blocked for tag %s" \

-                             % (build_info['name'], dest_tag['name']))

+                     raise koji.BuildError("package %s is blocked for tag %s"

+                                           % (build_info['name'], dest_tag['name']))

  

              build_info = self.session.host.initWinBuild(self.id, build_info,

                                                          koji.util.dslice(results, ['platform']))
@@ -385,8 +405,8 @@ 

              rpm_results = None

              spec_url = opts.get('specfile')

              if spec_url:

-                 rpm_results = self.buildWrapperRPM(spec_url, task_id, target_info, build_info, repo_id,

-                                                    channel='default')

+                 rpm_results = self.buildWrapperRPM(spec_url, task_id, target_info, build_info,

+                                                    repo_id, channel='default')

  

              if opts.get('scratch'):

                  self.session.host.moveWinBuildToScratch(self.id, results, rpm_results)
@@ -395,7 +415,7 @@ 

          except (SystemExit, ServerExit, KeyboardInterrupt):

              # we do not trap these

              raise

-         except:

+         except Exception:

              if not opts.get('scratch'):

                  # scratch builds do not get imported

                  self.session.host.failBuild(self.id, build_id)
@@ -410,6 +430,7 @@ 

                                                      parent=self.id)

              self.wait(tag_task_id)

  

+ 

  class VMExecTask(BaseTaskHandler):

      """

      Handles the startup, state-tracking, and shutdown of a VM
@@ -423,8 +444,8 @@ 

  

      def __init__(self, *args, **kw):

          super(VMExecTask, self).__init__(*args, **kw)

-         self.task_manager = six.moves.xmlrpc_client.ServerProxy('http://%s:%s/' % (self.options.privaddr, self.options.portbase),

-                                                   allow_none=True)

+         self.task_manager = six.moves.xmlrpc_client.ServerProxy(

+             'http://%s:%s/' % (self.options.privaddr, self.options.portbase), allow_none=True)

          self.port = None

          self.server = None

          self.task_info = None
@@ -438,13 +459,16 @@ 

      def mkqcow2(self, clone_name, source_disk, disk_num):

          new_name = clone_name + '-disk-' + str(disk_num) + self.QCOW2_EXT

          new_path = os.path.join(self.options.imagedir, new_name)

-         cmd = ['/usr/bin/qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s' % source_disk, new_path]

-         proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)

+         cmd = ['/usr/bin/qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s' % source_disk,

+                new_path]

+         proc = subprocess.Popen(cmd,

+                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)

          output, dummy = proc.communicate()

          ret = proc.wait()

          if ret:

-             raise koji.BuildError('unable to create qcow2 image, "%s" returned %s; output was: %s' % \

-                   (' '.join(cmd), ret, output))

+             raise koji.BuildError(

+                 'unable to create qcow2 image, "%s" returned %s; output was: %s' %

+                 (' '.join(cmd), ret, output))

          vm_user = pwd.getpwnam(self.options.vmuser)

          os.chown(new_path, vm_user.pw_uid, vm_user.pw_gid)

          return new_path
@@ -674,13 +698,13 @@ 

              remote_pi = koji.PathInfo(self.options.topurl)

              if type == 'rpm':

                  remote_url = remote_pi.build(buildinfo) + '/' + \

-                              fileinfo['localpath']

+                     fileinfo['localpath']

              elif type == 'maven':

                  remote_url = remote_pi.mavenbuild(buildinfo) + '/' + \

-                              fileinfo['localpath']

+                     fileinfo['localpath']

              elif type == 'win':

                  remote_url = remote_pi.winbuild(buildinfo) + '/' + \

-                              fileinfo['localpath']

+                     fileinfo['localpath']

              else:

                  raise koji.BuildError('unsupported file type: %s' % type)

              koji.ensuredir(os.path.dirname(localpath))
@@ -695,14 +719,17 @@ 

                  hdr = koji.get_rpm_header(localpath)

                  payloadhash = koji.hex_string(koji.get_header_field(hdr, 'sigmd5'))

                  if fileinfo['payloadhash'] != payloadhash:

-                     raise koji.BuildError("Downloaded rpm %s doesn't match checksum (expected: %s, got %s)" % (

-                         os.path.basename(fileinfo['localpath']),

-                         fileinfo['payloadhash'], payloadhash))

+                     raise koji.BuildError(

+                         "Downloaded rpm %s doesn't match checksum (expected: %s, got %s)" %

+                         (os.path.basename(fileinfo['localpath']),

+                          fileinfo['payloadhash'],

+                          payloadhash))

                  if not koji.util.check_sigmd5(localpath):

-                     raise koji.BuildError("Downloaded rpm %s doesn't match sigmd5" % \

-                         os.path.basename(fileinfo['localpath']))

+                     raise koji.BuildError("Downloaded rpm %s doesn't match sigmd5" %

+                                           os.path.basename(fileinfo['localpath']))

              else:

-                 self.verifyChecksum(localpath, fileinfo['checksum'], koji.CHECKSUM_TYPES[fileinfo['checksum_type']])

+                 self.verifyChecksum(localpath, fileinfo['checksum'],

+                                     koji.CHECKSUM_TYPES[fileinfo['checksum_type']])

  

          return open(localpath, 'r')

  
@@ -739,8 +766,8 @@ 

                  raise koji.BuildError('%s does not exist' % local_path)

              size = os.path.getsize(local_path)

              if offset != size:

-                 raise koji.BuildError('cannot write to %s at offset %s, size is %s' % \

-                       (local_path, offset, size))

+                 raise koji.BuildError('cannot write to %s at offset %s, size is %s' %

+                                       (local_path, offset, size))

              fobj = open(local_path, 'r+')

              fobj.seek(offset)

          data = base64.b64decode(contents)
@@ -783,8 +810,9 @@ 

          if sum.hexdigest() == checksum:

              return True

          else:

-             raise koji.BuildError('%s checksum validation failed for %s, %s (computed) != %s (provided)' % \

-                   (algo, local_path, sum.hexdigest(), checksum))

+             raise koji.BuildError(

+                 '%s checksum validation failed for %s, %s (computed) != %s (provided)' %

+                 (algo, local_path, sum.hexdigest(), checksum))

  

      def closeTask(self, output):

          self.output = output
@@ -834,7 +862,7 @@ 

  

          conn = libvirt.open(None)

          clone_name = self.clone(conn, name, opts)

-         self.logger.debug('Cloned VM %s to %s',name, clone_name)

+         self.logger.debug('Cloned VM %s to %s', name, clone_name)

          try:

              vm = conn.lookupByName(clone_name)

              macaddr = self.macAddr(vm)
@@ -848,8 +876,8 @@ 

              self.logger.info('Started VM %s', clone_name)

          except libvirt.libvirtError as e:

              self.logger.error('error starting VM %s', clone_name, exc_info=True)

-             raise koji.PreBuildError('error starting VM %s, error was: %s' % \

-                   (clone_name, e))

+             raise koji.PreBuildError('error starting VM %s, error was: %s' %

+                                      (clone_name, e))

  

          start = time.time()

          while True:
@@ -866,8 +894,9 @@ 

                  if mins > timeout:

                      vm.destroy()

                      self.server.server_close()

-                     raise koji.BuildError('Task did not complete after %.2f minutes, VM %s has been destroyed' % \

-                           (mins, clone_name))

+                     raise koji.BuildError(

+                         'Task did not complete after %.2f minutes, VM %s has been destroyed' %

+                         (mins, clone_name))

              else:

                  vm.destroy()

                  self.server.server_close()
@@ -877,6 +906,7 @@ 

                  else:

                      raise koji.BuildError(self.output)

  

+ 

  class VMTaskManager(TaskManager):

      def __init__(self, options, session):

          super(VMTaskManager, self).__init__(options, session)
@@ -899,7 +929,9 @@ 

              if macaddr in self.macaddrs:

                  raise koji.PreBuildError('duplicate MAC address: %s' % macaddr)

              self.macaddrs[macaddr] = (vm_name, task_id, port)

-             self.logger.info('registered MAC address %s for VM %s (task ID %s, port %s)', macaddr, vm_name, task_id, port)

+             self.logger.info(

+                 'registered MAC address %s for VM %s (task ID %s, port %s)',

+                 macaddr, vm_name, task_id, port)

              return True

          finally:

              self.macaddr_lock.release()
@@ -935,7 +967,7 @@ 

          for node in nodelist:

              disk = node.prop('file')

              if os.path.basename(disk).startswith(VMExecTask.CLONE_PREFIX) and \

-                    disk.endswith(VMExecTask.QCOW2_EXT):

+                     disk.endswith(VMExecTask.QCOW2_EXT):

                  disks.append(disk)

          ctx.xpathFreeContext()

          doc.freeDoc()
@@ -950,7 +982,8 @@ 

          availableMB = available // 1024 // 1024

          self.logger.debug('disk space available in %s: %i MB', self.options.imagedir, availableMB)

          if availableMB < self.options.minspace:

-             self.status = 'Insufficient disk space: %i MB, %i MB required' % (availableMB, self.options.minspace)

+             self.status = 'Insufficient disk space: %i MB, %i MB required' % \

+                           (availableMB, self.options.minspace)

              self.logger.warn(self.status)

              return False

          return True
@@ -995,7 +1028,7 @@ 

              task_info = self.session.getTaskInfo(task['id'], request=True)

              vm_name = task_info['request'][0]

              try:

-                 vm = self.libvirt_conn.lookupByName(vm_name)

+                 self.libvirt_conn.lookupByName(vm_name)

              except libvirt.libvirtError:

                  # if this builder does not have the requested VM,

                  # we can't handle the task
@@ -1023,7 +1056,7 @@ 

                  if os.path.isfile(disk):

                      os.unlink(disk)

                      self.logger.debug('Removed disk file %s for VM %s', disk, vm_name)

-             except:

+             except Exception:

                  self.logger.error('Error removing disk file %s for VM %s', disk, vm_name,

                                    exc_info=True)

                  return False
@@ -1040,7 +1073,7 @@ 

          """

          vms = self.libvirt_conn.listDefinedDomains() + self.libvirt_conn.listDomainsID()

          for vm_name in vms:

-             if type(vm_name) == int:

+             if isinstance(vm_name, int):

                  vm_name = self.libvirt_conn.lookupByID(vm_name).name()

              if vm_name.startswith(VMExecTask.CLONE_PREFIX):

                  self.cleanupVM(vm_name)
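
The type(vm_name) == int test above becomes isinstance(), which also accepts subclasses and is the idiomatic check. A quick illustration with a made-up int subclass:

    class DomainID(int):   # hypothetical subclass, for illustration only
        pass

    vm_id = DomainID(7)
    print(type(vm_id) == int)       # False: exact-type check misses subclasses
    print(isinstance(vm_id, int))   # True
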
@@ -1084,7 +1117,7 @@ 

  

  if __name__ == "__main__":

      koji.add_file_logger("koji", "/var/log/kojivmd.log")

-     #note we're setting logging params for all of koji*

+     # note we're setting logging params for all of koji*

      options = get_options()

      if options.debug:

          logging.getLogger("koji").setLevel(logging.DEBUG)
@@ -1097,7 +1130,7 @@ 

      if options.admin_emails:

          koji.add_mail_logger("koji", options.admin_emails)

  

-     #start a session and login

+     # start a session and login

      session_opts = koji.grab_session_options(options)

      session = koji.ClientSession(options.server, session_opts)

      if options.cert and os.path.isfile(options.cert):
@@ -1131,14 +1164,14 @@ 

              quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])

      else:

          quit("No username/password supplied and Kerberos missing or not configured")

-     #make session exclusive

+     # make session exclusive

      try:

          session.exclusiveSession(force=options.force_lock)

      except koji.AuthLockError:

          quit("Error: Unable to get lock. Trying using --force-lock")

      if not session.logged_in:

          quit("Error: Unknown login error")

-     #make sure it works

+     # make sure it works

      try:

          ret = session.echo("OK")

      except requests.exceptions.ConnectionError:
@@ -1148,7 +1181,7 @@ 

  

      # run main

      if options.daemon:

-         #detach

+         # detach

          koji.daemonize()

          main(options, session)

      elif not options.skip_main:

file modified
+247 -111
@@ -42,12 +42,16 @@ 

  from koji.util import to_list

  from kojiweb.util import _genHTML, _getValidTokens, _initValues

  

+ 

  # Convenience definition of a commonly-used sort function

- _sortbyname = lambda x: x['name']

+ def _sortbyname(x):

+     return x['name']

+ 
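
Although E731 is on this PR's ignore list, the assigned lambda above still becomes a def; beyond style, a def carries a real __name__ in tracebacks and profiles:

    _sortbyname_lambda = lambda x: x['name']  # the old form (what E731 flags)

    def _sortbyname_def(x):
        return x['name']

    print(_sortbyname_lambda.__name__)  # '<lambda>'
    print(_sortbyname_def.__name__)     # '_sortbyname_def'
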

  

- #loggers

+ # loggers

  authlogger = logging.getLogger('koji.auth')

  

+ 

  def _setUserCookie(environ, user):

      options = environ['koji.options']

      # include the current time in the cookie so we can verify that
@@ -62,7 +66,7 @@ 

      value = "%s:%s" % (shasum.hexdigest(), value)

      cookies = six.moves.http_cookies.SimpleCookie()

      cookies['user'] = value

-     c = cookies['user']  #morsel instance

+     c = cookies['user']  # morsel instance

      c['secure'] = True

      c['path'] = os.path.dirname(environ['SCRIPT_NAME'])

      # the Cookie module treats integer expire times as relative seconds
@@ -72,15 +76,17 @@ 

      environ['koji.headers'].append(['Set-Cookie', out])

      environ['koji.headers'].append(['Cache-Control', 'no-cache="set-cookie"'])

  

+ 

  def _clearUserCookie(environ):

      cookies = six.moves.http_cookies.SimpleCookie()

      cookies['user'] = ''

-     c = cookies['user']  #morsel instance

+     c = cookies['user']  # morsel instance

      c['path'] = os.path.dirname(environ['SCRIPT_NAME'])

      c['expires'] = 0

      out = c.OutputString()

      environ['koji.headers'].append(['Set-Cookie', out])

  

+ 

  def _getUserCookie(environ):

      options = environ['koji.options']

      cookies = six.moves.http_cookies.SimpleCookie(environ.get('HTTP_COOKIE', ''))
@@ -118,6 +124,7 @@ 

      # Otherwise, cookie is valid and current

      return user

  

+ 

  def _krbLogin(environ, session, principal):

      options = environ['koji.options']

      wprinc = options['WebPrincipal']
@@ -126,6 +133,7 @@ 

      return session.krb_login(principal=wprinc, keytab=keytab,

                               ccache=ccache, proxyuser=principal)

  

+ 

  def _sslLogin(environ, session, username):

      options = environ['koji.options']

      client_cert = options['WebCert']
@@ -134,6 +142,7 @@ 

      return session.ssl_login(client_cert, None, server_ca,

                               proxyuser=username)

  

+ 

  def _assertLogin(environ):

      session = environ['koji.session']

      options = environ['koji.options']
@@ -145,9 +154,12 @@ 

                  raise koji.AuthError('could not login %s via SSL' % environ['koji.currentLogin'])

          elif options['WebPrincipal']:

              if not _krbLogin(environ, environ['koji.session'], environ['koji.currentLogin']):

-                 raise koji.AuthError('could not login using principal: %s' % environ['koji.currentLogin'])

+                 raise koji.AuthError(

+                     'could not login using principal: %s' % environ['koji.currentLogin'])

          else:

-             raise koji.AuthError('KojiWeb is incorrectly configured for authentication, contact the system administrator')

+             raise koji.AuthError(

+                 'KojiWeb is incorrectly configured for authentication, '

+                 'contact the system administrator')

  

          # verify a valid authToken was passed in to avoid CSRF

          authToken = environ['koji.form'].getfirst('a', '')
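The rewrapped `AuthError` messages above rely on Python's implicit string-literal concatenation: adjacent literals inside parentheses are joined at compile time, so a long message can be split across short source lines with no `+` and no backslash. A small demonstration:

    msg = ('KojiWeb is incorrectly configured for authentication, '
           'contact the system administrator')
    print(msg)  # one unbroken sentence; the split exists only in the source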
@@ -159,12 +171,14 @@ 

              # their authToken is likely expired

              # send them back to the page that brought them here so they

              # can re-click the link with a valid authToken

-             _redirectBack(environ, page=None, forceSSL=(_getBaseURL(environ).startswith('https://')))

+             _redirectBack(environ, page=None,

+                           forceSSL=(_getBaseURL(environ).startswith('https://')))

              assert False  # pragma: no cover

      else:

          _redirect(environ, 'login')

          assert False  # pragma: no cover

  

+ 

  def _getServer(environ):

      opts = environ['koji.options']

      s_opts = {'krbservice': opts['KrbService'],
@@ -178,7 +192,8 @@ 

      if environ['koji.currentLogin']:

          environ['koji.currentUser'] = session.getUser(environ['koji.currentLogin'])

          if not environ['koji.currentUser']:

-             raise koji.AuthError('could not get user for principal: %s' % environ['koji.currentLogin'])

+             raise koji.AuthError(

+                 'could not get user for principal: %s' % environ['koji.currentLogin'])

          _setUserCookie(environ, environ['koji.currentLogin'])

      else:

          environ['koji.currentUser'] = None
@@ -186,23 +201,27 @@ 

      environ['koji.session'] = session

      return session

  

+ 

  def _construct_url(environ, page):

      port = environ['SERVER_PORT']

      host = environ['SERVER_NAME']

      url_scheme = environ['wsgi.url_scheme']

      if (url_scheme == 'https' and port == '443') or \

-         (url_scheme == 'http' and port == '80'):

+             (url_scheme == 'http' and port == '80'):

          return "%s://%s%s" % (url_scheme, host, page)

      return "%s://%s:%s%s" % (url_scheme, host, port, page)

  

+ 

  def _getBaseURL(environ):

      base = environ['SCRIPT_NAME']

      return _construct_url(environ, base)

  

+ 

  def _redirect(environ, location):

      environ['koji.redirect'] = location

      raise ServerRedirect

  

+ 

  def _redirectBack(environ, page, forceSSL):

      if page:

          # We'll work with the page we were given
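The deeper indent given to the wrapped condition in `_construct_url` above is the usual cure for pycodestyle E129, a continuation line that lines up exactly with the block that follows it. Pushing the continuation one extra step keeps condition and body visually distinct:

    def is_default_port(scheme, port):
        # Continuation indented past the body so the two read apart (E129).
        if (scheme == 'https' and port == '443') or \
                (scheme == 'http' and port == '80'):
            return True
        return False

    print(is_default_port('http', '80'))    # True
    print(is_default_port('http', '8080'))  # False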
@@ -227,6 +246,7 @@ 

      # and redirect to the page

      _redirect(environ, page)

  

+ 

  def login(environ, page=None):

      session = _getServer(environ)

      options = environ['koji.options']
@@ -256,7 +276,9 @@ 

      elif options['WebPrincipal']:

          principal = environ.get('REMOTE_USER')

          if not principal:

-             raise koji.AuthError('configuration error: mod_auth_gssapi should have performed authentication before presenting this page')

+             raise koji.AuthError(

+                 'configuration error: mod_auth_gssapi should have performed authentication before '

+                 'presenting this page')

  

          if not _krbLogin(environ, session, principal):

              raise koji.AuthError('could not login using principal: %s' % principal)
@@ -264,12 +286,15 @@ 

          username = principal

          authlogger.info('Successful Kerberos authentication by %s', username)

      else:

-         raise koji.AuthError('KojiWeb is incorrectly configured for authentication, contact the system administrator')

+         raise koji.AuthError(

+             'KojiWeb is incorrectly configured for authentication, contact the system '

+             'administrator')

  

      _setUserCookie(environ, username)

      # To protect the session cookie, we must forceSSL

      _redirectBack(environ, page, forceSSL=True)

  

+ 

  def logout(environ, page=None):

      user = _getUserCookie(environ)

      _clearUserCookie(environ)
@@ -278,6 +303,7 @@ 

  

      _redirectBack(environ, page, forceSSL=False)

  

+ 

  def index(environ, packageOrder='package_name', packageStart=None):

      values = _initValues(environ)

      server = _getServer(environ)
@@ -305,8 +331,10 @@ 

      values['order'] = '-id'

  

      if user:

-         kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id'], 'with_dups': True},

-                                      start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10)

+         kojiweb.util.paginateResults(server, values, 'listPackages',

+                                      kw={'userID': user['id'], 'with_dups': True},

+                                      start=packageStart, dataName='packages', prefix='package',

+                                      order=packageOrder, pageSize=10)

  

          notifs = server.getBuildNotifications(user['id'])

          notifs.sort(key=lambda x: x['id'])
@@ -326,13 +354,14 @@ 

  

      return _genHTML(environ, 'index.chtml')

  

+ 

  def notificationedit(environ, notificationID):

      server = _getServer(environ)

      _assertLogin(environ)

  

      notificationID = int(notificationID)

      notification = server.getBuildNotification(notificationID)

-     if notification == None:

+     if notification is None:

          raise koji.GenericError('no notification with ID: %i' % notificationID)

  

      form = environ['koji.form']
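The `== None` → `is None` conversions here and throughout are pycodestyle E711. `None` is a singleton, so identity is the correct test; `==` dispatches to the left operand's `__eq__`, which a class may define arbitrarily. A contrived example of why the distinction matters:

    class Weird(object):
        def __eq__(self, other):
            return True  # claims equality with everything, even None

    w = Weird()
    print(w == None)  # True  -- misleading; w is a real object (E711 flags this line)
    print(w is None)  # False -- identity cannot be fooled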
@@ -371,6 +400,7 @@ 

  

          return _genHTML(environ, 'notificationedit.chtml')

  

+ 

  def notificationcreate(environ):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -415,6 +445,7 @@ 

  

          return _genHTML(environ, 'notificationedit.chtml')

  

+ 

  def notificationdelete(environ, notificationID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -428,6 +459,7 @@ 

  

      _redirect(environ, 'index')

  

+ 

  # All Tasks

  _TASKS = ['build',

            'buildSRPMFromSCM',
@@ -459,11 +491,16 @@ 

            'livemedia',

            'createLiveMedia']

  # Tasks that can exist without a parent

- _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'distRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image', 'livemedia']

+ _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM',

+                    'winbuild', 'newRepo', 'distRepo', 'tagBuild', 'tagNotification', 'waitrepo',

+                    'livecd', 'appliance', 'image', 'livemedia']

  # Tasks that can have children

- _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'distRepo', 'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']

+ _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'distRepo',

+                  'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']

+ 

  

- def tasks(environ, owner=None, state='active', view='tree', method='all', hostID=None, channelID=None, start=None, order='-id'):

+ def tasks(environ, owner=None, state='active', view='tree', method='all', hostID=None,

+           channelID=None, start=None, order='-id'):

      values = _initValues(environ, 'Tasks', 'tasks')

      server = _getServer(environ)

  
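Signatures such as `tasks()` above are rewrapped to fit the project's configured maximum line length, with continuation arguments aligned under the opening parenthesis (PEP 8's "aligned with opening delimiter" style). The same convention, in miniature:

    def query_tasks(owner=None, state='active', view='tree', method='all',
                    host_id=None, channel_id=None, start=None, order='-id'):
        # Continuation lines start directly below the first argument.
        return {'owner': owner, 'state': state, 'order': order}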
@@ -517,7 +554,9 @@ 

          opts['parent'] = None

  

      if state == 'active':

-         opts['state'] = [koji.TASK_STATES['FREE'], koji.TASK_STATES['OPEN'], koji.TASK_STATES['ASSIGNED']]

+         opts['state'] = [koji.TASK_STATES['FREE'],

+                          koji.TASK_STATES['OPEN'],

+                          koji.TASK_STATES['ASSIGNED']]

      elif state == 'all':

          pass

      else:
@@ -566,6 +605,7 @@ 

  

      return _genHTML(environ, 'tasks.chtml')

  

+ 

  def taskinfo(environ, taskID):

      server = _getServer(environ)

      values = _initValues(environ, 'Task Info', 'tasks')
@@ -615,7 +655,7 @@ 

      values['estCompletion'] = None

      if taskBuild and taskBuild['state'] == koji.BUILD_STATES['BUILDING']:

          avgDuration = server.getAverageBuildDuration(taskBuild['package_id'])

-         if avgDuration != None:

+         if avgDuration is not None:

              avgDelta = datetime.timedelta(seconds=avgDuration)

              startTime = datetime.datetime.fromtimestamp(taskBuild['creation_ts'])

              values['estCompletion'] = startTime + avgDelta
@@ -674,7 +714,7 @@ 

      if task['state'] in (koji.TASK_STATES['CLOSED'], koji.TASK_STATES['FAILED']):

          try:

              result = server.getTaskResult(task['id'])

-         except:

+         except Exception:

              excClass, exc = sys.exc_info()[:2]

              values['result'] = exc

              values['excClass'] = excClass
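Swapping bare `except:` for `except Exception:` (flake8 E722) is more than style: a bare except also traps `SystemExit` and `KeyboardInterrupt`, which derive from `BaseException` rather than `Exception`, so Ctrl-C and `sys.exit()` can be silently swallowed. With `Exception`, ordinary errors are still caught the same way:

    import sys

    try:
        raise ValueError('boom')
    except Exception:
        exc_class, exc = sys.exc_info()[:2]
        print(exc_class.__name__, exc)  # ValueError boom
    # SystemExit and KeyboardInterrupt would pass through untouched.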
@@ -683,7 +723,7 @@ 

              values['excClass'] = None

              if task['method'] == 'buildContainer' and 'koji_builds' in result:

                  values['taskBuilds'] = [

-                         server.getBuild(int(buildID)) for buildID in result['koji_builds']]

+                     server.getBuild(int(buildID)) for buildID in result['koji_builds']]

      else:

          values['result'] = None

          values['excClass'] = None
@@ -697,7 +737,7 @@ 

      pathinfo = koji.PathInfo(topdir=topurl)

      values['pathinfo'] = pathinfo

  

-     paths = [] # (volume, relpath) tuples

+     paths = []  # (volume, relpath) tuples

      for relname, volumes in six.iteritems(server.listTaskOutput(task['id'], all_volumes=True)):

          paths += [(volume, relname) for volume in volumes]

      values['output'] = sorted(paths, key=_sortByExtAndName)
@@ -708,10 +748,11 @@ 

  

      try:

          values['params_parsed'] = _genHTML(environ, 'taskinfo_params.chtml')

-     except:

+     except Exception:

          values['params_parsed'] = None

      return _genHTML(environ, 'taskinfo.chtml')

  

+ 

  def taskstatus(environ, taskID):

      server = _getServer(environ)

  
@@ -726,6 +767,7 @@ 

              output += '%s:%s:%s\n' % (volume, filename, file_stats['st_size'])

      return output

  

+ 

  def resubmittask(environ, taskID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -734,6 +776,7 @@ 

      newTaskID = server.resubmitTask(taskID)

      _redirect(environ, 'taskinfo?taskID=%i' % newTaskID)

  

+ 

  def canceltask(environ, taskID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -742,11 +785,13 @@ 

      server.cancelTask(taskID)

      _redirect(environ, 'taskinfo?taskID=%i' % taskID)

  

+ 

  def _sortByExtAndName(item):

      """Sort filename tuples key function, first by extension, and then by name."""

      kRoot, kExt = os.path.splitext(os.path.basename(item[1]))

      return (kExt, kRoot)

  

+ 

  def getfile(environ, taskID, name, volume='DEFAULT', offset=None, size=None):

      server = _getServer(environ)

      taskID = int(taskID)
@@ -790,7 +835,7 @@ 

          if size > (file_size - offset):

              size = file_size - offset

  

-     #environ['koji.headers'].append(['Content-Length', str(size)])

+     # environ['koji.headers'].append(['Content-Length', str(size)])

      return _chunk_file(server, environ, taskID, name, offset, size, volume)

  

  
@@ -802,7 +847,8 @@ 

          chunk_size = 1048576

          if remaining < chunk_size:

              chunk_size = remaining

-         content = server.downloadTaskOutput(taskID, name, offset=offset, size=chunk_size, volume=volume)

+         content = server.downloadTaskOutput(taskID, name,

+                                             offset=offset, size=chunk_size, volume=volume)

          if not content:

              break

          yield content
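`_chunk_file` above is a generator: it streams task output in windows of at most 1 MiB (1048576 bytes), yielding each chunk and shrinking the remaining byte count until the requested range is exhausted. The same shape, sketched against a local file instead of the hub API:

    def chunk_file(path, offset=0, remaining=None, chunk_size=1048576):
        """Yield a byte range of a file in chunks of up to chunk_size."""
        with open(path, 'rb') as f:
            f.seek(offset)
            while remaining is None or remaining > 0:
                want = chunk_size if remaining is None else min(chunk_size, remaining)
                content = f.read(want)
                if not content:
                    break  # EOF reached before the range was exhausted
                yield content
                if remaining is not None:
                    remaining -= len(content)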
@@ -810,16 +856,17 @@ 

          offset += content_length

          remaining -= content_length

  

+ 

  def tags(environ, start=None, order=None, childID=None):

      values = _initValues(environ, 'Tags', 'tags')

      server = _getServer(environ)

  

-     if order == None:

+     if order is None:

          order = 'name'

      values['order'] = order

  

      kojiweb.util.paginateMethod(server, values, 'listTags', kw=None,

-                                  start=start, dataName='tags', prefix='tag', order=order)

+                                 start=start, dataName='tags', prefix='tag', order=order)

  

      if environ['koji.currentUser']:

          values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
@@ -830,20 +877,23 @@ 

  

      return _genHTML(environ, 'tags.chtml')

  

+ 

  _PREFIX_CHARS = [chr(char) for char in list(range(48, 58)) + list(range(97, 123))]
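Aside: the comprehension above spells out the prefix index by ASCII code — 48-57 are the digits and 97-122 the lowercase letters, so `_PREFIX_CHARS` is `['0'..'9'] + ['a'..'z']`, equivalent to:

    import string
    prefix_chars = [chr(c) for c in list(range(48, 58)) + list(range(97, 123))]
    assert prefix_chars == list(string.digits + string.ascii_lowercase)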

  

- def packages(environ, tagID=None, userID=None, order='package_name', start=None, prefix=None, inherited='1'):

+ 

+ def packages(environ, tagID=None, userID=None, order='package_name', start=None, prefix=None,

+              inherited='1'):

      values = _initValues(environ, 'Packages', 'packages')

      server = _getServer(environ)

      tag = None

-     if tagID != None:

+     if tagID is not None:

          if tagID.isdigit():

              tagID = int(tagID)

          tag = server.getTag(tagID, strict=True)

      values['tagID'] = tagID

      values['tag'] = tag

      user = None

-     if userID != None:

+     if userID is not None:

          if userID.isdigit():

              userID = int(userID)

          user = server.getUser(userID, strict=True)
@@ -859,21 +909,26 @@ 

      values['inherited'] = inherited

  

      kojiweb.util.paginateMethod(server, values, 'listPackages',

-                                     kw={'tagID': tagID, 'userID': userID, 'prefix': prefix, 'inherited': bool(inherited)},

-                                     start=start, dataName='packages', prefix='package', order=order)

+                                 kw={'tagID': tagID,

+                                     'userID': userID,

+                                     'prefix': prefix,

+                                     'inherited': bool(inherited)},

+                                 start=start, dataName='packages', prefix='package', order=order)

  

      values['chars'] = _PREFIX_CHARS

  

      return _genHTML(environ, 'packages.chtml')

  

- def packageinfo(environ, packageID, tagOrder='name', tagStart=None, buildOrder='-completion_time', buildStart=None):

+ 

+ def packageinfo(environ, packageID, tagOrder='name', tagStart=None, buildOrder='-completion_time',

+                 buildStart=None):

      values = _initValues(environ, 'Package Info', 'packages')

      server = _getServer(environ)

  

      if packageID.isdigit():

          packageID = int(packageID)

      package = server.getPackage(packageID)

-     if package == None:

+     if package is None:

          raise koji.GenericError('invalid package ID: %s' % packageID)

  

      values['title'] = package['name'] + ' | Package Info'
@@ -884,11 +939,14 @@ 

      kojiweb.util.paginateMethod(server, values, 'listTags', kw={'package': package['id']},

                                  start=tagStart, dataName='tags', prefix='tag', order=tagOrder)

      kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'packageID': package['id']},

-                                 start=buildStart, dataName='builds', prefix='build', order=buildOrder)

+                                 start=buildStart, dataName='builds', prefix='build',

+                                 order=buildOrder)

  

      return _genHTML(environ, 'packageinfo.chtml')

  

- def taginfo(environ, tagID, all='0', packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None, childID=None):

+ 

+ def taginfo(environ, tagID, all='0', packageOrder='package_name', packageStart=None,

+             buildOrder='-completion_time', buildStart=None, childID=None):

      values = _initValues(environ, 'Tag Info', 'tags')

      server = _getServer(environ)

  
@@ -929,7 +987,7 @@ 

      values['external_repos'] = server.getExternalRepoList(tag['id'])

  

      child = None

-     if childID != None:

+     if childID is not None:

          child = server.getTag(int(childID), strict=True)

      values['child'] = child

  
@@ -943,6 +1001,7 @@ 

  

      return _genHTML(environ, 'taginfo.chtml')

  

+ 

  def tagcreate(environ):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -978,6 +1037,7 @@ 

  

          return _genHTML(environ, 'tagedit.chtml')

  

+ 

  def tagedit(environ, tagID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -986,7 +1046,7 @@ 

  

      tagID = int(tagID)

      tag = server.getTag(tagID)

-     if tag == None:

+     if tag is None:

          raise koji.GenericError('no tag with ID: %i' % tagID)

  

      form = environ['koji.form']
@@ -1020,19 +1080,21 @@ 

  

          return _genHTML(environ, 'tagedit.chtml')

  

+ 

  def tagdelete(environ, tagID):

      server = _getServer(environ)

      _assertLogin(environ)

  

      tagID = int(tagID)

      tag = server.getTag(tagID)

-     if tag == None:

+     if tag is None:

          raise koji.GenericError('no tag with ID: %i' % tagID)

  

      server.deleteTag(tag['id'])

  

      _redirect(environ, 'tags')

  

+ 

  def tagparent(environ, tagID, parentID, action):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1071,14 +1133,16 @@ 

                  if datum['priority'] > maxPriority:

                      maxPriority = datum['priority']

              values['maxPriority'] = maxPriority

-             inheritanceData = [datum for datum in  inheritanceData \

+             inheritanceData = [datum for datum in inheritanceData

                                 if datum['parent_id'] == parent['id']]

              if len(inheritanceData) == 0:

                  values['inheritanceData'] = None

              elif len(inheritanceData) == 1:

                  values['inheritanceData'] = inheritanceData[0]

              else:

-                 raise koji.GenericError('tag %i has tag %i listed as a parent more than once' % (tag['id'], parent['id']))

+                 raise koji.GenericError(

+                     'tag %i has tag %i listed as a parent more than once' %

+                     (tag['id'], parent['id']))

  

              return _genHTML(environ, 'tagparent.chtml')

      elif action == 'remove':
@@ -1096,6 +1160,7 @@ 

  

      _redirect(environ, 'taginfo?tagID=%i' % tag['id'])

  

+ 

  def externalrepoinfo(environ, extrepoID):

      values = _initValues(environ, 'External Repo Info', 'tags')

      server = _getServer(environ)
@@ -1111,6 +1176,7 @@ 

  

      return _genHTML(environ, 'externalrepoinfo.chtml')

  

+ 

  def buildinfo(environ, buildID):

      values = _initValues(environ, 'Build Info', 'builds')

      server = _getServer(environ)
@@ -1135,7 +1201,8 @@ 

          for archive in archives:

              if btype == 'maven':

                  archive['display'] = archive['filename']

-                 archive['dl_url'] = '/'.join([pathinfo.mavenbuild(build), pathinfo.mavenfile(archive)])

+                 archive['dl_url'] = '/'.join([pathinfo.mavenbuild(build),

+                                               pathinfo.mavenfile(archive)])

              elif btype == 'win':

                  archive['display'] = pathinfo.winfile(archive)

                  archive['dl_url'] = '/'.join([pathinfo.winbuild(build), pathinfo.winfile(archive)])
@@ -1171,7 +1238,8 @@ 

          # get the summary, description, and changelogs from the built srpm

          # if the build is not yet complete

          if build['state'] != koji.BUILD_STATES['COMPLETE']:

-             srpm_tasks = server.listTasks(opts={'parent': task['id'], 'method': 'buildSRPMFromSCM'})

+             srpm_tasks = server.listTasks(opts={'parent': task['id'],

+                                                 'method': 'buildSRPMFromSCM'})

              if srpm_tasks:

                  srpm_task = srpm_tasks[0]

                  if srpm_task['state'] == koji.TASK_STATES['CLOSED']:
@@ -1181,12 +1249,14 @@ 

                              srpm_path = output

                              break

                      if srpm_path:

-                         srpm_headers = server.getRPMHeaders(taskID=srpm_task['id'], filepath=srpm_path,

+                         srpm_headers = server.getRPMHeaders(taskID=srpm_task['id'],

+                                                             filepath=srpm_path,

                                                              headers=['summary', 'description'])

                          if srpm_headers:

                              values['summary'] = koji.fixEncoding(srpm_headers['summary'])

                              values['description'] = koji.fixEncoding(srpm_headers['description'])

-                         changelog = server.getChangelogEntries(taskID=srpm_task['id'], filepath=srpm_path)

+                         changelog = server.getChangelogEntries(taskID=srpm_task['id'],

+                                                                filepath=srpm_path)

                          if changelog:

                              values['changelog'] = changelog

      else:
@@ -1226,7 +1296,7 @@ 

              values['start_time'] = task['start_time']

      if build['state'] == koji.BUILD_STATES['BUILDING']:

          avgDuration = server.getAverageBuildDuration(build['package_id'])

-         if avgDuration != None:

+         if avgDuration is not None:

              avgDelta = datetime.timedelta(seconds=avgDuration)

              startTime = datetime.datetime.fromtimestamp(build['creation_ts'])

              values['estCompletion'] = startTime + avgDelta
@@ -1236,7 +1306,9 @@ 

      values['pathinfo'] = pathinfo

      return _genHTML(environ, 'buildinfo.chtml')

  

- def builds(environ, userID=None, tagID=None, packageID=None, state=None, order='-build_id', start=None, prefix=None, inherited='1', latest='1', type=None):

+ 

+ def builds(environ, userID=None, tagID=None, packageID=None, state=None, order='-build_id',

+            start=None, prefix=None, inherited='1', latest='1', type=None):

      values = _initValues(environ, 'Builds', 'builds')

      server = _getServer(environ)

  
@@ -1271,7 +1343,7 @@ 

  

      if state == 'all':

          state = None

-     elif state != None:

+     elif state is not None:

          state = int(state)

      values['state'] = state

  
@@ -1304,21 +1376,27 @@ 

  

      if tag:

          # don't need to consider 'state' here, since only completed builds would be tagged

-         kojiweb.util.paginateResults(server, values, 'listTagged', kw={'tag': tag['id'], 'package': (package and package['name'] or None),

-                                                                        'owner': (user and user['name'] or None),

-                                                                        'type': type,

-                                                                        'inherit': bool(inherited), 'latest': bool(latest), 'prefix': prefix},

+         kojiweb.util.paginateResults(server, values, 'listTagged',

+                                      kw={'tag': tag['id'],

+                                          'package': (package and package['name'] or None),

+                                          'owner': (user and user['name'] or None),

+                                          'type': type,

+                                          'inherit': bool(inherited), 'latest': bool(latest),

+                                          'prefix': prefix},

                                       start=start, dataName='builds', prefix='build', order=order)

      else:

-         kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': (user and user['id'] or None), 'packageID': (package and package['id'] or None),

-                                                                       'type': type,

-                                                                       'state': state, 'prefix': prefix},

+         kojiweb.util.paginateMethod(server, values, 'listBuilds',

+                                     kw={'userID': (user and user['id'] or None),

+                                         'packageID': (package and package['id'] or None),

+                                         'type': type,

+                                         'state': state, 'prefix': prefix},

                                      start=start, dataName='builds', prefix='build', order=order)

  

      values['chars'] = _PREFIX_CHARS

  

      return _genHTML(environ, 'builds.chtml')

  

+ 

  def users(environ, order='name', start=None, prefix=None):

      values = _initValues(environ, 'Users', 'users')

      server = _getServer(environ)
@@ -1338,7 +1416,9 @@ 

  

      return _genHTML(environ, 'users.chtml')

  

- def userinfo(environ, userID, packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None):

+ 

+ def userinfo(environ, userID, packageOrder='package_name', packageStart=None,

+              buildOrder='-completion_time', buildStart=None):

      values = _initValues(environ, 'User Info', 'users')

      server = _getServer(environ)

  
@@ -1350,17 +1430,23 @@ 

  

      values['user'] = user

      values['userID'] = userID

-     values['taskCount'] = server.listTasks(opts={'owner': user['id'], 'parent': None}, queryOpts={'countOnly': True})

+     values['taskCount'] = server.listTasks(opts={'owner': user['id'], 'parent': None},

+                                            queryOpts={'countOnly': True})

  

-     kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id'], 'with_dups': True},

-                                  start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10)

+     kojiweb.util.paginateResults(server, values, 'listPackages',

+                                  kw={'userID': user['id'], 'with_dups': True},

+                                  start=packageStart, dataName='packages', prefix='package',

+                                  order=packageOrder, pageSize=10)

  

      kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': user['id']},

-                                 start=buildStart, dataName='builds', prefix='build', order=buildOrder, pageSize=10)

+                                 start=buildStart, dataName='builds', prefix='build',

+                                 order=buildOrder, pageSize=10)

  

      return _genHTML(environ, 'userinfo.chtml')

  

- def rpminfo(environ, rpmID, fileOrder='name', fileStart=None, buildrootOrder='-id', buildrootStart=None):

+ 

+ def rpminfo(environ, rpmID, fileOrder='name', fileStart=None, buildrootOrder='-id',

+             buildrootStart=None):

      values = _initValues(environ, 'RPM Info', 'builds')

      server = _getServer(environ)

  
@@ -1369,15 +1455,15 @@ 

  

      values['title'] = '%(name)s-%%s%(version)s-%(release)s.%(arch)s.rpm' % rpm + ' | RPM Info'

      epochStr = ''

-     if rpm['epoch'] != None:

+     if rpm['epoch'] is not None:

          epochStr = '%s:' % rpm['epoch']

      values['title'] = values['title'] % epochStr

  

      build = None

-     if rpm['build_id'] != None:

+     if rpm['build_id'] is not None:

          build = server.getBuild(rpm['build_id'])

      builtInRoot = None

-     if rpm['buildroot_id'] != None:

+     if rpm['buildroot_id'] is not None:

          builtInRoot = server.getBuildroot(rpm['buildroot_id'])

      if rpm['external_repo_id'] == 0:

          dep_names = {
@@ -1398,8 +1484,11 @@ 

          values['summary'] = koji.fixEncoding(headers.get('summary'))

          values['description'] = koji.fixEncoding(headers.get('description'))

          values['license'] = koji.fixEncoding(headers.get('license'))

-     buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots', kw={'rpmID': rpm['id']},

-                                              start=buildrootStart, dataName='buildroots', prefix='buildroot',

+     buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots',

+                                              kw={'rpmID': rpm['id']},

+                                              start=buildrootStart,

+                                              dataName='buildroots',

+                                              prefix='buildroot',

                                               order=buildrootOrder)

  

      values['rpmID'] = rpmID
@@ -1413,7 +1502,9 @@ 

  

      return _genHTML(environ, 'rpminfo.chtml')

  

- def archiveinfo(environ, archiveID, fileOrder='name', fileStart=None, buildrootOrder='-id', buildrootStart=None):

+ 

+ def archiveinfo(environ, archiveID, fileOrder='name', fileStart=None, buildrootOrder='-id',

+                 buildrootStart=None):

      values = _initValues(environ, 'Archive Info', 'builds')

      server = _getServer(environ)

  
@@ -1428,12 +1519,15 @@ 

      if 'relpath' in archive:

          wininfo = True

      builtInRoot = None

-     if archive['buildroot_id'] != None:

+     if archive['buildroot_id'] is not None:

          builtInRoot = server.getBuildroot(archive['buildroot_id'])

      kojiweb.util.paginateMethod(server, values, 'listArchiveFiles', args=[archive['id']],

                                  start=fileStart, dataName='files', prefix='file', order=fileOrder)

-     buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots', kw={'archiveID': archive['id']},

-                                              start=buildrootStart, dataName='buildroots', prefix='buildroot',

+     buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots',

+                                              kw={'archiveID': archive['id']},

+                                              start=buildrootStart,

+                                              dataName='buildroots',

+                                              prefix='buildroot',

                                               order=buildrootOrder)

  

      values['title'] = archive['filename'] + ' | Archive Info'
@@ -1446,11 +1540,13 @@ 

      values['wininfo'] = wininfo

      values['builtInRoot'] = builtInRoot

      values['buildroots'] = buildroots

-     values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit':1})

-     values['show_archive_components'] = server.listArchives(imageID=archive['id'], queryOpts={'limit':1})

+     values['show_rpm_components'] = server.listRPMs(imageID=archive['id'], queryOpts={'limit': 1})

+     values['show_archive_components'] = server.listArchives(imageID=archive['id'],

+                                                             queryOpts={'limit': 1})

  

      return _genHTML(environ, 'archiveinfo.chtml')

  

+ 

  def fileinfo(environ, filename, rpmID=None, archiveID=None):

      values = _initValues(environ, 'File Info', 'builds')

      server = _getServer(environ)
@@ -1485,13 +1581,14 @@ 

  

      return _genHTML(environ, 'fileinfo.chtml')

  

+ 

  def cancelbuild(environ, buildID):

      server = _getServer(environ)

      _assertLogin(environ)

  

      buildID = int(buildID)

      build = server.getBuild(buildID)

-     if build == None:

+     if build is None:

          raise koji.GenericError('unknown build ID: %i' % buildID)

  

      result = server.cancelBuild(build['id'])
@@ -1500,6 +1597,7 @@ 

  

      _redirect(environ, 'buildinfo?buildID=%i' % build['id'])

  

+ 

  def hosts(environ, state='enabled', start=None, order='name'):

      values = _initValues(environ, 'Hosts', 'hosts')

      server = _getServer(environ)
@@ -1530,6 +1628,7 @@ 

  

      return _genHTML(environ, 'hosts.chtml')

  

+ 

  def hostinfo(environ, hostID=None, userID=None):

      values = _initValues(environ, 'Host Info', 'hosts')

      server = _getServer(environ)
@@ -1538,7 +1637,7 @@ 

          if hostID.isdigit():

              hostID = int(hostID)

          host = server.getHost(hostID)

-         if host == None:

+         if host is None:

              raise koji.GenericError('invalid host ID: %s' % hostID)

      elif userID:

          userID = int(userID)
@@ -1546,7 +1645,7 @@ 

          host = None

          if hosts:

              host = hosts[0]

-         if host == None:

+         if host is None:

              raise koji.GenericError('invalid host ID: %s' % userID)

      else:

          raise koji.GenericError('hostID or userID must be provided')
@@ -1556,7 +1655,8 @@ 

      channels = server.listChannels(host['id'])

      channels.sort(key=_sortbyname)

      buildroots = server.listBuildroots(hostID=host['id'],

-                                        state=[state[1] for state in koji.BR_STATES.items() if state[0] != 'EXPIRED'])

+                                        state=[state[1] for state in koji.BR_STATES.items()

+                                               if state[0] != 'EXPIRED'])

      buildroots.sort(key=lambda x: x['create_event_time'], reverse=True)

  

      values['host'] = host
@@ -1570,13 +1670,14 @@ 

  

      return _genHTML(environ, 'hostinfo.chtml')

  

+ 

  def hostedit(environ, hostID):

      server = _getServer(environ)

      _assertLogin(environ)

  

      hostID = int(hostID)

      host = server.getHost(hostID)

-     if host == None:

+     if host is None:

          raise koji.GenericError('no host with ID: %i' % hostID)

  

      form = environ['koji.form']
@@ -1619,6 +1720,7 @@ 

  

          return _genHTML(environ, 'hostedit.chtml')

  

+ 

  def disablehost(environ, hostID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1629,6 +1731,7 @@ 

  

      _redirect(environ, 'hostinfo?hostID=%i' % host['id'])

  

+ 

  def enablehost(environ, hostID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1639,21 +1742,22 @@ 

  

      _redirect(environ, 'hostinfo?hostID=%i' % host['id'])

  

+ 

  def channelinfo(environ, channelID):

      values = _initValues(environ, 'Channel Info', 'hosts')

      server = _getServer(environ)

  

      channelID = int(channelID)

      channel = server.getChannel(channelID)

-     if channel == None:

+     if channel is None:

          raise koji.GenericError('invalid channel ID: %i' % channelID)

  

      values['title'] = channel['name'] + ' | Channel Info'

  

      states = [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')]

      values['taskCount'] = \

-             server.listTasks(opts={'channel_id': channelID, 'state': states},

-                              queryOpts={'countOnly': True})

+         server.listTasks(opts={'channel_id': channelID, 'state': states},

+                          queryOpts={'countOnly': True})

  

      hosts = server.listHosts(channelID=channelID)

      hosts.sort(key=_sortbyname)
@@ -1665,14 +1769,16 @@ 

  

      return _genHTML(environ, 'channelinfo.chtml')

  

- def buildrootinfo(environ, buildrootID, builtStart=None, builtOrder=None, componentStart=None, componentOrder=None):

+ 

+ def buildrootinfo(environ, buildrootID, builtStart=None, builtOrder=None, componentStart=None,

+                   componentOrder=None):

      values = _initValues(environ, 'Buildroot Info', 'hosts')

      server = _getServer(environ)

  

      buildrootID = int(buildrootID)

      buildroot = server.getBuildroot(buildrootID)

  

-     if buildroot == None:

+     if buildroot is None:

          raise koji.GenericError('unknown buildroot ID: %i' % buildrootID)

  

      elif buildroot['br_type'] == koji.BR_TYPES['STANDARD']:
@@ -1688,6 +1794,7 @@ 

  

      return _genHTML(environ, template)

  

+ 

  def rpmlist(environ, type, buildrootID=None, imageID=None, start=None, order='nvr'):

      """

      rpmlist requires a buildrootID OR an imageID to be passed in. From one
@@ -1698,11 +1805,11 @@ 

      values = _initValues(environ, 'RPM List', 'hosts')

      server = _getServer(environ)

  

-     if buildrootID != None:

+     if buildrootID is not None:

          buildrootID = int(buildrootID)

          buildroot = server.getBuildroot(buildrootID)

          values['buildroot'] = buildroot

-         if buildroot == None:

+         if buildroot is None:

              raise koji.GenericError('unknown buildroot ID: %i' % buildrootID)

  

          if type == 'component':
@@ -1718,13 +1825,13 @@ 

          else:

              raise koji.GenericError('unrecognized type of rpmlist')

  

-     elif imageID != None:

+     elif imageID is not None:

          imageID = int(imageID)

          values['image'] = server.getArchive(imageID)

          # If/When future image types are supported, add elifs here if needed.

          if type == 'image':

              kojiweb.util.paginateMethod(server, values, 'listRPMs',

-                                         kw={'imageID': imageID}, \

+                                         kw={'imageID': imageID},

                                          start=start, dataName='rpms',

                                          prefix='rpm', order=order)

          else:
@@ -1739,6 +1846,7 @@ 

  

      return _genHTML(environ, 'rpmlist.chtml')

  

+ 

  def archivelist(environ, type, buildrootID=None, imageID=None, start=None, order='filename'):

      values = _initValues(environ, 'Archive List', 'hosts')

      server = _getServer(environ)
@@ -1748,15 +1856,19 @@ 

          buildroot = server.getBuildroot(buildrootID)

          values['buildroot'] = buildroot

  

-         if buildroot == None:

+         if buildroot is None:

              raise koji.GenericError('unknown buildroot ID: %i' % buildrootID)

  

          if type == 'component':

-             kojiweb.util.paginateMethod(server, values, 'listArchives', kw={'componentBuildrootID': buildroot['id']},

-                                         start=start, dataName='archives', prefix='archive', order=order)

+             kojiweb.util.paginateMethod(server, values, 'listArchives',

+                                         kw={'componentBuildrootID': buildroot['id']},

+                                         start=start, dataName='archives', prefix='archive',

+                                         order=order)

          elif type == 'built':

-             kojiweb.util.paginateMethod(server, values, 'listArchives', kw={'buildrootID': buildroot['id']},

-                                     start=start, dataName='archives', prefix='archive', order=order)

+             kojiweb.util.paginateMethod(server, values, 'listArchives',

+                                         kw={'buildrootID': buildroot['id']},

+                                         start=start, dataName='archives', prefix='archive',

+                                         order=order)

          else:

              raise koji.GenericError('unrecognized type of archivelist')

      elif imageID is not None:
@@ -1765,7 +1877,8 @@ 

          # If/When future image types are supported, add elifs here if needed.

          if type == 'image':

              kojiweb.util.paginateMethod(server, values, 'listArchives', kw={'imageID': imageID},

-                                         start=start, dataName='archives', prefix='archive', order=order)

+                                         start=start, dataName='archives', prefix='archive',

+                                         order=order)

          else:

              raise koji.GenericError('unrecognized type of archivelist')

      else:
@@ -1777,6 +1890,7 @@ 

  

      return _genHTML(environ, 'archivelist.chtml')

  

+ 

  def buildtargets(environ, start=None, order='name'):

      values = _initValues(environ, 'Build Targets', 'buildtargets')

      server = _getServer(environ)
@@ -1792,18 +1906,19 @@ 

  

      return _genHTML(environ, 'buildtargets.chtml')

  

+ 

  def buildtargetinfo(environ, targetID=None, name=None):

      values = _initValues(environ, 'Build Target Info', 'buildtargets')

      server = _getServer(environ)

  

      target = None

-     if targetID != None:

+     if targetID is not None:

          targetID = int(targetID)

          target = server.getBuildTarget(targetID)

-     elif name != None:

+     elif name is not None:

          target = server.getBuildTarget(name)

  

-     if target == None:

+     if target is None:

          raise koji.GenericError('invalid build target: %s' % (targetID or name))

  

      values['title'] = target['name'] + ' | Build Target Info'
@@ -1821,6 +1936,7 @@ 

  

      return _genHTML(environ, 'buildtargetinfo.chtml')

  

+ 

  def buildtargetedit(environ, targetID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1828,7 +1944,7 @@ 

      targetID = int(targetID)

  

      target = server.getBuildTarget(targetID)

-     if target == None:

+     if target is None:

          raise koji.GenericError('invalid build target: %s' % targetID)

  

      form = environ['koji.form']
@@ -1837,12 +1953,12 @@ 

          name = form.getfirst('name')

          buildTagID = int(form.getfirst('buildTag'))

          buildTag = server.getTag(buildTagID)

-         if buildTag == None:

+         if buildTag is None:

              raise koji.GenericError('invalid tag ID: %i' % buildTagID)

  

          destTagID = int(form.getfirst('destTag'))

          destTag = server.getTag(destTagID)

-         if destTag == None:

+         if destTag is None:

              raise koji.GenericError('invalid tag ID: %i' % destTagID)

  

          server.editBuildTarget(target['id'], name, buildTag['id'], destTag['id'])
@@ -1860,6 +1976,7 @@ 

  

          return _genHTML(environ, 'buildtargetedit.chtml')

  

+ 

  def buildtargetcreate(environ):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1877,7 +1994,7 @@ 

          server.createBuildTarget(name, buildTagID, destTagID)

          target = server.getBuildTarget(name)

  

-         if target == None:

+         if target is None:

              raise koji.GenericError('error creating build target "%s"' % name)

  

          _redirect(environ, 'buildtargetinfo?targetID=%i' % target['id'])
@@ -1894,6 +2011,7 @@ 

  

          return _genHTML(environ, 'buildtargetedit.chtml')

  

+ 

  def buildtargetdelete(environ, targetID):

      server = _getServer(environ)

      _assertLogin(environ)
@@ -1901,18 +2019,20 @@ 

      targetID = int(targetID)

  

      target = server.getBuildTarget(targetID)

-     if target == None:

+     if target is None:

          raise koji.GenericError('invalid build target: %i' % targetID)

  

      server.deleteBuildTarget(target['id'])

  

      _redirect(environ, 'buildtargets')

  

+ 

  def reports(environ):

      _getServer(environ)

      _initValues(environ, 'Reports', 'reports')

      return _genHTML(environ, 'reports.chtml')

  

+ 

  def buildsbyuser(environ, start=None, order='-builds'):

      values = _initValues(environ, 'Builds by User', 'reports')

      server = _getServer(environ)
@@ -1940,6 +2060,7 @@ 

  

      return _genHTML(environ, 'buildsbyuser.chtml')

  

+ 

  def rpmsbyhost(environ, start=None, order=None, hostArch=None, rpmArch=None):

      values = _initValues(environ, 'RPMs by Host', 'reports')

      server = _getServer(environ)
@@ -1969,7 +2090,7 @@ 

      values['rpmArch'] = rpmArch

      values['rpmArchList'] = hostArchList + ['noarch', 'src']

  

-     if order == None:

+     if order is None:

          order = '-rpms'

      values['order'] = order

  
@@ -1981,6 +2102,7 @@ 

  

      return _genHTML(environ, 'rpmsbyhost.chtml')

  

+ 

  def packagesbyuser(environ, start=None, order=None):

      values = _initValues(environ, 'Packages by User', 'reports')

      server = _getServer(environ)
@@ -1998,7 +2120,7 @@ 

          if numPackages > maxPackages:

              maxPackages = numPackages

  

-     if order == None:

+     if order is None:

          order = '-packages'

      values['order'] = order

  
@@ -2010,6 +2132,7 @@ 

  

      return _genHTML(environ, 'packagesbyuser.chtml')

  

+ 

  def tasksbyhost(environ, start=None, order='-tasks', hostArch=None):

      values = _initValues(environ, 'Tasks by Host', 'reports')

      server = _getServer(environ)
@@ -2046,6 +2169,7 @@ 

  

      return _genHTML(environ, 'tasksbyhost.chtml')

  

+ 

  def tasksbyuser(environ, start=None, order='-tasks'):

      values = _initValues(environ, 'Tasks by User', 'reports')

      server = _getServer(environ)
@@ -2074,6 +2198,7 @@ 

  

      return _genHTML(environ, 'tasksbyuser.chtml')

  

+ 

  def buildsbystatus(environ, days='7'):

      values = _initValues(environ, 'Builds by Status', 'reports')

      server = _getServer(environ)
@@ -2088,9 +2213,12 @@ 

  

      server.multicall = True

      # use taskID=-1 to filter out builds with a null task_id (imported rather than built in koji)

-     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['COMPLETE'], taskID=-1, queryOpts={'countOnly': True})

-     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['FAILED'], taskID=-1, queryOpts={'countOnly': True})

-     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['CANCELED'], taskID=-1, queryOpts={'countOnly': True})

+     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['COMPLETE'], taskID=-1,

+                       queryOpts={'countOnly': True})

+     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['FAILED'], taskID=-1,

+                       queryOpts={'countOnly': True})

+     server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['CANCELED'], taskID=-1,

+                       queryOpts={'countOnly': True})

      [[numSucceeded], [numFailed], [numCanceled]] = server.multiCall()
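For readers new to the idiom above: setting `multicall = True` puts a koji session into batching mode, where each method call is queued instead of sent, and `multiCall()` then fires one combined request and returns a list with one entry per queued call; a successful result comes back as a one-element list, hence the `[[numSucceeded], ...]` unpacking. A sketch (the hub URL is a placeholder):

    import koji

    session = koji.ClientSession('https://koji.example.com/kojihub')  # placeholder URL
    session.multicall = True  # start queueing instead of executing
    for state in ('COMPLETE', 'FAILED', 'CANCELED'):
        session.listBuilds(state=koji.BUILD_STATES[state],
                           queryOpts={'countOnly': True})
    # One round trip; each success is wrapped in a one-element list.
    [[complete], [failed], [canceled]] = session.multiCall()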

  

      values['numSucceeded'] = numSucceeded
@@ -2109,6 +2237,7 @@ 

  

      return _genHTML(environ, 'buildsbystatus.chtml')

  

+ 

  def buildsbytarget(environ, days='7', start=None, order='-builds'):

      values = _initValues(environ, 'Builds by Target', 'reports')

      server = _getServer(environ)
@@ -2148,12 +2277,14 @@ 

  

      return _genHTML(environ, 'buildsbytarget.chtml')

  

+ 

  def _filter_hosts_by_arch(hosts, arch):

      if arch == '__all__':

          return hosts

      else:

          return [h for h in hosts if arch in h['arches'].split()]

  

+ 

  def clusterhealth(environ, arch='__all__'):

      values = _initValues(environ, 'Cluster health', 'reports')

      server = _getServer(environ)
@@ -2204,18 +2335,19 @@ 

      values['channels'] = sorted(channels, key=lambda x: x['name'])

      return _genHTML(environ, 'clusterhealth.chtml')

  

+ 

  def recentbuilds(environ, user=None, tag=None, package=None):

      values = _initValues(environ, 'Recent Build RSS')

      server = _getServer(environ)

  

      tagObj = None

-     if tag != None:

+     if tag is not None:

          if tag.isdigit():

              tag = int(tag)

          tagObj = server.getTag(tag)

  

      userObj = None

-     if user != None:

+     if user is not None:

          if user.isdigit():

              user = int(user)

          userObj = server.getUser(user)
@@ -2226,8 +2358,9 @@ 

              package = int(package)

          packageObj = server.getPackage(package)

  

-     if tagObj != None:

-         builds = server.listTagged(tagObj['id'], inherit=True, package=(packageObj and packageObj['name'] or None),

+     if tagObj is not None:

+         builds = server.listTagged(tagObj['id'], inherit=True,

+                                    package=(packageObj and packageObj['name'] or None),

                                     owner=(userObj and userObj['name'] or None))

          builds.sort(key=kojiweb.util.sortByKeyFuncNoneGreatest('completion_time'), reverse=True)

          builds = builds[:20]
@@ -2273,6 +2406,7 @@ 

      environ['koji.headers'].append(['Content-Type', 'text/xml'])

      return _genHTML(environ, 'recentbuilds.chtml')

  

+ 

  _infoURLs = {'package': 'packageinfo?packageID=%(id)i',

               'build': 'buildinfo?buildID=%(id)i',

               'tag': 'taginfo?tagID=%(id)i',
@@ -2299,6 +2433,7 @@ 

      # any type not listed will default to 'name'

  }

  

+ 

  def search(environ, start=None, order=None):

      values = _initValues(environ, 'Search', 'search')

      server = _getServer(environ)
@@ -2323,7 +2458,7 @@ 

          if match == 'regexp':

              try:

                  re.compile(terms)

-             except:

+             except Exception:

                  values['error'] = 'Invalid regular expression'

                  return _genHTML(environ, 'search.chtml')

  
@@ -2335,7 +2470,8 @@ 

          values['order'] = order

  

          results = kojiweb.util.paginateMethod(server, values, 'search', args=(terms, type, match),

-                                               start=start, dataName='results', prefix='result', order=order)

+                                               start=start, dataName='results', prefix='result',

+                                               order=order)

          if not start and len(results) == 1:

              # if we found exactly one result, skip the result list and redirect to the info page

              # (you're feeling lucky)

file modified
+16 -13
@@ -44,7 +44,7 @@ 

  class Dispatcher(object):

  

      def __init__(self):

-         #we can't do much setup until we get a request

+         # we can't do much setup until we get a request

          self.firstcall = True

          self.options = {}

          self.startup_error = None
@@ -66,7 +66,7 @@ 

          self.logger = logging.getLogger("koji.web")

  

      cfgmap = [

-         #option, type, default

+         # option, type, default

          ['SiteName', 'string', None],

          ['KojiHubURL', 'string', 'http://localhost/kojihub'],

          ['KojiFilesURL', 'string', 'http://localhost/kojifiles'],
@@ -96,7 +96,9 @@ 

          ['LibPath', 'string', '/usr/share/koji-web/lib'],

  

          ['LogLevel', 'string', 'WARNING'],

-         ['LogFormat', 'string', '%(msecs)d [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s %(name)s: %(message)s'],

+         ['LogFormat', 'string',

+          '%(msecs)d [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s '

+          '%(name)s: %(message)s'],

  

          ['Tasks', 'list', []],

          ['ToplevelTasks', 'list', []],
@@ -156,7 +158,7 @@ 

      def setup_logging2(self, environ):

          """Adjust logging based on configuration options"""

          opts = self.options

-         #determine log level

+         # determine log level

          level = opts['LogLevel']

          valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')

          # the config value can be a single level name or a series of
@@ -172,7 +174,7 @@ 

                  default = level

              if level not in valid_levels:

                  raise koji.GenericError("Invalid log level: %s" % level)

-             #all our loggers start with koji

+             # all our loggers start with koji

              if name == '':

                  name = 'koji'

                  default = level
@@ -187,7 +189,7 @@ 

          if opts.get('KojiDebug'):

              logger.setLevel(logging.DEBUG)

          elif default is None:

-             #LogLevel did not configure a default level

+             # LogLevel did not configure a default level

              logger.setLevel(logging.WARNING)

          self.formatter = HubFormatter(opts['LogFormat'])

          self.formatter.environ = environ
@@ -205,7 +207,7 @@ 

                  args = inspect.getargspec(val)

                  if not args[0] or args[0][0] != 'environ':

                      continue

-             except:

+             except Exception:

                  tb_str = ''.join(traceback.format_exception(*sys.exc_info()))

                  self.logger.error(tb_str)

              self.handler_index[name] = val
@@ -213,7 +215,7 @@ 

      def prep_handler(self, environ):

          path_info = environ['PATH_INFO']

          if not path_info:

-             #empty path info (no trailing slash) breaks our relative urls

+             # empty path info (no trailing slash) breaks our relative urls

              environ['koji.redirect'] = environ['REQUEST_URI'] + '/'

              raise ServerRedirect

          elif path_info == '/':
@@ -225,9 +227,11 @@ 

          func = self.handler_index.get(method)

          if not func:

              raise URLNotFound

-         #parse form args

+         # parse form args

          data = {}

-         fs = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ.copy(), keep_blank_values=True)

+         fs = cgi.FieldStorage(fp=environ['wsgi.input'],

+                               environ=environ.copy(),

+                               keep_blank_values=True)

          for field in fs.list:

              if field.filename:

                  val = field
@@ -245,10 +249,9 @@ 

          if not varkw:

              # remove any unexpected args

              data = dslice(data, args, strict=False)

-             #TODO (warning in header or something?)

+             # TODO (warning in header or something?)

          return func, data

  

- 

      def _setup(self, environ):

          global kojiweb_handlers

          global kojiweb
@@ -318,7 +321,7 @@ 

          except (NameError, AttributeError):

              tb_str = ''.join(traceback.format_exception(*sys.exc_info()))

              self.logger.error(tb_str)

-             #fallback to simple error page

+             # fallback to simple error page

              return self.simple_error_page(message, err=tb_short)

          values = _initValues(environ, *desc)

          values['etype'] = etype

file modified
+82 -35
@@ -26,7 +26,7 @@ 

  import os

  import ssl

  import stat

- #a bunch of exception classes that explainError needs

+ # a bunch of exception classes that explainError needs

  from socket import error as socket_error

  from xml.parsers.expat import ExpatError

  
@@ -41,16 +41,18 @@ 

  class NoSuchException(Exception):

      pass

  

+ 

  try:

      # pyOpenSSL might not be around

      from OpenSSL.SSL import Error as SSL_Error

- except:

+ except Exception:

      SSL_Error = NoSuchException

  

  

  themeInfo = {}

  themeCache = {}

  

+ 

  def _initValues(environ, title='Build System Info', pageID='summary'):

      global themeInfo

      global themeCache
@@ -63,12 +65,14 @@ 

      themeCache.clear()

      themeInfo.clear()

      themeInfo['name'] = environ['koji.options'].get('KojiTheme', None)

-     themeInfo['staticdir'] = environ['koji.options'].get('KojiStaticDir', '/usr/share/koji-web/static')

+     themeInfo['staticdir'] = environ['koji.options'].get('KojiStaticDir',

+                                                          '/usr/share/koji-web/static')

  

      environ['koji.values'] = values

  

      return values

  

+ 

  def themePath(path, local=False):

      global themeInfo

      global themeCache
@@ -95,6 +99,7 @@ 

      themeCache[path, local] = ret

      return ret

  

+ 

  class DecodeUTF8(Cheetah.Filters.Filter):

      def filter(self, *args, **kw):

          """Convert all strs to unicode objects"""
@@ -106,6 +111,8 @@ 

          return result

  

  # Escape ampersands so the output can be valid XHTML

+ 

+ 

  class XHTMLFilter(DecodeUTF8):

      def filter(self, *args, **kw):

          result = super(XHTMLFilter, self).filter(*args, **kw)
@@ -116,8 +123,10 @@ 

          result = result.replace('&amp;gt;', '&gt;')

          return result

  

+ 

  TEMPLATES = {}

  

+ 

  def _genHTML(environ, fileName):

      reqdir = os.path.dirname(environ['SCRIPT_FILENAME'])

      if os.getcwd() != reqdir:
@@ -154,23 +163,26 @@ 

      else:

          return tmpl_inst.respond()

  

+ 

  def _truncTime():

      now = datetime.datetime.now()

      # truncate to the nearest 15 minutes

      return now.replace(minute=(now.minute // 15 * 15), second=0, microsecond=0)

  

+ 

  def _genToken(environ, tstamp=None):

      if 'koji.currentLogin' in environ and environ['koji.currentLogin']:

          user = environ['koji.currentLogin']

      else:

          return ''

-     if tstamp == None:

+     if tstamp is None:

          tstamp = _truncTime()

      value = user + str(tstamp) + environ['koji.options']['Secret'].value

      if six.PY3:

          value = value.encode('utf-8')

      return hashlib.md5(value).hexdigest()[-8:]

  

+ 

  def _getValidTokens(environ):

      tokens = []

      now = _truncTime()
@@ -181,6 +193,7 @@ 

              tokens.append(token)

      return tokens

  

+ 

  def toggleOrder(template, sortKey, orderVar='order'):

      """

      If orderVar equals 'sortKey', return '-sortKey', else
@@ -191,6 +204,7 @@ 

      else:

          return sortKey

  

+ 

  def toggleSelected(template, var, option, checked=False):

      """

      If the passed in variable var equals the literal value in option,
@@ -206,6 +220,7 @@ 

      else:

          return ''

  

+ 

  def sortImage(template, sortKey, orderVar='order'):

      """

      Return an html img tag suitable for inclusion in the sortKey of a sortable table,
@@ -213,12 +228,15 @@ 

      """

      orderVal = template.getVar(orderVar)

      if orderVal == sortKey:

-         return '<img src="%s" class="sort" alt="ascending sort"/>' % themePath("images/gray-triangle-up.gif")

+         return '<img src="%s" class="sort" alt="ascending sort"/>' % \

+                themePath("images/gray-triangle-up.gif")

      elif orderVal == '-' + sortKey:

-         return '<img src="%s" class="sort" alt="descending sort"/>' % themePath("images/gray-triangle-down.gif")

+         return '<img src="%s" class="sort" alt="descending sort"/>' % \

+                themePath("images/gray-triangle-down.gif")

      else:

          return ''

  

+ 

  def passthrough(template, *vars):

      """

      Construct a string suitable for use as URL
@@ -232,13 +250,14 @@ 

      result = []

      for var in vars:

          value = template.getVar(var, default=None)

-         if value != None:

+         if value is not None:

              result.append('%s=%s' % (var, value))

      if result:

          return '&' + '&'.join(result)

      else:

          return ''

  

+ 

  def passthrough_except(template, *exclude):

      """

      Construct a string suitable for use as URL
@@ -251,10 +270,11 @@ 

      """

      passvars = []

      for var in template._PASSTHROUGH:

-         if not var in exclude:

+         if var not in exclude:

              passvars.append(var)

      return passthrough(template, *passvars)

  

+ 

  def sortByKeyFuncNoneGreatest(key):

      """Return a function to sort a list of maps by the given key.

      None will sort higher than all other values (instead of lower).
@@ -265,7 +285,9 @@ 

          return (v is None, v)

      return internal_key

  

- def paginateList(values, data, start, dataName, prefix=None, order=None, noneGreatest=False, pageSize=50):

+ 

+ def paginateList(values, data, start, dataName, prefix=None, order=None, noneGreatest=False,

+                  pageSize=50):

      """

      Slice the 'data' list into one page worth.  Start at offset

      'start' and limit the total number of pages to pageSize
@@ -274,7 +296,7 @@ 

      under which a number of list-related metadata variables will

      be added to the value map.

      """

-     if order != None:

+     if order is not None:

          if order.startswith('-'):

              order = order[1:]

              reverse = True
@@ -296,10 +318,12 @@ 

  

      return data

  

+ 

  def paginateMethod(server, values, methodName, args=None, kw=None,

                     start=None, dataName=None, prefix=None, order=None, pageSize=50):

-     """Paginate the results of the method with the given name when called with the given args and kws.

-     The method must support the queryOpts keyword parameter, and pagination is done in the database."""

+     """Paginate the results of the method with the given name when called with the given args and

+     kws. The method must support the queryOpts keyword parameter, and pagination is done in the

+     database."""

      if args is None:

          args = []

      if kw is None:
@@ -324,12 +348,13 @@ 

  

      return data

  

+ 

  def paginateResults(server, values, methodName, args=None, kw=None,

                      start=None, dataName=None, prefix=None, order=None, pageSize=50):

-     """Paginate the results of the method with the given name when called with the given args and kws.

-     This method should only be used when then method does not support the queryOpts command (because

-     the logic used to generate the result list prevents filtering/ordering from being done in the database).

-     The method must return a list of maps."""

+     """Paginate the results of the method with the given name when called with the given args and

+     kws. This method should only be used when then method does not support the queryOpts command

+     (because the logic used to generate the result list prevents filtering/ordering from being done

+     in the database). The method must return a list of maps."""

      if args is None:

          args = []

      if kw is None:
@@ -352,6 +377,7 @@ 

  

      return data

  

+ 

  def _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order):

      """Populate the values list with the data about the list provided."""

      values[dataName] = data
@@ -369,24 +395,29 @@ 

      totalPages = int(totalRows // pageSize)

      if totalRows % pageSize > 0:

          totalPages += 1

-     pages = [page for page in range(0, totalPages) if (abs(page - currentPage) < 100 or ((page + 1) % 100 == 0))]

+     pages = [page for page in range(0, totalPages)

+              if (abs(page - currentPage) < 100 or ((page + 1) % 100 == 0))]

      values[(prefix and prefix + 'Pages') or 'pages'] = pages

  

+ 

  def stateName(stateID):

      """Convert a numeric build state into a readable name."""

      return koji.BUILD_STATES[stateID].lower()

  

+ 

  def imageTag(name):

      """Return an img tag that loads an icon with the given name"""

      return '<img class="stateimg" src="%s" title="%s" alt="%s"/>' \

             % (themePath("images/%s.png" % name), name, name)

  

+ 

  def stateImage(stateID):

      """Return an IMG tag that loads an icon appropriate for

      the given state"""

      name = stateName(stateID)

      return imageTag(name)

  

+ 

  def brStateName(stateID):

      """Convert a numeric buildroot state into a readable name."""

      if stateID is None:
@@ -414,14 +445,17 @@ 

      else:

          return 'unknown'

  

+ 

  def taskState(stateID):

      """Convert a numeric task state into a readable name"""

      return koji.TASK_STATES[stateID].lower()

  

+ 

  formatTime = koji.formatTime

  formatTimeRSS = koji.formatTimeLong

  formatTimeLong = koji.formatTimeLong

  

+ 

  def formatTimestampDifference(start_ts, end_ts):

      diff = end_ts - start_ts

      seconds = diff % 60
@@ -431,6 +465,7 @@ 

      hours = diff

      return "%d:%02d:%02d" % (hours, minutes, seconds)

  

+ 

  def formatDep(name, version, flags):

      """Format dependency information into

      a human-readable format.  Copied from
@@ -448,9 +483,10 @@ 

              if flags & koji.RPMSENSE_EQUAL:

                  s = s + "="

              if version:

-                 s = "%s %s" %(s, version)

+                 s = "%s %s" % (s, version)

      return s

  

+ 

  def formatMode(mode):

      """Format a numeric mode into a ls-like string describing the access mode."""

      if stat.S_ISREG(mode):
@@ -485,9 +521,11 @@ 

  

      return result

  

+ 

  def formatThousands(value):

      return '{:,}'.format(value)

  

+ 

  def rowToggle(template):

      """If the value of template._rowNum is even, return 'row-even';

      if it is odd, return 'row-odd'.  Increment the value before checking it.
@@ -529,6 +567,7 @@ 

                1024: 'unpatched',

                2048: 'public key'}

  

+ 

  def formatFileFlags(flags):

      """Format rpm fileflags for display.  Returns

      a list of human-readable strings specifying the
@@ -539,6 +578,7 @@ 

              results.append(desc)

      return results

  

+ 

  def escapeHTML(value):

      """Replace special characters to the text can be displayed in

      an HTML page correctly.
@@ -551,8 +591,9 @@ 

  

      value = koji.fixEncoding(value)

      return value.replace('&', '&amp;').\

-            replace('<', '&lt;').\

-            replace('>', '&gt;')

+         replace('<', '&lt;').\

+         replace('>', '&gt;')

+ 

  

  def authToken(template, first=False, form=False):

      """Return the current authToken if it exists.
@@ -561,7 +602,7 @@ 

      If first is True, prefix it with ?, otherwise prefix it

      with &.  If no authToken exists, return an empty string."""

      token = template.getVar('authToken', default=None)

-     if token != None:

+     if token is not None:

          if form:

              return '<input type="hidden" name="a" value="%s"/>' % token

          if first:
@@ -571,6 +612,7 @@ 

      else:

          return ''

  

+ 

  def explainError(error):

      """Explain an exception in user-consumable terms

  
@@ -643,8 +685,9 @@ 

          - composer

          - empty_str_placeholder

      """

+ 

      def __init__(self, text='', size=None, need_escape=None, begin_tag='',

-                      end_tag='', composer=None, empty_str_placeholder=None):

+                  end_tag='', composer=None, empty_str_placeholder=None):

          self.text = text

          if size is None:

              self.size = len(text)
@@ -688,8 +731,9 @@ 

          - end_tag

          - composer

      """

+ 

      def __init__(self, fragments=None, need_escape=None, begin_tag='',

-                      end_tag='<br />', composer=None):

+                  end_tag='<br />', composer=None):

          if fragments is None:

              self.fragments = []

          else:
@@ -706,7 +750,7 @@ 

                  return composer(self, length, postscript)

  

              self.composer = composer_wrapper

-         self.size=self._size()

+         self.size = self._size()

  

      def default_composer(self, length=None, postscript=None):

          line_text = ''
@@ -718,7 +762,8 @@ 

              if length is None:

                  line_text += fragment.composer()

              else:

-                 if size >= length: break

+                 if size >= length:

+                     break

                  remainder_size = length - size

                  line_text += fragment.composer(remainder_size)

                  size += fragment.size
@@ -746,18 +791,19 @@ 

          _str = sep.join([str(val) for val in value])

      elif isinstance(value, dict):

          _str = sep.join(['%s=%s' % ((n == '' and "''" or n), v)

-                              for n, v in value.items()])

+                          for n, v in value.items()])

      else:

          _str = str(value)

      if _str is None:

          _str = ''

  

      return TaskResultFragment(text=_str, need_escape=need_escape,

-                                   begin_tag=begin_tag, end_tag=end_tag)

+                               begin_tag=begin_tag, end_tag=end_tag)

+ 

  

  def task_result_to_html(result=None, exc_class=None,

-                             max_abbr_lines=None, max_abbr_len=None,

-                             abbr_postscript=None):

+                         max_abbr_lines=None, max_abbr_len=None,

+                         abbr_postscript=None):

      """convert the result to a mutiple lines HTML fragment

  

      Args:
@@ -801,7 +847,7 @@ 

  

      def _parse_properties(props):

          return ', '.join([v is not None and '%s=%s' % (n, v) or str(n)

-                               for n, v in props.items()])

+                           for n, v in props.items()])

  

      if exc_class:

          if hasattr(result, 'faultString'):
@@ -810,7 +856,7 @@ 

              _str = "%s: %s" % (exc_class.__name__, str(result))

          fragment = TaskResultFragment(text=_str, need_escape=True)

          line = TaskResultLine(fragments=[fragment],

-                                   begin_tag='<pre>', end_tag='</pre>')

+                               begin_tag='<pre>', end_tag='</pre>')

          lines.append(line)

      elif isinstance(result, dict):

  
@@ -821,11 +867,12 @@ 

              val_fragment = line.fragments[1]

              if length is None:

                  return '%s%s = %s%s%s' % (line.begin_tag, key_fragment.composer(),

-                                             val_fragment.composer(), postscript,

-                                             line.end_tag)

+                                           val_fragment.composer(), postscript,

+                                           line.end_tag)

              first_part_len = len('%s = ') + key_fragment.size

              remainder_len = length - first_part_len

-             if remainder_len < 0: remainder_len = 0

+             if remainder_len < 0:

+                 remainder_len = 0

  

              return '%s%s = %s%s%s' % (

                  line.begin_tag, key_fragment.composer(),
@@ -840,7 +887,7 @@ 

                  val_fragment = _parse_value(k, v)

                  key_fragment = TaskResultFragment(text=k, need_escape=True)

                  line = TaskResultLine(fragments=[key_fragment, val_fragment],

-                                           need_escape=False, composer=composer)

+                                       need_escape=False, composer=composer)

              lines.append(line)

      else:

          if result is not None:

Applied the E, F, W, C, I rules, except:

# too many leading ‘#’ for block comment
E266,
# do not assign a lambda expression, use a def
E731,
# [PY2] list comprehension redefines `name` from line `N`
F812,
# line break after binary operator
W504

and the max-line-length is 99 for now.
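
For anyone reproducing the checks locally, it should be enough to install flake8 and the import-order plugin (an assumption based on the import_order_style option in the config and on the plugin list in the flake8 --version output later in this thread) and run flake8 from the repo root, where it picks up the .flake8 file:

$ pip install flake8 flake8-import-order
$ flake8

One practical effect of ignoring W504 while keeping W503 selected: continuation lines are expected to break after a binary operator, not before it. A minimal sketch:

first_term = 1
second_term = 2
value = (first_term +
         second_term)  # accepted: W504 (line break after binary operator) is ignored
# breaking before the operator would instead be reported as W503:
# value = (first_term
#          + second_term)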

Most of the code changes were made by autopep8, except the line-length changes and a few others, which were done by hand because autopep8's output looks ugly in some places.
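
The exact autopep8 invocation isn't recorded in the PR; a typical per-file run matching this configuration would look something like the following (the file path is only an example):

$ autopep8 --in-place --max-line-length 99 --select E1,E2,E3 www/lib/kojiweb/util.py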

Test code is still excluded.

fixes: #2050

One questionable change is except: -> except BaseException:.

It could be narrowed to except Exception: in some places, or left as is.
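
For context: a bare except: is equivalent to except BaseException:, which also catches SystemExit and KeyboardInterrupt, while except Exception: lets those propagate. A minimal illustration:

import sys

try:
    sys.exit(1)  # raises SystemExit, which subclasses BaseException but not Exception
except Exception:
    print("not reached: except Exception does not catch SystemExit")
except BaseException as e:
    print("caught %r" % e)  # this handler does catch SystemExit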

1 new commit added

  • flake8: ignore F812 rule for PY2
4 years ago

20 new commits added

  • flake8: ignore F812 rule for PY2
  • flake8: update contribution guide for flake8
  • add test-requirements.txt to install testing related modules by pip
  • flake8: apply all rules after rebasing
  • flake8: apply W rules (prefering W503)
  • flake8: apply E501 with max-line-length=99
  • flake8: apply all rest E7 rules
  • flake8: apply E71x rule
  • flake8: apply E70x rule
  • flake8: apply E501 rule
  • flake8: apply E4 rules and ignore E402 in sidetag_hub plugin
  • flake8: apply E3 rules
  • flake8: apply E2 rules except E266
  • flake8: apply E1 rules
  • flake8: apply E265 for util/koji-*
  • flake8: apply F rules for koji-shadow
  • refine import style
  • flake8: util/koji-* were ignored
  • flake8: follow E265 rule
  • flake8: follow all F rules
4 years ago

There are a lot of error messages now (30 types). Is this still WIP?

No, and there are no errors locally. Here is my flake8 (Python 3 version):

$ flake8 --version
3.7.9 (import-order: 0.18.1, mccabe: 0.6.1, pycodestyle: 2.5.0, pyflakes: 2.1.1) CPython 3.6.6 on Linux

Ah, my fault. I had an old checkout in the directory, which caused all the errors.

I would change BaseException to Exception, as you've proposed. Otherwise it looks good. I've added the issue to 1.21. It is a huge but simple PR, and rebasing it later could be costly.

:thumbsup:

1 new commit added

  • use Exception instead of BaseException for bare expection
4 years ago

1 new commit added

  • still use BaseException for logging purpose
4 years ago

updated

55a3f8c7 still uses BaseException for logging purposes before exit.
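
That is, the pattern kept in those spots is roughly the following (a sketch with illustrative names, not the exact koji code):

import logging

logging.basicConfig()
logger = logging.getLogger(__name__)

def main():
    raise SystemExit(0)  # hypothetical entry point

try:
    main()
except BaseException:
    # log everything, including SystemExit and KeyboardInterrupt, before
    # the process goes down, then re-raise so the exit still happens
    logger.exception("exiting")
    raise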

rebased onto 6b92ff7b1ae1dd02f5164ca5588014f371887386

4 years ago

23 new commits added

  • flake8: apply rules for koji-sidetag-cleanup
  • still use BaseException for logging purpose
  • use Exception instead of BaseException for bare expection
  • flake8: ignore F812 rule for PY2
  • flake8: update contribution guide for flake8
  • add test-requirements.txt to install testing related modules by pip
  • flake8: apply all rules after rebasing
  • flake8: apply W rules (prefering W503)
  • flake8: apply E501 with max-line-length=99
  • flake8: apply all rest E7 rules
  • flake8: apply E71x rule
  • flake8: apply E70x rule
  • flake8: apply E502 rule
  • flake8: apply E4 rules and ignore E402 in sidetag_hub plugin
  • flake8: apply E3 rules
  • flake8: apply E2 rules except E266
  • flake8: apply E1 rules
  • flake8: apply E265 for util/koji-*
  • flake8: apply F rules for koji-shadow
  • refine import style
  • flake8: util/koji-* were ignored
  • flake8: follow E265 rule
  • flake8: follow all F rules
4 years ago

rebased onto 642508c

4 years ago

Commit 25fb4e6 fixes this pull-request

Pull-Request has been merged by mikem

4 years ago
Changes Summary (45 files)

+31 -17      .flake8
+794 -646    builder/kojid
+36 -29      builder/mergerepos
+23 -21      cli/koji
+775 -609    cli/koji_cli/commands.py
+75 -65      cli/koji_cli/lib.py
+13 -2       docs/source/writing_koji_code.rst
+1347 -1003  hub/kojihub.py
+65 -45      hub/kojixmlrpc.py
+314 -212    koji/__init__.py
+57 -38      koji/arch.py
+68 -55      koji/auth.py
+3 -2        koji/context.py
+142 -100    koji/daemon.py
+11 -5       koji/db.py
+36 -30      koji/plugin.py
+17 -13      koji/policy.py
+44 -42      koji/rpmdiff.py
+2 -0        koji/server.py
+106 -73     koji/tasks.py
+18 -15      koji/util.py
+3 -4        koji/xmlrpcplus.py
+58 -42      plugins/builder/runroot.py
+25 -17      plugins/cli/runroot.py
+9 -7        plugins/cli/save_failed_tree.py
+3 -3        plugins/cli/sidetag_cli.py
+12 -0       plugins/hub/protonmsg.py
+3 -3        plugins/hub/rpm2maven.py
+9 -9        plugins/hub/runroot_hub.py
+7 -7        plugins/hub/save_failed_tree.py
+13 -14      plugins/hub/sidetag_hub.py
+9 -8        setup.py
+6           test-requirements.txt (file added)
+1 -1        tests/test_cli/test_list_tagged.py
+118 -96     util/koji-gc
+261 -260    util/koji-shadow
+18 -15      util/koji-sidetag-cleanup
+10 -7       util/koji-sweep-db
+95 -86      util/kojira
+3 -3        vm/fix_kojikamid.sh
+74 -48      vm/kojikamid.py
+103 -70     vm/kojivmd
+247 -111    www/kojiweb/index.py
+16 -13      www/kojiweb/wsgi_publisher.py
+82 -35      www/lib/kojiweb/util.py