#316 fix some pylint warnings
Merged 5 years ago by ngompa. Opened 5 years ago by ret2libc.
ret2libc/FedoraReview some-pylint-warnings into devel

other minor warning fixes
Riccardo Schirone • 5 years ago  
fix logging related pylint warnings
Riccardo Schirone • 5 years ago  
fix len-as-condition pylint warning
Riccardo Schirone • 5 years ago  
fix wrong-import-order pylint warning
Riccardo Schirone • 5 years ago  
fix no-else-return pylint warning
Riccardo Schirone • 5 years ago  
fix bad-continuation pylint warning
Riccardo Schirone • 5 years ago  
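
Most of the commits above replace eager string building in logging calls (concatenation or the % operator) with the logging module's deferred interpolation, which is what pylint's logging-not-lazy and logging-format-interpolation checks ask for: the format string and its arguments are passed separately and only merged if the record is actually emitted, and keyword arguments such as exc_info keep working unchanged. A minimal sketch of the pattern, using a hypothetical logger name and placeholder value rather than code from this PR:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("fedora-review-sketch")   # hypothetical logger name
    url = "https://example.org/Packages"              # placeholder value

    # Flagged by pylint: the message is built even when DEBUG is disabled.
    log.debug("Could not retrieve info from " + url)
    log.debug("Could not retrieve info from %s" % url)

    # Preferred: let logging interpolate lazily; exc_info etc. still work.
    log.debug("Could not retrieve info from %s", url)
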
file modified
+5 -5
@@ -60,20 +60,20 @@ 

                  content = stream.read()

                  stream.close()

              except IOError, err:

-                 self.log.warning('Could not retrieve info from ' + url)

-                 self.log.debug('Error: %s' % err, exc_info=True)

+                 self.log.warning('Could not retrieve info from %s', url)

+                 self.log.debug('Error: %s', err, exc_info=True)

                  continue

              res = re.search('Package: %s\nVersion:.*' % name, content)

              if res is not None:

-                 self.log.debug("Found in: %s" % url)

+                 self.log.debug("Found in: %s", url)

                  versionok.append(url)

                  if version is None:

                      ver = res.group().split('\n')[1]

                      version = ver.replace('Version:', '').strip()

                  else:

                      self.log.warning(

-                         " * Found two version of the package in %s"

-                         % (" ".join(versionok)))

+                         " * Found two version of the package in %s",

+                         " ".join(versionok))

          return version

  

  

file modified
+7 -6
@@ -73,13 +73,13 @@ 

                  if (rpm.post and '/sbin/ldconfig' in rpm.post) \

                      or (rpm.postun and '/sbin/ldconfig' in rpm.postun):

                          bad_pkgs.append(pkg)

-         

+ 

          if self.flags['EPEL5'] or self.flags['EPEL6'] \

              or self.flags['EPEL7']:

              if bad_pkgs:

                  self.set_passed(self.FAIL,

-                                 '/sbin/ldconfig not called in '

-                                 + ', '.join(bad_pkgs))

+                                 '/sbin/ldconfig not called in ' +

+                                 ', '.join(bad_pkgs))

              else:

                  self.set_passed(self.PASS)

          else:
@@ -89,8 +89,8 @@ 

                  'for Fedora 28 and later.'

              if bad_pkgs:

                  self.set_passed(self.FAIL,

-                                 '/sbin/ldconfig called in '

-                                 + ', '.join(bad_pkgs))

+                                 '/sbin/ldconfig called in ' +

+                                 ', '.join(bad_pkgs))

              else:

                  self.set_passed(self.PASS)

  
@@ -119,7 +119,8 @@ 

              for path in self.rpms.find_all('*.h', pkg):

                  # header files (.h) under /usr/src/debug/* will be in

                  #  the -debuginfo package.

-                 if path.startswith('/usr/src/debug/') and ('-debuginfo' in pkg or '-debugsource' in pkg):

+                 if path.startswith('/usr/src/debug/') and \

+                    ('-debuginfo' in pkg or '-debugsource' in pkg):

                      continue

                  # All other .h files should be in a -devel package.

                  if '-devel' not in pkg:

file modified
+40 -44
@@ -22,8 +22,6 @@ 

  import os

  import os.path

  import re

- import rpm

- 

  from glob import glob

  from StringIO import StringIO

  from subprocess import Popen, PIPE, CalledProcessError
@@ -31,7 +29,7 @@ 

      from subprocess import check_output          # pylint: disable=E0611

  except ImportError:

      from FedoraReview.el_compat import check_output

- 

+ import rpm

  

  from FedoraReview import CheckBase, Mock, ReviewDirs

  from FedoraReview import ReviewError             # pylint: disable=W0611
@@ -130,8 +128,8 @@ 

          if check_dirs:

              self.set_passed(

                  self.PENDING,

-                 'Especially check following dirs for bundled code: '

-                 + ', '.join(check_dirs))

+                 'Especially check following dirs for bundled code: ' +

+                 ', '.join(check_dirs))

          else:

              self.set_passed(self.PENDING)

  
@@ -231,8 +229,8 @@ 

          if not install_sec:

              self.set_passed(self.NA)

              return

-         self.log.debug('regex: ' + regex)

-         self.log.debug('install_sec: ' + install_sec)

+         self.log.debug('regex: %s', regex)

+         self.log.debug('install_sec: %s', install_sec)

          has_clean = install_sec and re.search(regex, install_sec)

          if self.flags['EPEL5']:

              self.text = 'EPEL5: Package does run rm -rf %{buildroot}' \
@@ -268,7 +266,7 @@ 

              with open('/dev/null', 'w') as f:

                  suids = check_output(cmd.split(), stderr=f).strip().split()

          except CalledProcessError:

-             self.log.info('Cannot run find command: ' + cmd)

+             self.log.info('Cannot run find command: %s', cmd)

              suids = []

          else:

              suids = [os.path.basename(s) for s in suids]
@@ -419,7 +417,7 @@ 

          found = in_list('%{?dist}', rel_tags)

          if len(rel_tags) > 1:

              self.set_passed(found, 'Multiple Release: tags found')

-         elif len(rel_tags) == 0:

+         elif not rel_tags:

              self.set_passed(found, 'No Release: tags found')

          else:

              self.set_passed(found)
@@ -622,7 +620,7 @@ 

          tarballs if first option fails '''

          s = Mock.get_builddir('BUILD') + '/*'

          globs = glob(s)

-         if globs and len(globs) > 0:

+         if globs:

              msg = 'Checking patched sources after %prep for licenses.'

              source_dir = globs[0]

          else:
@@ -661,7 +659,7 @@ 

      def run(self):

          try:

              source_dir, msg = self._get_source_dir()

-             self.log.debug("Scanning sources in " + source_dir)

+             self.log.debug("Scanning sources in %s", source_dir)

              if os.path.exists(source_dir):

                  cmd = 'licensecheck -r ' + source_dir

                  try:
@@ -670,13 +668,13 @@ 

                      self.set_passed(self.PENDING,

                                      "Cannot run licensecheck: " + str(err))

                      return

-                 self.log.debug("Got license reply, length: %d" % len(out))

+                 self.log.debug("Got license reply, length: %d", len(out))

                  files_by_license = self._parse_licenses(out)

                  filename = os.path.join(ReviewDirs.root,

                                          'licensecheck.txt')

                  self._write_license(files_by_license, filename)

              else:

-                 self.log.error('Source directory %s does not exist!' %

+                 self.log.error('Source directory %s does not exist!',

                                 source_dir)

              if not files_by_license:

                  msg += ' No licenses found.'
@@ -690,7 +688,7 @@ 

                  msg += ' Detailed output of licensecheck in ' + filename

              self.set_passed(self.PENDING, msg)

          except OSError as e:

-             self.log.error('OSError: %s' % str(e))

+             self.log.error('OSError: %s', str(e))

              msg = ' Programmer error: ' + e.strerror

              self.set_passed(self.PENDING, msg)

  
@@ -759,16 +757,15 @@ 

  

          for _license in licenses:

              if self._license_flag == 'L' and _license not in qpL_files:

-                 self.log.debug("Found " + _license + " not marked as %license")

+                 self.log.debug("Found %s not marked as %%license", _license)

                  self.set_passed(

                      self.FAIL,

                      "License file %s is not marked as %%license" % _license)

                  return

              if _license not in flagged_files:

-                 self.log.debug("Cannot find " + _license +

-                                " in doclist")

+                 self.log.debug("Cannot find %s in doclist", _license)

                  self.set_passed(self.FAIL,

-                                 "Cannot find %s in rpm(s)" % _license)

+                                 "Cannot find %s in rpm(s)", _license)

                  return

          self.set_passed(self.PASS)
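
One detail in the hunk above: once the message becomes a format string, a literal percent sign has to be doubled, which is why "%license" turns into "%%license"; with plain concatenation no escaping was needed. A tiny sketch with a hypothetical file name:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("sketch")
    licensefile = "COPYING"            # placeholder license file name

    # The literal %license directive is escaped as %%license in a format string.
    log.debug("Found %s not marked as %%license", licensefile)
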

  
@@ -1082,8 +1079,8 @@ 

          if bad_dirs:

              bad_dirs = list(set(bad_dirs))

              self.set_passed(self.PENDING,

-                             "Directories without known owners: "

-                             + ', '.join(bad_dirs))

+                             "Directories without known owners: " +

+                             ', '.join(bad_dirs))

          else:

              self.set_passed(self.PASS)

  
@@ -1242,17 +1239,17 @@ 

              local = self.srpm.extract(s.filename)

              if not local:

                  self.log.warn(

-                     "Cannot extract local source: " + s.filename)

+                     "Cannot extract local source: %s", s.filename)

                  return(False, None)

              cmd = '/usr/bin/diff -U2 -r %s %s' % (upstream, local)

-             self.log.debug(' Diff cmd: ' + cmd)

+             self.log.debug(' Diff cmd: %s', cmd)

              try:

                  p = Popen(cmd.split(), stdout=PIPE, stderr=PIPE)

                  output = p.communicate()[0]

              except OSError:

                  self.log.error("Cannot run diff", exc_info=True)

                  return (False, None)

-             if output and len(output) > 0:

+             if output:

                  return (False, output)

          return (True, None)

  
@@ -1262,8 +1259,8 @@ 

          all_sources_passed = True

          for source in sources:

              if source.local:

-                 self.log.debug('Skipping md5-test for '

-                                + source.filename)

+                 self.log.debug('Skipping md5-test for %s',

+                                source.filename)

                  continue

              if source.local_src:

                  text += "Using local file " + source.local_src + \
@@ -1307,7 +1304,7 @@ 

                      text += 'diff -r also reports differences\n'

                      msg = 'Upstream MD5sum check error, diff is in ' + p

          except AttributeError as e:

-             self.log.debug("CheckSourceMD5(): Attribute error " + str(e),

+             self.log.debug("CheckSourceMD5(): Attribute error %s", str(e),

                             exc_info=True)

              msg = 'Internal Error!'

              passed = False
@@ -1492,8 +1489,8 @@ 

                  self.set_passed(

                      self.FAIL,

                      'A package with this name already exists.  Please check'

-                     ' https://admin.fedoraproject.org/pkgdb/package/'

-                     + name)

+                     ' https://admin.fedoraproject.org/pkgdb/package/' +

+                     name)

              else:

                  self.set_passed(self.PASS)

          except pycurl.error:
@@ -1544,11 +1541,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('gtk-update-icon-cache',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                    [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

                  else:

                      if in_list('gtk-update-icon-cache',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

          if not using:

              self.set_passed(self.NA)
@@ -1590,7 +1587,7 @@ 

              if os.path.isdir(path):

                  return False

              elif not os.path.exists(path):

-                 self.log.warning("Can't access desktop file: " + path)

+                 self.log.warning("Can't access desktop file: %s", path)

                  return False

              with open(path) as f:

                  for line in f.readlines():
@@ -1609,11 +1606,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('update-desktop-database',

-                                 [rpm_pkg.post, rpm_pkg.postun]):

+                                    [rpm_pkg.post, rpm_pkg.postun]):

                          failed = True

                  else:

                      if in_list('update-desktop-database',

-                             [rpm_pkg.post, rpm_pkg.postun]):

+                                [rpm_pkg.post, rpm_pkg.postun]):

                          failed = True

          if not using:

              self.set_passed(self.NA)
@@ -1654,11 +1651,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('gio-querymodules',

-                                 [rpmpkg.post, rpmpkg.postun]):

+                                    [rpmpkg.post, rpmpkg.postun]):

                          failed = True

                  else:

                      if in_list('gio-querymodules',

-                                 [rpmpkg.post, rpmpkg.postun]):

+                                [rpmpkg.post, rpmpkg.postun]):

                          failed = True

          if not using:

              self.set_passed(self.NA)
@@ -1698,11 +1695,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('gtk-query-immodules',

-                                 [rpmpkg.postun, rpmpkg.posttrans]):

+                                    [rpmpkg.postun, rpmpkg.posttrans]):

                          failed = True

                  else:

                      if in_list('gtk-query-immodules',

-                                 [rpmpkg.postun, rpmpkg.posttrans]):

+                                [rpmpkg.postun, rpmpkg.posttrans]):

                          failed = True

  

          if not using:
@@ -1742,11 +1739,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('glib-compile-schemas',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                    [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

                  else:

                      if in_list('glib-compile-schemas',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

  

          if not using:
@@ -1845,11 +1842,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('gdk-pixbuf-query-loaders',

-                                 [rpmpkg.postun, rpmpkg.post]):

+                                    [rpmpkg.postun, rpmpkg.post]):

                          failed = True

                  else:

                      if in_list('gdk-pixbuf-query-loaders',

-                                 [rpmpkg.postun, rpmpkg.post]):

+                                [rpmpkg.postun, rpmpkg.post]):

                          failed = True

  

          if not using:
@@ -1965,7 +1962,6 @@ 

                  pkgs.extend(deps.resolve(requires_to_process))

              return set(pkgs)

  

- 

          pkg_deps = set()

          for pkg in self.spec.packages:

              pkg_deps |= resolve(self.rpms.get(pkg).requires)
@@ -1975,8 +1971,8 @@ 

          for pkg in pkg_deps:

              provides = deps.list_provides(pkg)

              if any('deprecated()' in p for p in provides):

-                 self.set_passed(self.FAIL, pkg + ' is deprecated, you must '\

-                     'not depend on it.')

+                 self.set_passed(self.FAIL, pkg + ' is deprecated, you must '

+                                 'not depend on it.')

                  return

  

          self.set_passed(self.PASS)
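
A caution that applies to one hunk above: the comma form is only lazy because the logging methods run the % interpolation themselves. An ordinary method such as set_passed simply receives whatever positional arguments it is given, so rewriting set_passed(self.FAIL, "Cannot find %s in rpm(s)" % _license) as set_passed(self.FAIL, "Cannot find %s in rpm(s)", _license) changes what the call means rather than how the string is formatted. A hedged sketch of the distinction, with a hypothetical stand-in for the reporting method (not the real FedoraReview API):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("sketch")

    def set_passed(state, message=None):   # hypothetical stand-in, not the real API
        print(state, message)

    name = "LICENSE"

    # Lazy formatting belongs to logging calls only:
    log.debug("Cannot find %s in doclist", name)

    # A plain function still needs the caller to format the string:
    set_passed("FAIL", "Cannot find %s in rpm(s)" % name)
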

file modified
+3 -3
@@ -104,7 +104,7 @@ 

          # check_for('autoconf', ['autoconf', 'autoconf213'])

          check_for('libtool', ['libtool'])

  

-         self.log.debug("autotools used: " + ' '.join(self.used_tools))

+         self.log.debug("autotools used: %s", ' '.join(self.used_tools))

  

  

  # CHECKERS #
@@ -170,7 +170,7 @@ 

              cmd = trace_cmd + [configure_ac]

  

              try:

-                 self.log.debug("running: " + ' '.join(cmd))

+                 self.log.debug("running: %s", ' '.join(cmd))

                  p = Popen(cmd, stdout=PIPE, stderr=PIPE)

                  stdout, stderr = p.communicate()

              except IOError:
@@ -234,7 +234,7 @@ 

          # trace for warnings

          self.trace()

  

-         if not len(self.warn_items):

+         if not self.warn_items:

              self.set_passed(self.PASS)

              return
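
The len-as-condition fixes here and in several hunks below rely on empty sequences and strings being falsy in Python, so the length call adds nothing. A small sketch with a placeholder list:

    warn_items = []                       # placeholder list

    # Flagged by pylint (len-as-condition):
    if len(warn_items) == 0:
        print("no warnings")

    # Preferred: use the sequence's own truth value.
    if not warn_items:
        print("no warnings")
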

  

file modified
+3 -3
@@ -88,7 +88,7 @@ 

          with open('rpmlint.txt', 'w') as f:

              f.write(out)

          for line in out.split('\n'):

-             if line and len(line) > 0:

+             if line:

                  self.rpmlint_output.append(line)

          no_errors, msg = self.check_rpmlint_errors(out, self.log)

          return no_errors, msg if msg else out
@@ -268,7 +268,7 @@ 

                  self.set_passed(self.FAIL,

                                  '--no-build: package(s) not installed')

                  self.log.info('Packages required by --no-build are'

-                               ' not installed: ' + ', '.join(bad_ones))

+                               ' not installed: %s', ', '.join(bad_ones))

              return

          _mock_root_setup('While installing built packages', force=True)

          rpms = Mock.get_package_rpm_paths(self.spec)
@@ -415,7 +415,7 @@ 

              else:

                  shutil.rmtree('BUILD')

          os.symlink(Mock.get_builddir('BUILD'), 'BUILD')

-         self.log.info('Active plugins: ' +

+         self.log.info('Active plugins: %s',

                        ', '.join(self.checks.get_plugins(True)))

          self.set_passed(self.NA, None, [self.setup_attachment()])

  

file modified
+7 -7
@@ -22,11 +22,11 @@ 

  import os

  import re

  

- import rpm

- 

  from glob import glob

  from subprocess import Popen, PIPE

  

+ import rpm

+ 

  from FedoraReview import CheckBase, Mock, ReviewDirs

  from FedoraReview import ReviewError             # pylint: disable=W0611

  from FedoraReview import RegistryBase, Settings
@@ -85,7 +85,7 @@ 

              self.text = \

                  "Explicit BuildRoot: tag as required by EPEL5 present."

          br_tags = self.spec.find_all_re('^BuildRoot')

-         if len(br_tags) == 0:

+         if not br_tags:

              if self.flags['EPEL5']:

                  self.set_passed(self.FAIL,

                                  'Missing buildroot (required for EPEL5)')
@@ -221,7 +221,7 @@ 

              prov_txt += get_provides(pkg, rpm_pkg.provides) + '\n'

          attachments = [self.Attachment('Requires', req_txt),

                         self.Attachment('Provides', prov_txt)]

-         if len(wrong_req) == 0:

+         if not wrong_req:

              self.set_passed(self.PASS, None, attachments)

          else:

              text = "Incorrect Requires : %s " % (', '.join(wrong_req))
@@ -568,7 +568,7 @@ 

              self.log.error("Cannot run diff", exc_info=True)

              self.set_passed(self.FAIL, "OS error runnning diff")

              return

-         if output and len(output) > 0:

+         if output:

              a = self.Attachment("Diff spec file in url and in SRPM",

                                  output)

              text = ('Spec file as given by url is not the same as in '
@@ -768,11 +768,11 @@ 

                  if self.flags['EPEL5'] or self.flags['EPEL6'] \

                      or self.flags['EPEL7']:

                      if not in_list('update-mime-database',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                    [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

                  else:

                      if in_list('update-mime-database',

-                                 [rpm_pkg.postun, rpm_pkg.posttrans]):

+                                [rpm_pkg.postun, rpm_pkg.posttrans]):

                          failed = True

  

          if not using:
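
Many hunks in this pull request, including the one just above, only re-indent continuation lines: pylint's bad-continuation check expects arguments on a wrapped line to be aligned with the opening bracket (or to use a consistent hanging indent), and the required column shifts whenever the call gains or loses a leading "not". A sketch of the two accepted shapes, using a hypothetical in_list helper:

    def in_list(needle, haystacks):                 # hypothetical helper for the sketch
        return any(needle in (h or "") for h in haystacks)

    post, postun = "update-mime-database ...", ""   # placeholder scriptlet bodies

    # Continuation aligned with the opening parenthesis:
    failed = in_list("update-mime-database",
                     [post, postun])

    # Or a hanging indent, one level deeper than the statement:
    failed = in_list(
        "update-mime-database",
        [post, postun])
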

file modified
+2 -3
@@ -61,10 +61,9 @@ 

          regex = re.compile('(^python-)|(-python$)|(-python-)', re.M)

          br = self.spec.build_requires

          rr = self.spec.get_requires()

-         

+ 

          if regex.search('\n'.join(br)) or regex.search('\n'.join(rr)):

-             self.set_passed(self.FAIL, 'Unversionned Python dependency ' \

+             self.set_passed(self.FAIL, 'Unversionned Python dependency '

                                         'found.')

              return

          self.set_passed(self.PASS)

- 

file modified
+1 -1
@@ -457,7 +457,7 @@ 

              if value is False:

                  err_message.append(key.pattern.replace('\\s+', ' '))

  

-         if len(err_message) == 0:

+         if not err_message:

              self.set_passed(self.PASS)

          else:

              self.set_passed(self.PENDING,

@@ -107,7 +107,7 @@ 

              self.dir = ReviewDirs.srpm

  

          if has_srpm() and Settings.cache:

-             self.log.debug("Using cached source: " + self.srpm_file)

+             self.log.debug("Using cached source: %s", self.srpm_file)

              return

          srpm_name = os.path.basename(self.srpm_url)

          self.srpm_file = os.path.join(self.dir, srpm_name)
@@ -127,9 +127,8 @@ 

  

      def is_downloaded(self):

          ''' Return true iff self.{specfile, srpmfile} are valid. '''

-         ok = (self.spec_file and os.path.exists(self.spec_file)

-               and self.srpm_file and

-               os.path.exists(self.srpm_file))

+         ok = (self.spec_file and os.path.exists(self.spec_file) and

+               self.srpm_file and os.path.exists(self.srpm_file))

          return ok

  

      def _check_cache(self):
@@ -148,8 +147,8 @@ 

              self.spec_url = 'file://' + specs[0]

              self.srpm_url = 'file://' + srpms[0]

              return True

-         else:

-             return False

+ 

+         return False

  

      def download_files(self):

          """ Download the spec file and srpm extracted from the bug
@@ -162,7 +161,7 @@ 

              self.do_download_files()

          except IOError as ex:

              self.log.debug('bug download error', exc_info=True)

-             self.log.error('Cannot download file(s): ' + str(ex))

+             self.log.error('Cannot download file(s): %s', str(ex))

              return False

          return True

  
@@ -185,16 +184,16 @@ 

      def find_urls(self):

          """ Retrieve the page and parse for srpm and spec url. """

          # pylint: disable=bare-except

-         self.log.info('Getting .spec and .srpm Urls from : '

-             + self.get_location())

+         self.log.info('Getting .spec and .srpm Urls from : %s',

+                       self.get_location())

          try:

              self.find_srpm_url()

-             self.log.info("  --> SRPM url: " + self.srpm_url)

+             self.log.info("  --> SRPM url: %s", self.srpm_url)

              if Settings.rpm_spec:

                  self._get_spec_from_srpm()

              else:

                  self.find_spec_url()

-                 self.log.info("  --> Spec url: " + self.spec_url)

+                 self.log.info("  --> Spec url: %s", self.spec_url)

          except ReviewError as fre:

              raise fre

          except:
@@ -215,16 +214,16 @@ 

          elif self.srpm_url:

              basename = os.path.basename(urlparse(self.srpm_url).path)

              return basename.rsplit('-', 2)[0]

-         else:

-             return '?'

+ 

+         return '?'

  

      def get_dirname(self, prefix='review-'):

          ''' Return dirname to be used for this bug. '''

          if self.get_name() != '?':

              return prefix + self.get_name()

-         else:

-             return prefix + tempfile.mkdtemp(prefix=prefix,

-                                              dir=os.getcwd())

+ 

+         return prefix + tempfile.mkdtemp(prefix=prefix,

+                                          dir=os.getcwd())

  

      @staticmethod

      def do_check_options(mode, bad_opts):

@@ -43,8 +43,8 @@ 

          ''' Return dirname to be used for this bug. '''

          if self.get_name() != '?':

              return self.bug_num + '-' + self.get_name()

-         else:

-             return self.bug_num

+ 

+         return self.bug_num

  

      def check_options(self):                    # pylint: disable=R0201

          ''' Raise SettingsError if Settings combinations is invalid. '''
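
The no-else-return rewrites above and below all have the same shape: when the if branch ends in a return, the else only adds a level of indentation, so the alternative return is dedented to the function body. A minimal sketch with a hypothetical function:

    # Flagged by pylint (no-else-return):
    def dirname_nested(name, prefix="review-"):
        if name != "?":
            return prefix + name
        else:
            return prefix + "unknown"

    # Preferred: early return, fallback at function level.
    def dirname_flat(name, prefix="review-"):
        if name != "?":
            return prefix + name
        return prefix + "unknown"

    print(dirname_flat("?"), dirname_flat("foo"))
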

file modified
+17 -17
@@ -62,9 +62,9 @@ 

  

          def log_duplicate(first, second):

              ''' Log warning for duplicate test. '''

-             self.log.warning("Duplicate checks %s in %s, %s in %s" %

-                              (first.name, first.defined_in,

-                               second.name, second.defined_in))

+             self.log.warning("Duplicate checks %s in %s, %s in %s",

+                              first.name, first.defined_in, second.name,

+                              second.defined_in)

  

          if key in self.iterkeys():

              log_duplicate(value, self[key])
@@ -109,9 +109,9 @@ 

  

          def log_kill(victim, killer):

              ''' Log test skipped due to deprecation. '''

-             self.log.debug("Skipping %s in %s, deprecated by %s in %s" %

-                            (victim.name, victim.defined_in,

-                             killer.name, killer.defined_in))

+             self.log.debug("Skipping %s in %s, deprecated by %s in %s",

+                            victim.name, victim.defined_in, killer.name,

+                            killer.defined_in)

  

          value = self[key]

          for victim in value.deprecates:
@@ -221,7 +221,7 @@ 

                  self.checkdict[c].result = None

                  self.checkdict[c].is_disabled = True

              else:

-                 self.log.warn("I can't remove check: " + c)

+                 self.log.warn("I can't remove check: %s", c)

  

      def set_single_check(self, check):

          ''' Remove all checks but arg and it's deps. '''
@@ -286,8 +286,8 @@ 

          ''' Write hidden file usable when writing tests. '''

          with open('.testlog.txt', 'w') as f:

              for r in results:

-                 f.write('\n' + 24 * ' '

-                         + "('%s', '%s')," % (r.state, r.name))

+                 f.write('\n' + 24 * ' ' +

+                         "('%s', '%s')," % (r.state, r.name))

  

      def _ready_to_run(self, name):

          """
@@ -305,9 +305,9 @@ 

                  return False

          for dep in check.needs:

              if dep not in self.checkdict:

-                 self.log.warning('%s depends on deprecated %s' %

-                                  (name, dep))

-                 self.log.warning('Removing %s, cannot resolve deps' %

+                 self.log.warning('%s depends on deprecated %s',

+                                  name, dep)

+                 self.log.warning('Removing %s, cannot resolve deps',

                                   name)

                  del self.checkdict[name]

                  return True
@@ -348,11 +348,11 @@ 

              check = self.checkdict[name]

              if check.is_run:

                  return

-             self.log.debug('Running check: ' + name)

+             self.log.debug('Running check: %s', name)

              check.run()

              now = time.time()

-             self.log.debug('    %s completed: %.3f seconds'

-                            % (name, (now - self._clock)))

+             self.log.debug('    %s completed: %.3f seconds', name,

+                            now - self._clock)

              self._clock = now

              attachments.extend(check.attachments)

              result = check.result
@@ -388,8 +388,8 @@ 

          else:

              with open('.testlog.txt', 'w') as f:

                  for r in results:

-                     f.write('\n' + 24 * ' '

-                             + "('%s', '%s')," % (r.state, r.name))

+                     f.write('\n' + 24 * ' ' +

+                             "('%s', '%s')," % (r.state, r.name))

  

  

  # vim: set expandtab ts=4 sw=4:

file modified
+22 -19
@@ -35,12 +35,20 @@ 

  COPR_API_URL_TEMPLATE = 'https://copr.fedoraproject.org/api/coprs/build/{}/'

  

  

+ def get_build_data(build_id):

+     """ Fetch build data from COPR and return them as json """

+     build_data_file = urllib.urlopen(

+         COPR_API_URL_TEMPLATE.format(build_id))

+     return json.load(build_data_file)

+ 

+ 

  class CoprBug(UrlBug):

      """ This class is used for running review on a COPR build """

  

      def __init__(self, build_descriptor):

          """ Constructor.

-         :arg build_id: numeric id of COPR build or direct frontend or backend build uri

+         :arg build_id: numeric id of COPR build or direct frontend or backend

+         build uri

          """

          self.log = Settings.get_logger()

  
@@ -58,10 +66,10 @@ 

          self.copr_build_dir = os.path.join(os.getcwd(), builddir_name)

          try:

              os.makedirs(self.copr_build_dir)

-         except:

+         except os.error:

              self.log.error(

-                 "Build directory ./{} cannot be created or exists already."

-                 .format(builddir_name))

+                 "Build directory ./%s cannot be created or exists already.",

+                 builddir_name)

              sys.exit(1)

  

          self.dir = self.copr_build_dir
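
The bare except above is narrowed to os.error, which is just an alias of OSError, the exception os.makedirs actually raises when the directory already exists or cannot be created; naming the exception keeps KeyboardInterrupt and genuine programming errors from being swallowed. A minimal sketch with a placeholder directory name:

    import os

    try:
        os.makedirs("copr-build-sketch")     # placeholder directory name
    except OSError as err:                   # os.error is an alias of OSError
        print("Build directory cannot be created or exists already:", err)
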
@@ -74,11 +82,11 @@ 

          self.log.info('Getting .spec, .srpm, .rpm')

          try:

              self.find_srpm_url()

-             self.log.info("  --> SRPM url: " + self.srpm_url)

+             self.log.info("  --> SRPM url: %s", self.srpm_url)

              self.find_spec_url()

-             self.log.info("  --> Spec url: " + self.spec_url)

+             self.log.info("  --> Spec url: %s", self.spec_url)

              self.find_rpm_urls()

-             self.log.info("  --> RPM urls: " + ' '.join(self.rpm_urls))

+             self.log.info("  --> RPM urls: %s", ' '.join(self.rpm_urls))

          except ReviewError as fre:

              raise fre

          except:
@@ -94,7 +102,7 @@ 

          for url in urls:

              if not re.match(r'.*.src.rpm$', url):

                  filtered_urls.append(url)

-         if len(filtered_urls) == 0:

+         if not filtered_urls:

              raise self.BugError('Cannot find rpm URL')

          self.rpm_urls = filtered_urls

  
@@ -119,25 +127,21 @@ 

              self.urlretrieve(rpm_url, rpm_path)

  

      def parse_build_id_from_uri(self, uri):

+         """ Get the COPR build-id from a given uri """

          uri_last_part = os.path.basename(uri.strip('/'))

          match = re.match(r'(\d+).*', uri_last_part)

          if not match:

-             self.log.error("Invalid build uri: {}.".format(uri))

+             self.log.error("Invalid build uri: %s.", uri)

              sys.exit(1)

          return match.group(1).lstrip('0')

  

-     def get_build_data(self, build_id):

-         """ Fetch build data from COPR and return them as json """

-         build_data_file = urllib.urlopen(

-             COPR_API_URL_TEMPLATE.format(build_id))

-         return json.load(build_data_file)

- 

      def get_location(self, build_id):

          """ Get COPR build URI """

-         build_data = self.get_build_data(build_id)

+         build_data = get_build_data(build_id)

  

          if build_data['status'] != 'succeeded':

-             self.log.error("Build did not succeeded in all its chroots. Exiting.")

+             self.log.error("Build did not succeeded in all its chroots."

+                            "Exiting.")

              sys.exit(1)

  

          required_chroot = 'fedora-rawhide-' + platform.machine()
@@ -150,10 +154,9 @@ 

              if chroot == required_chroot:

                  return uri

  

-         self.log.error("{} chroot not found. Exiting.".format(required_chroot))

+         self.log.error("%s chroot not found. Exiting.", required_chroot)

          sys.exit(1)

  

- 

      def check_options(self):  # pylint: disable=R0201

          ''' Raise error if Settings combination is invalid. '''

          UrlBug.do_check_options('--copr-build', ['other_bz', 'url'])

@@ -32,17 +32,17 @@ 

  

  import argparse

  import ConfigParser

- import fedora_cert

  import getpass

  import logging

  import os

- import rpm

  import stat

  import subprocess

+ from subprocess import Popen

  import sys

  from xmlrpclib import Fault

+ import fedora_cert

+ import rpm

  from bugzilla.rhbugzilla import RHBugzilla

- from subprocess import Popen

  

  

  SETTINGS_FILE = os.path.join(os.environ['HOME'],
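
The import reshuffling above and below is what pylint's wrong-import-order check asks for: standard-library imports come first, then third-party packages such as rpm, fedora_cert or bugzilla, then the project's own modules, with each group kept together. A sketch of the expected grouping; the third-party and local imports are commented out so the snippet stays runnable on its own:

    # Standard library first.
    import os
    import subprocess
    from subprocess import Popen

    # Third-party packages next (assumed installed in the real project):
    # import rpm
    # import fedora_cert

    # Project-local modules last:
    # from FedoraReview import CheckBase, Mock, ReviewDirs
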

file modified
+8 -9
@@ -215,7 +215,6 @@ 

              self.rpms_by_pkg[pkg] = RpmFile(

                  pkg, nvr.version, nvr.release)

  

- 

      def get_filelist(self, container=None):

          self.init()

          if container and container not in self.containers:
@@ -261,7 +260,7 @@ 

          source = self.sources_by_tag[tag]

          if not source.extract_dir:

              source.extract()

-         self.log.debug('Adding files in : %s' % source.filename)

+         self.log.debug('Adding files in : %s', source.filename)

          self.files_by_tag[tag] = []

          # pylint: disable=W0612

          for root, dirs, files in os.walk(source.extract_dir):
@@ -272,8 +271,8 @@ 

  

      def get_filelist(self, container=None):

          if container and container not in self.containers:

-             raise ValueError('SourcesDataSource: bad package: '

-                              + container)

+             raise ValueError('SourcesDataSource: bad package: ' +

+                              container)

          if container:

              self._load_files(container)

              return self.files_by_tag[container]
@@ -283,14 +282,14 @@ 

              all_.extend(self.files_by_tag[key])

          return all_

  

-     def get(self, tag=None):

+     def get(self, key=None):

          ''' Return Source object bound to a Source/patch tag '''

-         if not tag:

-             tag = 'Source0'

+         if not key:

+             key = 'Source0'

              self.log.warning('Retrieving default source as Source0')

-         if tag not in self.sources_by_tag.iterkeys():

+         if key not in self.sources_by_tag.iterkeys():

              return None

-         return self.sources_by_tag[tag]

+         return self.sources_by_tag[key]

  

      def get_keys(self):

          return self.sources_by_tag.iterkeys()

file modified
+15 -15
@@ -48,11 +48,11 @@ 

  

      cmd = ['dnf', 'repoquery', '-q', '-C', '--requires', '--resolve']

      cmd.extend(list(set(pkgs)))

-     Settings.get_logger().debug("Running: " + ' '.join(cmd))

+     Settings.get_logger().debug("Running: %s", ' '.join(cmd))

      try:

          dnf = subprocess.Popen(cmd, stdout=subprocess.PIPE)

      except OSError:

-         Settings.get_logger().warning("Cannot run " + " ".join(cmd))

+         Settings.get_logger().warning("Cannot run %s", " ".join(cmd))

          return []

      deps = []

      while True:
@@ -74,11 +74,11 @@ 

  

      cmd = ['dnf', 'repoquery', '-q', '-C', '--provides']

      cmd.extend(list(set(pkgs)))

-     Settings.get_logger().debug("Running: " + ' '.join(cmd))

+     Settings.get_logger().debug("Running: %s", ' '.join(cmd))

      try:

          dnf = subprocess.Popen(cmd, stdout=subprocess.PIPE)

      except OSError:

-         Settings.get_logger().warning("Cannot run " + " ".join(cmd))

+         Settings.get_logger().warning("Cannot run %s", " ".join(cmd))

          return []

      provides = []

      while True:
@@ -108,12 +108,12 @@ 

  def resolve_one(req):

      ''' Return the packages providing the req symbol. '''

      cmd = ['dnf', 'repoquery', '-C', '--whatprovides', req]

-     Settings.get_logger().debug("Running: " + ' '.join(cmd))

+     Settings.get_logger().debug("Running: %s", ' '.join(cmd))

  

      try:

          dnf = subprocess.Popen(cmd, stdout=subprocess.PIPE)

      except OSError:

-         Settings.get_logger().warning("Cannot run " + " ".join(cmd))

+         Settings.get_logger().warning("Cannot run %s", " ".join(cmd))

          return []

  

      pkgs = []
@@ -138,7 +138,7 @@ 

      try:

          rpm = subprocess.Popen(cmd, stdout=subprocess.PIPE)

      except OSError:

-         Settings.get_logger().warning("Cannot run " + " ".join(cmd))

+         Settings.get_logger().warning("Cannot run %s", " ".join(cmd))

          return []

      dirs = []

      while True:
@@ -165,8 +165,8 @@ 

  

      owners = []

      paths_to_exam = list(paths)

-     for i in range(len(paths)):

-         p = subprocess.Popen(['rpm', '--qf', '%{NAME}\n', '-qf', paths[i]],

+     for path in paths:

+         p = subprocess.Popen(['rpm', '--qf', '%{NAME}\n', '-qf', path],

                               stdout=subprocess.PIPE,

                               stderr=subprocess.PIPE)

          path_owners = p.communicate()[0].split()
@@ -178,11 +178,11 @@ 

                  [p for p in path_owners if not p.startswith('error:')]

          if not path_owners or not path_owners[0]:

              continue

-         paths_to_exam.remove(paths[i])

+         paths_to_exam.remove(path)

          owners.extend(path_owners)

      for path in paths_to_exam:

          cmd = ['dnf', 'repoquery', '-C', '--quiet', '--file', path]

-         Settings.get_logger().debug("Running: " + ' '.join(cmd))

+         Settings.get_logger().debug("Running: %s", ' '.join(cmd))

          try:

              lines = check_output(cmd).split()

              lines = [l.strip() for l in lines]
@@ -192,7 +192,7 @@ 

              lines = [l.rsplit('-', 2)[0] for l in lines]

              owners.extend(list(set(lines)))

          except subprocess.CalledProcessError:

-             Settings.get_logger().error("Cannot run " + " ".join(cmd))

+             Settings.get_logger().error("Cannot run %s", " ".join(cmd))

              return owners

      return owners

  
@@ -207,7 +207,7 @@ 

      cmd = ['dnf', 'repoquery', '-C', '-l']

      cmd.extend(list(set(pkgs)))

  

-     Settings.get_logger().debug("Running: " + ' '.join(cmd))

+     Settings.get_logger().debug("Running: %s", ' '.join(cmd))

      try:

          paths = check_output(cmd)

      except OSError:
@@ -220,11 +220,11 @@ 

      ''' Return lists of files and dirs in local pkg. '''

  

      cmd = ['rpm', '-ql', '--dump', '--nosignature', '-p', pkg_filename]

-     Settings.get_logger().debug("Running: " + ' '.join(cmd))

+     Settings.get_logger().debug("Running: %s", ' '.join(cmd))

      try:

          rpm = subprocess.Popen(cmd, stdout=subprocess.PIPE)

      except OSError:

-         Settings.get_logger().warning("Cannot run " + " ".join(cmd))

+         Settings.get_logger().warning("Cannot run %s", " ".join(cmd))

          return []

      files = []

      dirs = []

@@ -3,12 +3,12 @@ 

  

  ''' Download a scrath build from koji. '''

  

+ import optparse

  import os

  import sys

  import koji

  import urlgrabber

  import urlgrabber.progress

- import optparse

  

  BASEURL = 'https://kojipkgs.fedoraproject.org/work/'

  HUBURL = 'https://koji.fedoraproject.org/kojihub'

@@ -56,8 +56,8 @@ 

          try:

              output, error = proc.communicate()

          except OSError, e:

-             self.log.debug("OS error, stderr: " + error, exc_info=True)

-             self.log.error("OS error running " + ' '.join(cmd), str(e))

+             self.log.debug("OS error, stderr: %s", error, exc_info=True)

+             self.log.error("OS error running %s %s", ' '.join(cmd), str(e))

          return output

  

      @staticmethod
@@ -100,9 +100,9 @@ 

          if os.path.exists(path) and Settings.cache:

              if logger:

                  logger(True)

-             logging.debug('Using cached source: ' + fname)

+             logging.debug('Using cached source: %s', fname)

              return path

-         self.log.debug("  --> %s : %s" % (directory, link))

+         self.log.debug("  --> %s : %s", directory, link)

          if logger:

              logger(False)

          self.urlretrieve(link, path)
@@ -120,7 +120,7 @@ 

          stdout, stderr = p.communicate()

          if p.returncode != 0:

              log = Settings.get_logger()

-             log.debug("Cannot unpack " + archive)

+             log.debug("Cannot unpack %s", archive)

              log.debug("Status: %d, stdout: %s, stderr: %s.",

                        p.returncode, str(stdout), str(stderr))

          return p.returncode == 0
@@ -137,9 +137,9 @@ 

          lines = out.split('\n')[:-1]

          err_lines = filter(lambda l: l.lower().find('error') != -1,

                             lines)

-         if len(err_lines) == 0:

-             Settings.get_logger().debug('Cannot parse rpmlint output: '

-                                         + out)

+         if not err_lines:

+             Settings.get_logger().debug('Cannot parse rpmlint output: %s',

+                                         out)

              return False, 'Cannot parse rpmlint output:'

  

          res = problems.search(err_lines[-1])
@@ -147,11 +147,11 @@ 

              errors, warnings = res.groups()

              if errors == '0' and warnings == '0':

                  return True, None

-             else:

-                 return False, None

-         else:

-             log.debug('Cannot parse rpmlint output: ' + out)

-             return False, 'Cannot parse rpmlint output:'

+ 

+             return False, None

+ 

+         log.debug('Cannot parse rpmlint output: ' + out)

+         return False, 'Cannot parse rpmlint output:'

  

  

  # vim: set expandtab ts=4 sw=4:

file modified
+29 -28
@@ -99,7 +99,7 @@ 

      if set(arches) == set(['noarch']):

          buildarch = 'noarch'

      else:

-         buildarch = [a for a in arches if a is not 'noarch'][0]

+         buildarch = [a for a in arches if a != 'noarch'][0]

      macros['%buildarch'] = buildarch

      if buildarch == 'x86_64':

          macros['%_libdir'] = '/usr/lib64'
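
The comparison rewritten above (and a similar one further down in this file) goes beyond style: "is" and "is not" test object identity, so whether a value matches the literal 'noarch' depends on string interning, while == and != compare values and always express what the condition intends. A minimal sketch:

    arch = "".join(["no", "arch"])     # built at runtime, not interned like a literal

    print(arch == "noarch")            # True: value comparison
    print(arch is "noarch")            # typically False; newer Pythons warn about
                                       # identity checks against literals
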
@@ -129,8 +129,8 @@ 

          macros = {}

          values = self._rpm_eval(tags).split()

          taglist = tags.split()

-         for i in range(0, len(taglist)):

-             macros[taglist[i]] = values[i]

+         for i, tag in enumerate(taglist):

+             macros[tag] = values[i]

          return macros

  

      def _get_prebuilt_macros(self, spec, flags):
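
The loop rewrite above looks like pylint's consider-using-enumerate suggestion; since the loop is really pairing two equally long lists, zip (or a dict comprehension) would arguably read even better. A sketch with placeholder tag and value lists:

    taglist = ["%_libdir", "%_bindir"]        # placeholder macro names
    values = ["/usr/lib64", "/usr/bin"]       # placeholder evaluated values

    # Index-free pairing of the two lists:
    macros = {tag: value for tag, value in zip(taglist, values)}
    print(macros)
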
@@ -146,7 +146,7 @@ 

              _arch = check_output('rpm --eval %_arch'.split()).strip()

          except CalledProcessError:

              raise ReviewError("Can't evaluate 'rpm --eval %_arch")

-         if buildarch is 'x86_64' and _arch is not 'x86_64':

+         if buildarch == 'x86_64' and _arch != 'x86_64':

              raise ReviewError("Can't build x86_64 on i86 host")

          return macros

  
@@ -162,7 +162,9 @@ 

          config_opts = {}

          with open(path) as f:

              content = f.read()

-             content = re.sub(r'include\((.*)\)', r'exec(open(\g<1>).read(), {}, locals())', content)

+             content = re.sub(r'include\((.*)\)',

+                              r'exec(open(\g<1>).read(), {}, locals())',

+                              content)

              config = compile(content, path, 'exec')

          exec(config)

          self.mock_root = config_opts['root']
@@ -218,8 +220,8 @@ 

              logging.error("Command failed", exc_info=True)

              return "Command utterly failed. See logs for details"

          if p.returncode != 0 and header:

-             logging.info(header + " command returned error code %i",

-                          p.returncode)

+             logging.info("%s command returned error code %i",

+                          header, p.returncode)

          return None if p.returncode == 0 else str(output)

  

      def _get_topdir(self):
@@ -230,7 +232,7 @@ 

          cmd.extend(['-q', '--chroot', '--', 'rpm --eval %_topdir'])

          try:

              self._topdir = check_output(cmd).strip()

-             self.log.debug("_topdir: " + str(self._topdir))

+             self.log.debug("_topdir: %s", str(self._topdir))

          except (CalledProcessError, OSError):

              self.log.info("Cannot evaluate %topdir in mock, using"

                            " hardcoded /builddir/build")
@@ -281,8 +283,8 @@ 

          ''' Return resultdir used by mock. '''

          if Settings.resultdir:

              return Settings.resultdir

-         else:

-             return ReviewDirs.results

+ 

+         return ReviewDirs.results

  

      def get_package_rpm_path(self, nvr):

          '''
@@ -292,9 +294,9 @@ 

          '''

          pattern = '%s-%s*' % (nvr.name, nvr.version)

          paths = self._get_rpm_paths(pattern)

-         paths = filter(lambda p: p.endswith('.rpm')

-                        and not p.endswith('.src.rpm'), paths)

-         if len(paths) == 0:

+         paths = filter(lambda p: p.endswith('.rpm') and

+                        not p.endswith('.src.rpm'), paths)

+         if not paths:

              raise ReviewError('No built package found for ' + nvr.name)

          elif len(paths) > 1:

              raise ReviewError('Multiple packages found for ' + nvr.name)
@@ -312,7 +314,7 @@ 

              pattern = '*%s-%s*' % (spec.name, spec.version)

              paths = self._get_rpm_paths(pattern)

              paths = [p for p in paths if p.endswith('.src.rpm')]

-             if len(paths) == 0:

+             if not paths:

                  raise ReviewError('No srpm found for ' + spec.name)

              elif len(paths) > 1:

                  raise ReviewError('Multiple srpms found for ' + spec.name)
@@ -370,8 +372,7 @@ 

          cmd.append('rm -rf $(rpm --eval %_builddir)/*')

          errmsg = self._run_cmd(cmd)

          if errmsg:

-             self.log.debug('Cannot clear build area: ' + errmsg +

-                            ' (ignored)')

+             self.log.debug('Cannot clear build area: %s (ignored)', errmsg)

          return None

  

      @staticmethod
@@ -403,7 +404,7 @@ 

          cmd.append(os.path.basename(srpm.filename))

          errmsg = self._run_cmd(cmd)

          if errmsg:

-             self.log.warning("Cannot run mock --copyin: " + errmsg)

+             self.log.warning("Cannot run mock --copyin: %s", errmsg)

              return errmsg

          cmd = self._mock_cmd()

          cmd += ['--chroot', '--']
@@ -414,8 +415,8 @@ 

          cmd.append(script)

          errmsg = self._run_cmd(cmd)

          if errmsg:

-             self.log.warning("Cannot run mock --chroot rpmbuild -bp: "

-                              + errmsg)

+             self.log.warning("Cannot run mock --chroot rpmbuild -bp: %s",

+                              errmsg)

              return errmsg

          return None

  
@@ -434,7 +435,7 @@ 

          cmd += ' ' + filename + ' 2>&1 | tee build.log'

          if not Settings.verbose and ' -q' not in cmd:

              cmd += ' | egrep "Results and/or logs|ERROR" '

-         self.log.debug('Build command: %s' % cmd)

+         self.log.debug('Build command: %s', cmd)

          rc = call(cmd, shell=True)

          self.builddir_cleanup()

          rc = str(rc)
@@ -449,9 +450,9 @@ 

              self.log.info('Build completed')

              return None

          else:

-             self.log.debug('Build failed rc = ' + rc)

-             error = ReviewError('mock build failed, see ' + self.resultdir

-                                 + '/build.log')

+             self.log.debug('Build failed rc = %s', rc)

+             error = ReviewError('mock build failed, see %s/build.log',

+                                 self.resultdir)

              error.show_logs = False

              raise error

  
@@ -471,7 +472,7 @@ 

              if os.path.exists(p):

                  p = os.path.basename(p).rsplit('-', 2)[0]

              if self.is_installed(p):

-                 self.log.debug('Skipping already installed: ' + p)

+                 self.log.debug('Skipping already installed: %s', p)

                  return False

              return True

  
@@ -480,8 +481,8 @@ 

          self._run_cmd(cmd, '--init')

          cmd = self._mock_cmd()

          rpms = filter(is_not_installed, list(set(packages)))

-         if len(rpms) == 0:

-             return

+         if not rpms:

+             return None

          self._clear_rpm_db()

  

          cmd = self._mock_cmd()
@@ -519,7 +520,7 @@ 

              ''' Return part of output to be displayed. '''

              lines = output.split('\n')

              l = ''

-             while not l.startswith('rpmlint:') and len(lines) > 0:

+             while not l.startswith('rpmlint:') and lines:

                  l = lines.pop(0)

              text = ''

              for l in lines:
@@ -545,7 +546,7 @@ 

          script = script.replace('@config@', config)

          script = script.replace('@rpm_names@', rpm_names)

          ok, output = _run_script(script)

-         self.log.debug("Script output: " + output)

+         self.log.debug("Script output: %s", output)

          if not ok:

              return False, output + '\n'

          ok, err_msg = self.check_rpmlint_errors(output, self.log)

file modified
+3 -4
@@ -21,10 +21,9 @@ 

  import os.path

  

  from glob import glob

+ from urlparse import urlparse

  from review_dirs import ReviewDirs

  from settings import Settings

- from urlparse import urlparse

- 

  from abstract_bug import AbstractBug

  

  
@@ -53,7 +52,7 @@ 

          pattern = os.path.join(ReviewDirs.startdir,

                                 self.name + '*.src.rpm')

          srpms = glob(pattern)

-         if len(srpms) == 0:

+         if not srpms:

              raise self.BugError("Cannot find srpm: " + pattern)

          elif len(srpms) > 1:

              raise self.BugError("More than one srpm found for: "
@@ -65,7 +64,7 @@ 

          pattern = os.path.join(ReviewDirs.startdir,

                                 self.name + '*.spec')

          specs = glob(pattern)

-         if len(specs) == 0:

+         if not specs:

              raise self.BugError("Cannot find spec: " + pattern)

          elif len(specs) > 1:

              raise self.BugError("More than one spec found for: "

file modified
+2 -2
@@ -94,8 +94,8 @@ 

              return '1' + str(result.check.sort_key)

          elif result.check.is_passed:

              return '2' + str(result.check.sort_key)

-         else:

-             return '3' + str(result.check.sort_key)

+ 

+         return '3' + str(result.check.sort_key)

  

      groups = list(set([hdr(test.group) for test in results]))

      res = None

@@ -90,7 +90,7 @@ 

                      buildlink = os.readlink(os.path.join(wd, 'BUILD'))

                  except OSError:

                      buildlink = None

-             logging.info("Clearing old review directory: " + wd)

+             logging.info("Clearing old review directory: %s", wd)

              shutil.rmtree(wd)

              os.mkdir(wd)

              if Settings.cache:
@@ -118,7 +118,7 @@ 

                                                  ' new dir: ' + wd)

          self._create_and_copy_wd(wd, reuse_old)

          os.chdir(wd)

-         logging.info("Using review directory: " + wd)

+         logging.info("Using review directory: %s", wd)

          self.wdir = wd

          for d in self.WD_DIRS:

              if not os.path.exists(d):

@@ -22,10 +22,10 @@ 

  Tools for helping Fedora package reviewers

  '''

  

- import ansi

  import os.path

  import sys

  import time

+ import ansi

  

  from bugzilla_bug import BugzillaBug

  from check_base import SimpleTestResult
@@ -80,8 +80,8 @@ 

          Settings.dump()

          if not self.bug.find_urls():

              raise self.HelperError('Cannot find .spec or .srpm URL(s)')

-         self.log.debug("find_urls completed: %.3f"

-                        % (time.time() - clock))

+         self.log.debug("find_urls completed: %.3f",

+                        time.time() - clock)

          clock = time.time()

  

          if not ReviewDirs.is_inited:
@@ -92,7 +92,7 @@ 

  

          if not self.bug.download_files():

              raise self.HelperError('Cannot download .spec and .srpm')

-         self.log.debug("Url download completed: %.3f" % (time.time() - clock))

+         self.log.debug("Url download completed: %.3f", time.time() - clock)

  

          Settings.name = self.bug.get_name()

          self._run_checks(self.bug.spec_file, self.bug.srpm_file, outfile)
@@ -211,16 +211,17 @@ 

              self._list_plugins()

              make_report = False

          elif Settings.url:

-             self.log.info("Processing bug on url: " + Settings.url)

+             self.log.info("Processing bug on url: %s", Settings.url)

              self.bug = UrlBug(Settings.url)

          elif Settings.copr_build_descriptor:

-             self.log.info("Processing COPR build: " + Settings.copr_build_descriptor)

+             self.log.info("Processing COPR build: %s",

+                           Settings.copr_build_descriptor)

              self.bug = CoprBug(Settings.copr_build_descriptor)

          elif Settings.bug:

-             self.log.info("Processing bugzilla bug: " + Settings.bug)

+             self.log.info("Processing bugzilla bug: %s", Settings.bug)

              self.bug = BugzillaBug(Settings.bug)

          elif Settings.name:

-             self.log.info("Processing local files: " + Settings.name)

+             self.log.info("Processing local files: %s", Settings.name)

              self.bug = NameBug(Settings.name)

          if make_report:

              if not Mock.is_available() and not Settings.prebuilt:
@@ -231,9 +232,8 @@ 

          ''' Load urls, run checks and make report, '''

          # pylint: disable=bare-except

          started_at = time.time()

-         self.log.debug('fedora-review ' + __version__ + ' ' +

-                        BUILD_FULL + ' started')

-         self.log.debug("Command  line: " + ' '.join(sys.argv))

+         self.log.debug('fedora-review %s %s started', __version__, BUILD_FULL)

+         self.log.debug("Command  line: %s", ' '.join(sys.argv))

          try:

              rcode = 0

              self._do_run(outfile)
@@ -244,7 +244,7 @@ 

                                            "Can't parse the spec file: ",

                                            str(err))

                  write_xml_report(nvr, [result])

-             self.log.debug("ReviewError: " + str(err), exc_info=True)

+             self.log.debug("ReviewError: %s", str(err), exc_info=True)

              if not err.silent:

                  msg = 'ERROR: ' + str(err)

                  if err.show_logs:
@@ -254,10 +254,10 @@ 

          except:

              self.log.debug("Exception down the road...", exc_info=True)

              self.log.error('Exception down the road...'

-                            '(logs in ' + Settings.session_log + ')')

+                            '(logs in %s)', Settings.session_log)

              rcode = 1

-         self.log.debug("Report completed:  %.3f seconds"

-                        % (time.time() - started_at))

+         self.log.debug("Report completed:  %.3f seconds",

+                        time.time() - started_at)

          return rcode

  

  

file modified
+4 -5
@@ -63,7 +63,7 @@ 

                  s += '> '

              else:

                  s += 'What? : ' + bin(sense[ix])

-                 self.log.warning("Bad RPMSENSE: " + bin(sense[ix]))

+                 self.log.warning("Bad RPMSENSE: %s", bin(sense[ix]))

              s += versions[ix]

              dp.append(s)

          return dp
@@ -85,11 +85,10 @@ 

      # RPMTAG_POSTTRANSPROG are given as list on F18, but not before

      def header_to_str(self, tag):

          ''' Convert header in a string, to cope with API changes in F18'''

-         if isinstance(self.header[tag], dict) or isinstance(self.header[tag],

-                                                             list):

+         if isinstance(self.header[tag], (dict, list)):

              return ' '.join(self.header[tag])

-         else:

-             return self.header[tag]

+ 

+         return self.header[tag]

  

      def _scriptlet(self, prog_tag, script_tag):

          ''' Return inline -p script, script or None. '''

file modified
+9 -5
@@ -29,8 +29,8 @@ 

  import subprocess

  import sys

  

- import ansi

  from packaging import version

+ import ansi

  from review_error import ReviewError

  from xdg_dirs import XdgDirs

  
@@ -56,6 +56,7 @@ 

  

  

  def _check_mock_ver():

+     """ Returns mock version """

      try:

          mock_ver = subprocess.check_output(['mock', '--version'])

      except subprocess.CalledProcessError:
@@ -64,8 +65,9 @@ 

  

  

  def _mock_options_default():

+     """ Returns the default mock options """

      mock_opts = '--no-cleanup-after --no-clean'

-     if(version.parse(_check_mock_ver()) >= version.parse('1.4.1')):

+     if version.parse(_check_mock_ver()) >= version.parse('1.4.1'):

          mock_opts = '--no-bootstrap-chroot %s' % mock_opts

      return mock_opts

  
@@ -82,7 +84,8 @@ 

                         metavar='<url>',

                         help='Use another bugzilla, using complete'

                         ' url to bug page.')

-     modes.add_argument('--copr-build', default=None, dest='copr_build_descriptor',

+     modes.add_argument('--copr-build', default=None,

+                        dest='copr_build_descriptor',

                         metavar='<build_descriptor>',

                         help='Use COPR build identified by its ID,'

                         ' frontend URI, or backend URI. \
@@ -136,7 +139,7 @@ 

                            default=_mock_options_default(),

                            dest='mock_options',

                            help=('Options to specify to mock for the build,'

-                           ' defaults to %s' % _mock_options_default()))

+                                 ' defaults to %s' % _mock_options_default()))

      optional.add_argument('--other-bz', default=None,

                            metavar='<bugzilla url>', dest='other_bz',

                            help='Alternative bugzilla URL')
@@ -259,6 +262,7 @@ 

          self.uniqueext = None

          self.configdir = None

          self.log_level = None

+         self.prebuilt = None

          self.verbose = False

          self.name = None

          self.use_colors = False
@@ -351,7 +355,7 @@ 

              try:

                  self.log.debug("    " + k + ": " + v.__str__())

              except AttributeError:

-                 self.log.debug("    " + k + ": not printable")

+                 self.log.debug("    %s: not printable", k)

  

      def do_logger_setup(self, lvl=None):

          ''' Setup Python logging. lvl is a logging.* thing like

file modified
+14 -14
@@ -51,10 +51,10 @@ 

              ''' Default logger logs info messages. '''

              if cache:

                  path = urlparse(url).path

-                 self.log.info("Using cached data for (%s): %s" %

-                               (tag, os.path.basename(path)))

+                 self.log.info("Using cached data for (%s): %s",

+                               tag, os.path.basename(path))

              else:

-                 self.log.info("Downloading (%s): %s" % (tag, url))

+                 self.log.info("Downloading (%s): %s", tag, url)

  

          HelpersMixin.__init__(self)

          self.specurl = url
@@ -69,20 +69,20 @@ 

                  cached = os.path.join(ReviewDirs.upstream,

                                        url.rsplit('/', 1)[1])

                  if os.path.exists(cached):

-                     self.log.info("Using cached upstream: " + cached)

+                     self.log.info("Using cached upstream: %s", cached)

                      self.filename = cached

                      return

                  self.log.warning(

-                     "No cache found for %s, downloading anyway."

-                     % cached)

+                     "No cache found for %s, downloading anyway.",

+                     cached)

              try:

                  self.filename = self._get_file(url,

                                                 ReviewDirs.upstream,

                                                 my_logger)

              except DownloadError as ex:

-                 self.log.debug('Download error on %s, : %s' % (url, str(ex)),

+                 self.log.debug('Download error on %s, : %s', url, str(ex),

                                 exc_info=True)

-                 self.log.warning('Cannot download url: ' + url)

+                 self.log.warning('Cannot download url: %s', url)

                  self.downloaded = False

                  # get the filename

                  url = urlparse(url)[2].split('/')[-1]
@@ -91,27 +91,27 @@ 

              local_src = os.path.join(ReviewDirs.startdir, url)

              if os.path.isfile(local_src):

                  self.log.info(

-                     "Using local file " + url + " as " + tag)

+                     "Using local file %s as %s", url, tag)

                  srcdir = ReviewDirs.startdir

                  self.local_src = local_src

              else:

-                 self.log.info("No upstream for (%s): %s" % (tag, url))

+                 self.log.info("No upstream for (%s): %s", tag, url)

                  srcdir = ReviewDirs.srpm_unpacked

              self.filename = os.path.join(srcdir, url)

              self.url = 'file://' + self.filename

  

      # True if the source is a local file in srpm, false if a downloaded

      # url or tarball provided by user.

-     local = property(lambda self: (not self.is_url or not self.downloaded)

-                      and not self.local_src)

+     local = property(lambda self: (not self.is_url or not self.downloaded) and

+                      not self.local_src)

  

      # True if downloadable, but the uri couldn't be accessed.

      is_failed = property(lambda self: self.is_url and not self.downloaded)

  

      def check_source_checksum(self):

          ''' Check source with upstream source using checksumming. '''

-         self.log.debug("Checking source {0} : {1}".format(Settings.checksum,

-                                                           self.filename))

+         self.log.debug("Checking source %s : %s", Settings.checksum,

+                        self.filename)

          if self.downloaded:

              return self._checksum(self.filename)

          else:

@@ -20,10 +20,10 @@ 

  '''

  

  import re

- import rpm

  import sys

  import urllib

  

+ import rpm

  from review_error import ReviewError, SpecParseReviewError

  from settings import Settings

  from mock import Mock
@@ -105,8 +105,8 @@ 

              try:

                  return Mock.get_package_rpm_path(nvr)

              except ReviewError:

-                 self.log.warning("Package %s-%s-%s not built"

-                                  % (nvr.name, nvr.version, nvr.release))

+                 self.log.warning("Package %s-%s-%s not built",

+                                  nvr.name, nvr.version, nvr.release)

                  return None

  

          pkgs = [p.header[rpm.RPMTAG_NAME] for p in self.spec.packages]

@@ -56,7 +56,7 @@ 

          rc = call(cmd, shell=True)

          if rc != 0:

              self.log.warn(

-                 "Cannot unpack %s into %s" % (self.filename, wdir))

+                 "Cannot unpack %s into %s", self.filename, wdir)

          else:

              self._unpacked_src = wdir

          os.chdir(oldpwd)
@@ -68,7 +68,7 @@ 

          files = glob(os.path.join(self._unpacked_src, '*'))

          if self.filename not in [os.path.basename(f) for f in files]:

              self.log.error(

-                 'Trying to unpack non-existing source: ' + path)

+                 'Trying to unpack non-existing source: %s', path)

              return None

          extract_dir = os.path.join(self._unpacked_src,

                                     self.filename + '-extract')
@@ -81,7 +81,7 @@ 

                                   extract_dir)

          if not rv:

              self.log.debug("Cannot unpack %s, so probably not an "

-                            "archive. Copying instead" % self.filename)

+                            "archive. Copying instead", self.filename)

              shutil.copy(os.path.join(self._unpacked_src, self.filename),

                          extract_dir)

          return extract_dir
@@ -98,10 +98,10 @@ 

              self.log.warn('No unpacked sources found (!)')

              return "ERROR"

          if filename not in [os.path.basename(f) for f in src_files]:

-             self.log.warn('Cannot find source: ' + filename)

+             self.log.warn('Cannot find source: %s', filename)

              return "ERROR"

          path = os.path.join(self._unpacked_src, filename)

-         self.log.debug("Checking {0} for {1}".format(Settings.checksum, path))

+         self.log.debug("Checking %s for %s", Settings.checksum, path)

          return self._checksum(path)

  

  

file modified
+2 -2
@@ -63,13 +63,13 @@ 

  

      def find_srpm_url(self):

          urls = self._find_urls_by_ending('.src.rpm')

-         if len(urls) == 0:

+         if not urls:

              raise self.BugError('Cannot find source rpm URL')

          self.srpm_url = urls[0]

  

      def find_spec_url(self):

          urls = self._find_urls_by_ending('.spec')

-         if len(urls) == 0:

+         if not urls:

              raise self.BugError('Cannot find spec file URL')

          self.spec_url = urls[0]

  

This PR fixes a number of pylint warnings across many files (e.g. logging warnings, line-too-long, trailing-whitespace, wrong-import-order, no-else-return, bad-continuation). In a future PR I will address the remaining warnings case by case, because they deserve a bit more attention.
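For context, the checks touched here reduce to a handful of small patterns. The sketch below is purely illustrative (the function names are made up and do not correspond to FedoraReview modules); under that assumption it shows what each warning asks for:

    # wrong-import-order: standard-library modules are imported before
    # third-party/local ones (e.g. 'ansi' moves below 'os', 'sys', 'time').
    import logging

    log = logging.getLogger(__name__)


    def find_first_srpm(urls):
        # len-as-condition: test the sequence's truthiness, not len() == 0.
        if not urls:
            raise ValueError('Cannot find source rpm URL')
        # logging-not-lazy: pass the arguments to the logger instead of
        # pre-formatting the string with '%' or '+', so interpolation is
        # deferred until the record is actually emitted.
        # bad-continuation: the continuation line aligns with the opening
        # bracket (or uses a consistent hanging indent).
        log.debug("Found %d candidate(s), using the first one: %s",
                  len(urls), urls[0])
        return urls[0]


    def header_to_str(header, tag):
        # no-else-return: the else branch after a return is redundant.
        if isinstance(header[tag], (dict, list)):
            return ' '.join(header[tag])
        return header[tag]

Running pylint over code written this way should stay quiet on these particular checks, which is essentially what the diffs above do file by file.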

Metadata Update from @ngompa:
- Request assigned

5 years ago

I'm sorry, but merging your PR is currently blocked on pagure#4142. Once that is resolved, I will merge it shortly.

Pull-Request has been merged by ngompa

5 years ago

Thanks for your patience, and thanks for the patches!