#1117 python3 kojid
Merged 4 years ago by mikem. Opened 4 years ago by tkopecek.
tkopecek/koji issue1116 into master

file modified: Makefile
+1 -1
@@ -81,7 +81,7 @@ 

  	PYTHONPATH=hub/.:plugins/hub/.:plugins/builder/.:plugins/cli/.:cli/. coverage3 run \

  	    --rcfile .coveragerc3 --source . \

  	    /usr/bin/nosetests \

- 	    tests/test_lib tests/test_cli

+ 	    tests/test_lib tests/test_cli tests/test_builder

  	coverage3 report --rcfile .coveragerc3

  	coverage3 html --rcfile .coveragerc3

  	@echo Full coverage report at file://${PWD}/htmlcov/index.html

file modified: builder/kojid
+313 -167
@@ -23,64 +23,81 @@ 

  

  from __future__ import absolute_import

  from __future__ import division

- import six

- try:

-     import krbV

- except ImportError:  # pragma: no cover

-     krbV = None

- import koji

- import koji.plugin

- import koji.rpmdiff

- import koji.util

- import koji.tasks

+ 

+ import Cheetah.Template

+ import copy

  import glob

+ import grp

+ import io

  import json

  import logging

  import logging.handlers

- from koji.daemon import incremental_upload, log_output, TaskManager, SCM

- from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask

- from koji.util import parseStatus, isSuccess, dslice, dslice_ex, to_list

- import multilib.multilib as multilib

  import os

  import pwd

- import grp

  import random

  import re

  import rpm

- import rpmUtils.arch

  import shutil

  import signal

+ import six

+ import six.moves.xmlrpc_client

  import smtplib

  import socket

  import sys

  import time

  import traceback

  import xml.dom.minidom

- import six.moves.xmlrpc_client

  import zipfile

- import copy

- import Cheetah.Template

  from six.moves.configparser import ConfigParser

  from fnmatch import fnmatch

  from gzip import GzipFile

  from optparse import OptionParser, SUPPRESS_HELP

- from yum import repoMDObject

- import yum.packages

- import yum.Errors

  

- #imports for LiveCD, LiveMedia, and Appliance handler

- image_enabled = False

+ from multilib import multilib

+ import koji

+ import koji.arch

+ import koji.plugin

+ import koji.rpmdiff

+ import koji.util

+ import koji.tasks

+ from koji.daemon import incremental_upload, log_output, TaskManager, SCM

+ from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask

+ from koji.util import parseStatus, isSuccess, dslice, dslice_ex, to_list

+ 

+ try:

+     import krbV

+ except ImportError:  # pragma: no cover

+     krbV = None

+ 

+ try:

+     import librepo

+ except ImportError:

+     librepo = None

+ 

+ try:

+     import dnf

+ except ImportError:

+     dnf = None

+ 

+ try:

+     # yum

+     from yum import repoMDObject

+     import yum.packages

+     import yum.Errors

+     yum_available = True

+ except ImportError:

+     yum_available = False

+ 

+ # imports for LiveCD, LiveMedia, and Appliance handler

  try:

      import pykickstart.parser as ksparser

      import pykickstart.handlers.control as kscontrol

      import pykickstart.errors as kserrors

-     import hashlib

      import iso9660 # from pycdio

      image_enabled = True

  except ImportError:  # pragma: no cover

-     pass

+     image_enabled = False

  

- ozif_enabled = False

  try:

      from imgfac.BuildDispatcher import BuildDispatcher

      from imgfac.Builder import Builder
@@ -96,7 +113,7 @@ 

      from imgfac.FactoryUtils import qemu_convert_cmd

      ozif_enabled = True

  except ImportError:  # pragma: no cover

-     pass

+     ozif_enabled = False

  

  def main(options, session):

      logger = logging.getLogger("koji.build")
@@ -260,9 +277,8 @@ 

          output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)

  

          #write config

-         fo = open(configfile,'w')

-         fo.write(output)

-         fo.close()

+         with open(configfile,'w') as fo:

+             fo.write(output)

  

      def _repositoryEntries(self, pi, plugin=False):

          entries = []
@@ -360,9 +376,8 @@ 

  </settings>

  """

          settings = settings % locals()

-         fo = open(self.rootdir() + destfile, 'w')

-         fo.write(settings)

-         fo.close()

+         with open(self.rootdir() + destfile, 'w') as fo:

+             fo.write(settings)

  

      def mock(self, args):

          """Run mock"""
@@ -605,7 +620,7 @@ 

          try:

              ts = rpm.TransactionSet()

              for h in ts.dbMatch():

-                 pkg = koji.get_header_fields(h,fields)

+                 pkg = koji.get_header_fields(h, fields)

                  #skip our fake packages

                  if pkg['name'] in ['buildsys-build', 'gpg-pubkey']:

                      #XXX config
@@ -703,35 +718,59 @@ 

              ext_url = erepo['url'].replace('$arch', self.br_arch)

              erepo_idx[ext_url] = erepo

          pathinfo = koji.PathInfo(topdir='')

-         #XXX - cheap hack to get relative paths

-         repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])

-         repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml')

  

+         repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])

          opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

          opts['tempdir'] = self.options.workdir

-         fo = koji.openRemoteFile(repomdpath, **opts)

-         try:

-             repodata = repoMDObject.RepoMD('ourrepo', fo)

-         except:

-             raise koji.BuildError("Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch))

-         data  = repodata.getData('origin')

-         pkgorigins  = data.location[1]

+ 

+         # prefer librepo

+         if librepo is not None:

+             repo_url = os.path.join(repodir, self.br_arch)

+             # repo_url can start with '/', don't use os.path.join

+             if self.options.topurl:

+                 repo_url = '%s/%s' % (self.options.topurl, repo_url)

+             elif self.options.topdir:

+                 repo_url = '%s/%s' % (self.options.topdir, repo_url)

+             tmpdir = os.path.join(self.options.workdir, 'librepo-markExternalRPMs')

+             koji.ensuredir(tmpdir)

+             h = librepo.Handle()

+             r = librepo.Result()

+             h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)

+             h.setopt(librepo.LRO_URLS, [repo_url])

+             h.setopt(librepo.LRO_DESTDIR, tmpdir)

+             h.perform(r)

+             pkgorigins = r.getinfo(librepo.LRR_YUM_REPOMD)['origin']['location_href']

+             koji.util.rmtree(tmpdir)

+         elif yum_available:

+             #XXX - cheap hack to get relative paths

+             repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml')

+             with koji.openRemoteFile(repomdpath, **opts) as fo:

+                 try:

+                     repodata = repoMDObject.RepoMD('ourrepo', fo)

+                 except:

+                     raise koji.BuildError("Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch))

+             data  = repodata.getData('origin')

+             pkgorigins  = data.location[1]

+         else:

+             # shouldn't occur

+             raise koji.GenericError("install librepo or yum")

  

          relpath = os.path.join(repodir, self.br_arch, pkgorigins)

-         fo = koji.openRemoteFile(relpath, **opts)

-         #at this point we know there were external repos at the create event,

-         #so there should be an origins file.

-         origin_idx = {}

-         fo2 = GzipFile(fileobj=fo, mode='r')

-         for line in fo2:

-             parts=line.split(None, 2)

-             if len(parts) < 2:

-                 continue

-             #first field is formated by yum as [e:]n-v-r.a

-             nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" %  koji.parse_NVRA(parts[0])

-             origin_idx[nvra] = parts[1]

-         fo2.close()

-         fo.close()

+         with koji.openRemoteFile(relpath, **opts) as fo:

+             #at this point we know there were external repos at the create event,

+             #so there should be an origins file.

+             origin_idx = {}

+             with GzipFile(fileobj=fo, mode='r') as fo2:

+                 if six.PY3:

+                     fo2 = io.TextIOWrapper(fo2, encoding='utf-8')

+                 for line in fo2:

+                     parts=line.split(None, 2)

+                     if len(parts) < 2:

+                         continue

+                     #first field is formated by yum as [e:]n-v-r.a

+                     nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" %  koji.parse_NVRA(parts[0])

+                     origin_idx[nvra] = parts[1]

          # mergerepo starts from a local repo in the task workdir, so internal

          # rpms have an odd-looking origin that we need to look for

          localtail = '/repo_%s_premerge/' % self.repo_info['id']
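Aside: the librepo calls introduced above amount to a small, self-contained metadata fetch. A minimal sketch of the same flow, assuming only a yum-style repo URL (the temporary-directory handling here is illustrative):

    import tempfile
    import librepo

    def get_origin_href(repo_url):
        """Download repomd.xml and return the pkgorigins location within the repo."""
        tmpdir = tempfile.mkdtemp()
        h = librepo.Handle()
        r = librepo.Result()
        h.setopt(librepo.LRO_REPOTYPE, librepo.LR_YUMREPO)
        h.setopt(librepo.LRO_URLS, [repo_url])
        h.setopt(librepo.LRO_DESTDIR, tmpdir)
        h.perform(r)
        # LRR_YUM_REPOMD is the parsed repomd.xml, keyed by metadata type
        return r.getinfo(librepo.LRR_YUM_REPOMD)['origin']['location_href']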
@@ -922,7 +961,7 @@ 

              self.event_id = self.session.getLastEvent()['id']

          srpm = self.getSRPM(src, build_tag, repo_info['id'])

          h = self.readSRPMHeader(srpm)

-         data = koji.get_header_fields(h,['name','version','release','epoch'])

+         data = koji.get_header_fields(h, ['name','version','release','epoch'])

          data['task_id'] = self.id

          if getattr(self, 'source', False):

              data['source'] = self.source['source']
@@ -1006,10 +1045,9 @@ 

          relpath = "work/%s" % srpm

          opts = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

          opts['tempdir'] = self.workdir

-         fo = koji.openRemoteFile(relpath, **opts)

-         h = koji.get_rpm_header(fo)

-         fo.close()

-         if h[rpm.RPMTAG_SOURCEPACKAGE] != 1:

+         with koji.openRemoteFile(relpath, **opts) as fo:

+             h = koji.get_rpm_header(fo)

+         if not koji.get_header_field(h, 'sourcepackage'):

              raise koji.BuildError("%s is not a source package" % srpm)

          return h

  
@@ -1028,9 +1066,9 @@ 

          archlist = arches.split()

          self.logger.debug('base archlist: %r' % archlist)

          # - adjust arch list based on srpm macros

-         buildarchs = h[rpm.RPMTAG_BUILDARCHS]

-         exclusivearch = h[rpm.RPMTAG_EXCLUSIVEARCH]

-         excludearch = h[rpm.RPMTAG_EXCLUDEARCH]

+         buildarchs = koji.get_header_field(h, 'buildarchs')

+         exclusivearch = koji.get_header_field(h, 'exclusivearch')

+         excludearch = koji.get_header_field(h, 'excludearch')

          if buildarchs:

              archlist = buildarchs

              self.logger.debug('archlist after buildarchs: %r' % archlist)
@@ -1071,8 +1109,8 @@ 

          # see https://pagure.io/koji/issue/19

  

          h = self.readSRPMHeader(srpm)

-         exclusivearch = h[rpm.RPMTAG_EXCLUSIVEARCH]

-         excludearch = h[rpm.RPMTAG_EXCLUDEARCH]

+         exclusivearch = koji.get_header_field(h, 'exclusivearch')

+         excludearch = koji.get_header_field(h, 'excludearch')

  

          if exclusivearch or excludearch:

              # if one of the tag arches is filtered out, then we can't use a
@@ -1216,13 +1254,13 @@ 

          return self.checkHostArch(tag, hostdata)

  

      def srpm_sanity_checks(self, filename):

-         header = koji.get_rpm_header(filename)

+         h_fields = koji.get_header_fields(filename, ['packager', 'vendor', 'distribution'])

  

-         if not header[rpm.RPMTAG_PACKAGER]:

+         if not h_fields['packager']:

              raise koji.BuildError("The build system failed to set the packager tag")

-         if not header[rpm.RPMTAG_VENDOR]:

+         if not h_fields['vendor']:

              raise koji.BuildError("The build system failed to set the vendor tag")

-         if not header[rpm.RPMTAG_DISTRIBUTION]:

+         if not h_fields['distribution']:

              raise koji.BuildError("The build system failed to set the distribution tag")

  

      def handler(self, pkg, root, arch, keep_srpm, opts=None):
@@ -1243,15 +1281,12 @@ 

              raise koji.BuildError("SRPM file missing: %s" % fn)

          # peel E:N-V-R from package

          h = koji.get_rpm_header(fn)

-         name = h[rpm.RPMTAG_NAME]

-         ver = h[rpm.RPMTAG_VERSION]

-         rel = h[rpm.RPMTAG_RELEASE]

-         epoch = h[rpm.RPMTAG_EPOCH]

-         if h[rpm.RPMTAG_SOURCEPACKAGE] != 1:

+         name = koji.get_header_field(h, 'name')

+         if not koji.get_header_field(h, 'sourcepackage'):

              raise koji.BuildError("not a source package")

          # Disable checking for distribution in the initial SRPM because it

          # might have been built outside of the build system

-         # if not h[rpm.RPMTAG_DISTRIBUTION]:

+         # if not koji.get_header_field(h, 'distribution'):

          #    raise koji.BuildError, "the distribution tag is not set in the original srpm"

  

          self.updateWeight(name)
@@ -1870,9 +1905,8 @@ 

          contents = contents.encode('utf-8')

  

          specfile = spec_template[:-5]

-         specfd = open(specfile, 'w')

-         specfd.write(contents)

-         specfd.close()

+         with open(specfile, 'w') as specfd:

+             specfd.write(contents)

  

          # Run spec file sanity checks.  Any failures will throw a BuildError

          self.spec_sanity_checks(specfile)
@@ -1959,8 +1993,8 @@ 

              raise koji.BuildError('no rpms found')

  

          try:

-             for rpm in [srpm] + rpms:

-                 self.uploadFile(os.path.join(resultdir, rpm))

+             for rpm_fn in [srpm] + rpms:

+                 self.uploadFile(os.path.join(resultdir, rpm_fn))

          except (SystemExit, ServerExit, KeyboardInterrupt):

              raise

          except:
@@ -2228,8 +2262,8 @@ 

          task = self.session.getTaskInfo(self.id)

          user_id = task['owner']

          try:

-             build = self.session.getBuild(build_id, strict=True)

-             tag = self.session.getTag(tag_id, strict=True)

+             self.session.getBuild(build_id, strict=True)

+             self.session.getTag(tag_id, strict=True)

  

              #several basic sanity checks have already been run (and will be run

              #again when we make the final call). Our job is to perform the more
@@ -2337,7 +2371,6 @@ 

                          ignored_arches.add(arch)

  

              # wrap in an RPM if asked

-             rpm_results = None

              spec_url = opts.get('specfile')

              for arch in arches:

                  # get around an xmlrpc limitation, use arches for keys instead
@@ -2354,7 +2387,7 @@ 

              # 1 results hash from the subtasks

              if 'kickstart' in opts:

                  saw_ks = False

-                 for arch in results.keys():

+                 for arch in results:

                      if arch in ignored_arches:

                          continue

                      ks = os.path.basename(opts.get('kickstart'))
@@ -2442,7 +2475,6 @@ 

              self.logger.info('results: %s' % results)

  

              # wrap in an RPM if asked

-             rpm_results = None

              spec_url = opts.get('specfile')

              if spec_url:

                  results[create_task_id]['rpmresults'] = self.buildWrapperRPM(
@@ -2528,7 +2560,6 @@ 

  

              # wrap in an RPM if needed

              spec_url = opts.get('specfile')

-             rpm_results = None

              if spec_url:

                  results[create_task_id]['rpmresults'] = self.buildWrapperRPM(

                      spec_url, create_task_id,
@@ -2860,9 +2891,8 @@ 

          kskoji = os.path.join(broot.tmpdir(), 'koji-image-%s-%i.ks' %

                                (target_info['build_tag_name'], self.id))

          koji.ensuredir(broot.tmpdir())

-         outfile = open(kskoji, 'w')

-         outfile.write(str(self.ks.handler))

-         outfile.close()

+         with open(kskoji, 'w') as outfile:

+             outfile.write(str(self.ks.handler))

  

          # put the new ksfile in the output directory

          if not os.path.exists(kskoji):
@@ -3444,11 +3474,10 @@ 

          else:

              tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

              tops['tempdir'] = self.workdir

-             ks_src = koji.openRemoteFile(ksfile, **tops)

-             kspath = os.path.join(self.workdir, os.path.basename(ksfile))

-             ks_dest = open(kspath, 'w')

-             ks_dest.write(ks_src.read())

-             ks_dest.close()

+             with koji.openRemoteFile(ksfile, **tops) as ks_src:

+                 kspath = os.path.join(self.workdir, os.path.basename(ksfile))

+                 with open(kspath, 'w') as ks_dest:

+                     ks_dest.write(ks_src.read())

          self.logger.debug('uploading kickstart from here: %s' % kspath)

          self.uploadFile(kspath) # upload the original ks file

          return kspath # absolute path to the ks file
@@ -3534,9 +3563,8 @@ 

              an absolute path to the kickstart file we wrote

          """

          kspath = os.path.join(self.workdir, ksname)

-         outfile = open(kspath, 'w')

-         outfile.write(str(ksobj.handler))

-         outfile.close()

+         with open(kspath, 'w') as outfile:

+             outfile.write(str(ksobj.handler))

  

          # put the new ksfile in the output directory

          if not os.path.exists(kspath):
@@ -3668,9 +3696,8 @@ 

          edriver = newxml.getElementsByTagName('driver')[0]

          edriver.setAttribute('type', format)

          xml_path = os.path.join(self.workdir, filename)

-         xmlfd = open(xml_path, 'w')

-         xmlfd.write(newxml.toprettyxml())

-         xmlfd.close()

+         with open(xml_path, 'w') as xmlfd:

+             xmlfd.write(newxml.toprettyxml())

          return xml_path

  

      def getScreenshot(self):
@@ -3719,7 +3746,7 @@ 

          if len(formats) == 0:

              # we only want a raw disk image (no format option given)

              f_dict['raw'] = True

-         elif 'raw' not in f_dict.keys():

+         elif 'raw' not in f_dict:

              f_dict['raw'] = False

          self.logger.debug('Image delivery plan: %s' % f_dict)

          return f_dict
@@ -3785,7 +3812,7 @@ 

              that points to the path of the XML file for that image

          """

          imginfo = {}

-         for fmt in images.keys():

+         for fmt in images:

              imginfo[fmt] = images[fmt]

              lxml = self.fixImageXML(fmt, 'libvirt-%s-%s.xml' % (fmt, self.arch),

                  self.base_img.base_image.parameters['libvirt_xml'])
@@ -4108,9 +4135,8 @@ 

          ApplicationConfiguration(configuration=config)

  

          tdl_path = os.path.join(self.workdir, 'tdl-%s.xml' % self.arch)

-         tdl = open(tdl_path, 'w')

-         tdl.write(template)

-         tdl.close()

+         with open(tdl_path, 'w') as tdl:

+             tdl.write(template)

          self.uploadFile(tdl_path)

  

          # ImageFactory picks a port to the guest VM using a rolling integer.
@@ -4141,8 +4167,8 @@ 

          }

          # record the RPMs that were installed

          if not opts.get('scratch'):

-             fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',

-                 'payloadhash', 'buildtime')

+             #fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',

+             #    'payloadhash', 'buildtime')

              icicle = xml.dom.minidom.parseString(images['raw']['icicle'])

              self.logger.debug('ICICLE: %s' % images['raw']['icicle'])

              for p in icicle.getElementsByTagName('extra'):
@@ -4170,7 +4196,7 @@ 

              br.markExternalRPMs(imgdata['rpmlist'])

  

          # upload the results

-         for format in (f for f in self.formats.keys() if self.formats[f]):

+         for format in (f for f in self.formats if self.formats[f]):

              newimg = images[format]['image']

              if ('ova' in format or format in ('raw-xz', 'liveimg-squashfs', 'tar-gz')):

                  newname = self.imgname + '.' + format.replace('-', '.')
@@ -4257,11 +4283,10 @@ 

          else:

              tops = dict([(k, getattr(self.options, k)) for k in ('topurl','topdir')])

              tops['tempdir'] = self.workdir

-             remote_fileobj = koji.openRemoteFile(filepath, **tops)

              final_path = os.path.join(self.workdir, os.path.basename(filepath))

-             final_fileobj = open(final_path, 'w')

-             final_fileobj.write(remote_fileobj.read())

-             final_fileobj.close()

+             with koji.openRemoteFile(filepath, **tops) as remote_fileobj:

+                 with open(final_path, 'w') as final_fileobj:

+                     shutil.copyfileobj(remote_fileobj, final_fileobj)

          self.logger.debug('uploading retrieved file from here: %s' % final_path)

          self.uploadFile(final_path) # upload the original ks file

          return final_path # absolute path to the ks file
@@ -4386,8 +4411,6 @@ 

  

          # TODO: Copy-paste from BaseImage - refactor

          target_info = self.session.getBuildTarget(opts['target'], strict=True)

-         build_tag = target_info['build_tag']

-         repo_info = self.getRepo(build_tag)

  

          name = opts['name']

          version = opts['version']
@@ -4471,8 +4494,8 @@ 

                      pim = PersistentImageManager.default_manager()

                      pim.add_image(target_image)

                      target.target_image = target_image

-                     open(target_image.data, "w").write("Mock build from task ID: %s" %

-                                                  (str(self.id)))

+                     with open(target_image.data, "w") as f:

+                         f.write("Mock build from task ID: %s" % self.id)

                      target_image.status='COMPLETE'

                  else:

                      target = bd.builder_for_target_image('indirection',
@@ -4641,9 +4664,9 @@ 

  

          # check srpm name

          h = koji.get_rpm_header(srpm)

-         name = h[rpm.RPMTAG_NAME]

-         version = h[rpm.RPMTAG_VERSION]

-         release = h[rpm.RPMTAG_RELEASE]

+         name = koji.get_header_field(h, 'name')

+         version = koji.get_header_field(h, 'version')

+         release = koji.get_header_field(h, 'release')

          srpm_name = "%(name)s-%(version)s-%(release)s.src.rpm" % locals()

          if srpm_name != os.path.basename(srpm):

              raise koji.BuildError('srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm)))
@@ -5045,9 +5068,8 @@ 

          if external_repos:

              self.merge_repos(external_repos, arch, groupdata)

          elif pkglist is None:

-             fo = open(os.path.join(self.datadir, "EMPTY_REPO"), 'w')

-             fo.write("This repo is empty because its tag has no content for this arch\n")

-             fo.close()

+             with open(os.path.join(self.datadir, "EMPTY_REPO"), 'w') as fo:

+                 fo.write("This repo is empty because its tag has no content for this arch\n")

  

          uploadpath = self.getUploadDir()

          files = []
@@ -5169,7 +5191,7 @@ 

              canonArches.add(koji.canonArch(arch))

          arch32s = set()

          for arch in canonArches:

-             if not rpmUtils.arch.isMultiLibArch(arch):

+             if not koji.arch.isMultiLibArch(arch):

                  arch32s.add(arch)

          for arch in arch32s:

              # we do 32-bit multilib arches first so the 64-bit ones can
@@ -5265,8 +5287,11 @@ 

          # sort out our package list(s)

          self.uploadpath = self.getUploadDir()

          self.get_rpms(tag, arch, keys, opts)

-         if opts['multilib'] and rpmUtils.arch.isMultiLibArch(arch):

-             self.do_multilib(arch, self.archmap[arch], opts['multilib'])

+         if opts['multilib'] and koji.arch.isMultiLibArch(arch):

+             if dnf is not None:

+                 self.do_multilib_dnf(arch, self.archmap[arch], opts['multilib'])

+             else:

+                 self.do_multilib_yum(arch, self.archmap[arch], opts['multilib'])

          self.split_pkgs(opts)

          self.write_kojipkgs()

          self.write_pkglist()
@@ -5359,7 +5384,135 @@ 

              raise koji.GenericError('failed to create repo: %s' \

                      % parseStatus(status, ' '.join(cmd)))

  

-     def do_multilib(self, arch, ml_arch, conf):

+ 

+     def do_multilib_dnf(self, arch, ml_arch, conf):

+         repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])

+         mldir = os.path.join(repodir, koji.canonArch(ml_arch))

+         ml_true = set()  # multilib packages we need to include before depsolve

+         ml_conf = os.path.join(koji.pathinfo.work(), conf)

+ 

+         # read pkgs data from multilib repo

+         ml_pkgfile = os.path.join(mldir, 'kojipkgs')

+         with open(ml_pkgfile, 'r') as fo:

+             ml_pkgs = json.load(fo)

+ 

+         # step 1: figure out which packages are multilib (should already exist)

+         dnfbase = dnf.Base()

+         mlm = multilib.DevelMultilibMethod(ml_conf)

+         fs_missing = set()

+         for bnp in self.kojipkgs:

+             rpminfo = self.kojipkgs[bnp]

+             ppath = rpminfo['_pkgpath']

+             dnfbase.fill_sack(load_system_repo=False, load_available_repos=False)

+             po = dnfbase.sack.add_cmdline_package(ppath)

+             if mlm.select(po):

+                 # we need a multilib package to be included

+                 ml_bnp = bnp.replace(arch, self.archmap[arch])

+                 ml_path = os.path.join(mldir, ml_bnp[0].lower(), ml_bnp)

+                 # ^ XXX - should actually generate this

+                 if ml_bnp not in ml_pkgs:

+                     # not in our multilib repo

+                     self.logger.error('%s (multilib) is not on the filesystem' % ml_path)

+                     fs_missing.add(ml_path)

+                     # we defer failure so we can report all the missing deps

+                     continue

+                 ml_true.add(ml_path)

+ 

+         # step 2: set up architectures for dnf configuration

+         self.logger.info("Resolving multilib for %s using method devel" % arch)

+         dnfdir = os.path.join(self.workdir, 'dnf')

+         # TODO: unwind this arch mess

+         archlist = (arch, 'noarch')

+         transaction_arch = arch

+         archlist = archlist + self.compat[self.biarch[arch]]

+         best_compat = self.compat[self.biarch[arch]][0]

+         if koji.arch.archDifference(best_compat, arch) > 0:

+             transaction_arch = best_compat

+         dnfconfig = """

+ [main]

+ debuglevel=2

+ #pkgpolicy=newest

+ #exactarch=1

+ gpgcheck=0

+ #reposdir=/dev/null

+ #cachedir=/dnfcache

+ installroot=%s

+ #logfile=/dnf.log

+ 

+ [koji-%s]

+ name=koji multilib task

+ baseurl=file://%s

+ enabled=1

+ 

+ """ % (dnfdir, self.id, mldir)

+         os.makedirs(os.path.join(dnfdir, "dnfcache"))

+         os.makedirs(os.path.join(dnfdir, 'var/lib/rpm'))

+ 

+         # step 3: proceed with dnf config and set up

+         yconfig_path = os.path.join(dnfdir, 'dnf.conf-koji-%s' % arch)

+         with open(yconfig_path, 'w') as f:

+             f.write(dnfconfig)

+         self.session.uploadWrapper(yconfig_path, self.uploadpath,

+             os.path.basename(yconfig_path))

+         conf = dnf.conf.Conf()

+         conf.reposdir = [] # don't use system repos at all

+         conf.read(yconfig_path)

+         dnfbase = dnf.Base(conf)

+         if hasattr(koji.arch, 'ArchStorage'):

+             dnfbase.conf.arch = transaction_arch

+         else:

+             koji.arch.canonArch = transaction_arch

+         dnfbase.read_all_repos()

+         dnfbase.fill_sack(load_system_repo=False, load_available_repos=True)

+         for pkg in ml_true:

+             dnfbase.install(pkg)

+ 

+         # step 4: execute dnf transaction to get dependencies

+         self.logger.info("Resolving dependencies for arch %s" % arch)

+ 

+         ml_needed = {}

+         try:

+             dnfbase.resolve()

+             self.logger.info('dnf depsolve successfully finished')

+             for po in dnfbase.transaction.install_set:

+                 bnp = os.path.basename(po.localPkg())

+                 dep_path = os.path.join(mldir, bnp[0].lower(), bnp)

+                 ml_needed[dep_path] = po

+                 if not os.path.exists(dep_path):

+                     self.logger.error('%s (multilib dep) not on filesystem' % dep_path)

+                     fs_missing.add(dep_path)

+         except dnf.exceptions.DepsolveError:

+             self.logger.error('dnf depsolve was unsuccessful')

+             raise

+ 

+         if len(fs_missing) > 0:

+             missing_log = os.path.join(self.workdir, 'missing_multilib.log')

+             with open(missing_log, 'w') as outfile:

+                 outfile.write('The following multilib files were missing:\n')

+                 for ml_path in fs_missing:

+                     outfile.write(ml_path + '\n')

+             self.session.uploadWrapper(missing_log, self.uploadpath)

+             raise koji.GenericError('multilib packages missing. '

+                     'See missing_multilib.log')

+ 

+         # step 5: update kojipkgs

+         for dep_path in ml_needed:

+             tspkg = ml_needed[dep_path]

+             bnp = os.path.basename(dep_path)

+             if bnp in self.kojipkgs:

+                 # we expect duplication with noarch, but not other arches

+                 if tspkg.arch != 'noarch':

+                     self.logger.warning("Multilib duplicate: %s", bnp)

+                 continue

+             rpminfo = ml_pkgs[bnp].copy()

+             # fix _pkgpath, which comes from another task and could be wrong

+             # for us

+             # TODO: would be better if we could use the proper path here

+             rpminfo['_pkgpath'] = dep_path

+             rpminfo['_multilib'] = True

+             self.kojipkgs[bnp] = rpminfo

+ 

+ 

+     def do_multilib_yum(self, arch, ml_arch, conf):

          repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])

          mldir = os.path.join(repodir, koji.canonArch(ml_arch))

          ml_true = set()  # multilib packages we need to include before depsolve
@@ -5399,12 +5552,12 @@ 

          transaction_arch = arch

          archlist = archlist + self.compat[self.biarch[arch]]

          best_compat = self.compat[self.biarch[arch]][0]

-         if rpmUtils.arch.archDifference(best_compat, arch) > 0:

+         if koji.arch.archDifference(best_compat, arch) > 0:

              transaction_arch = best_compat

-         if hasattr(rpmUtils.arch, 'ArchStorage'):

+         if hasattr(koji.arch, 'ArchStorage'):

              yumbase.preconf.arch = transaction_arch

          else:

-             rpmUtils.arch.canonArch = transaction_arch

+             koji.arch.canonArch = transaction_arch

  

          yconfig = """

  [main]
@@ -5428,9 +5581,8 @@ 

  

          # step 3: proceed with yum config and set up

          yconfig_path = os.path.join(yumdir, 'yum.conf-koji-%s' % arch)

-         f = open(yconfig_path, 'w')

-         f.write(yconfig)

-         f.close()

+         with open(yconfig_path, 'w') as f:

+             f.write(yconfig)

          self.session.uploadWrapper(yconfig_path, self.uploadpath,

              os.path.basename(yconfig_path))

          yumbase.doConfigSetup(fn=yconfig_path)
@@ -5467,12 +5619,11 @@ 

              raise koji.GenericError(errors)

          if len(fs_missing) > 0:

              missing_log = os.path.join(self.workdir, 'missing_multilib.log')

-             outfile = open(missing_log, 'w')

-             outfile.write('The following multilib files were missing:\n')

-             for ml_path in fs_missing:

-                 outfile.write(ml_path)

-                 outfile.write('\n')

-             outfile.close()

+             with open(missing_log, 'w') as outfile:

+                 outfile.write('The following multilib files were missing:\n')

+                 for ml_path in fs_missing:

+                     outfile.write(ml_path)

+                     outfile.write('\n')

              self.session.uploadWrapper(missing_log, self.uploadpath)

              raise koji.GenericError('multilib packages missing. '

                      'See missing_multilib.log')
@@ -5570,33 +5721,31 @@ 

          # report problems

          if len(fs_missing) > 0:

              missing_log = os.path.join(self.workdir, 'missing_files.log')

-             outfile = open(missing_log, 'w')

-             outfile.write('Some rpm files were missing.\n'

-                 'Most likely, you want to create these signed copies.\n\n'

-                 'Missing files:\n')

-             for pkgpath in sorted(fs_missing):

-                 outfile.write(pkgpath)

-                 outfile.write('\n')

-             outfile.close()

+             with open(missing_log, 'w') as outfile:

+                 outfile.write('Some rpm files were missing.\n'

+                     'Most likely, you want to create these signed copies.\n\n'

+                     'Missing files:\n')

+                 for pkgpath in sorted(fs_missing):

+                     outfile.write(pkgpath)

+                     outfile.write('\n')

              self.session.uploadWrapper(missing_log, self.uploadpath)

              raise koji.GenericError('Packages missing from the filesystem. '

                      'See missing_files.log.')

          if sig_missing:

              # log missing signatures and possibly error

              missing_log = os.path.join(self.workdir, 'missing_signatures.log')

-             outfile = open(missing_log, 'w')

-             outfile.write('Some rpms were missing requested signatures.\n')

-             if opts['skip_missing_signatures']:

-                 outfile.write('The skip_missing_signatures option was specified, so '

-                         'these files were excluded.\n')

-             outfile.write('Acceptable keys: %r\n\n' % keys)

-             outfile.write('# RPM name: available keys\n')

-             fmt = '%(name)s-%(version)s-%(release)s.%(arch)s'

-             filenames = [[fmt % selected[r], r] for r in sig_missing]

-             for fname, rpm_id in sorted(filenames):

-                 avail = to_list(rpm_idx.get(rpm_id, {}).keys())

-                 outfile.write('%s: %r\n' % (fname, avail))

-             outfile.close()

+             with open(missing_log, 'w') as outfile:

+                 outfile.write('Some rpms were missing requested signatures.\n')

+                 if opts['skip_missing_signatures']:

+                     outfile.write('The skip_missing_signatures option was specified, so '

+                             'these files were excluded.\n')

+                 outfile.write('Acceptable keys: %r\n\n' % keys)

+                 outfile.write('# RPM name: available keys\n')

+                 fmt = '%(name)s-%(version)s-%(release)s.%(arch)s'

+                 filenames = [[fmt % selected[r], r] for r in sig_missing]

+                 for fname, rpm_id in sorted(filenames):

+                     avail = to_list(rpm_idx.get(rpm_id, {}).keys())

+                     outfile.write('%s: %r\n' % (fname, avail))

              self.session.uploadWrapper(missing_log, self.uploadpath)

              if (not opts['skip_missing_signatures']

                          and not opts['allow_missing_signatures']):
@@ -5645,11 +5794,8 @@ 

  

      def write_kojipkgs(self):

          filename = os.path.join(self.repodir, 'kojipkgs')

-         datafile = open(filename, 'w')

-         try:

+         with open(filename, 'w') as datafile:

              json.dump(self.kojipkgs, datafile, indent=4, sort_keys=True)

-         finally:

-             datafile.close()

  

  

  class WaitrepoTask(BaseTaskHandler):
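Aside: the dnf branch in do_multilib_dnf above follows the stock dnf API flow: build a Base, point it at a single local repo, mark packages for install, and resolve. A minimal sketch of that flow, assuming dnf is importable; here the repo is registered via repos.add_new_repo rather than the written-out config file the task uses, and the repo path and package name are placeholders:

    import dnf

    base = dnf.Base()
    base.conf.reposdir = []  # ignore the system's repo files
    base.repos.add_new_repo('koji-ml', base.conf,
                            baseurl=['file:///path/to/repo'])  # placeholder
    base.fill_sack(load_system_repo=False, load_available_repos=True)
    base.install('example-package')  # placeholder name
    base.resolve()
    for po in base.transaction.install_set:
        print(po)  # everything the depsolve pulled in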

file modified: koji.spec
+9 -7
@@ -168,13 +168,9 @@ 

  Group: Applications/System

  License: LGPLv2 and GPLv2+

  #mergerepos (from createrepo) is GPLv2+

- Requires: %{name} = %{version}-%{release}

- # we need the python2 lib here

- Requires: python2-%{name} = %{version}-%{release}

  Requires: mock >= 0.9.14

  Requires(pre): /usr/sbin/useradd

  Requires: squashfs-tools

- Requires: python2-multilib

  %if %{use_systemd}

  Requires(post): systemd

  Requires(preun): systemd
@@ -188,9 +184,15 @@ 

  Requires: /usr/bin/cvs

  Requires: /usr/bin/svn

  Requires: /usr/bin/git

- Requires: python-cheetah

- %if 0%{?fedora} >= 9

  Requires: createrepo >= 0.9.2

+ %if 0%{with python3}

+ Requires: python%{python3_pkgversion}-%{name} = %{version}-%{release}

+ Requires: python%{python3_pkgversion}-multilib

+ Requires: python%{python3_pkgversion}-cheetah

+ %else

+ Requires: python2-%{name} = %{version}-%{release}

+ Requires: python2-multilib

+ Requires: python-cheetah

  %endif

  

  %description builder
@@ -201,7 +203,6 @@ 

  Summary: Koji virtual machine management daemon

  Group: Applications/System

  License: LGPLv2

- Requires: %{name} = %{version}-%{release}

  # we need the python2 lib here

  Requires: python2-%{name} = %{version}-%{release}

  %if %{use_systemd}
@@ -278,6 +279,7 @@ 

  make DESTDIR=$RPM_BUILD_ROOT PYTHON=%{__python3} %{?install_opt} install

  # alter python interpreter in koji CLI

  sed -i 's/\#\!\/usr\/bin\/python2/\#\!\/usr\/bin\/python3/' $RPM_BUILD_ROOT/usr/bin/koji

+ sed -i 's/\#\!\/usr\/bin\/python2/\#\!\/usr\/bin\/python3/' $RPM_BUILD_ROOT/usr/sbin/kojid

  %endif

  

  %clean

file modified: koji/__init__.py
+33 -6
@@ -890,6 +890,22 @@ 

      return hdr

  

  

+ def _decode_item(item):

+     """Decode rpm header byte strings to str in py3"""

+     if six.PY2:

+         return item

+     elif isinstance(item, bytes):

+         try:

+             return item.decode()

+         except UnicodeDecodeError:

+             # typically signatures

+             return item

+     elif isinstance(item, list):

+         return [_decode_item(x) for x in item]

+     else:

+         return item

+ 

+ 

  def get_header_field(hdr, name, src_arch=False):

      """Extract named field from an rpm header"""

      name = name.upper()
@@ -905,6 +921,7 @@ 

              return "nosrc"

          return "src"

  

      result = _get_header_field(hdr, name)

  

      if name in ("NOSOURCE", "NOPATCH"):
@@ -913,12 +930,7 @@ 

              result = []

          elif isinstance(result, six.integer_types):

              result = [result]

-     if six.PY3 and isinstance(result, bytes):

-         try:

-             result = result.decode('utf-8')

-         except UnicodeDecodeError:

-             # typically signatures

-             pass

+     result = _decode_item(result)

  

      sizetags = ('SIZE', 'ARCHIVESIZE', 'FILESIZES', 'SIGSIZE')

      if name in sizetags and (result is None or result == []):
@@ -929,6 +941,8 @@ 

              pass

  

      return result

  

  

  def _get_header_field(hdr, name):
@@ -2389,6 +2403,14 @@ 

          else:

              handler = self.baseurl

          request = dumps(args, name, allow_none=1)

+         try:

+             request.encode('latin-1')

+         except (UnicodeEncodeError, UnicodeDecodeError):

+             # py2 string throws UnicodeDecodeError

+             # py3 string throws UnicodeEncodeError

+             # if string is not converted to UTF, requests will raise an error

+             # on identical check before sending data

+             request = request.encode('utf-8')

          headers = [

              # connection class handles Host

              ('User-Agent', 'koji/1'),
@@ -2682,6 +2704,11 @@ 

              ("Content-length", str(size)),

          ]

-         request = chunk

+         if six.PY3 and isinstance(chunk, str):

+             request = chunk.encode('utf-8')

+         else:

+             # py2 or bytes

+             request = chunk

          return handler, headers, request

  

      def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=None, overwrite=True, volume=None):

file added: koji/arch.py
+512
@@ -0,0 +1,512 @@ 

+ #!/usr/bin/python

+ #

+ 

+ import os

+ import rpm

+ import ctypes

+ import struct

+ 

+ _ppc64_native_is_best = True

+ 

+ # dict mapping arch -> ( multicompat, best personality, biarch personality )

+ multilibArches = { "x86_64":  ( "athlon", "x86_64", "athlon" ),

+                    "sparc64v": ( "sparcv9v", "sparcv9v", "sparc64v" ),

+                    "sparc64": ( "sparcv9", "sparcv9", "sparc64" ),

+                    "ppc64":   ( "ppc", "ppc", "ppc64" ),

+                    "s390x":   ( "s390", "s390x", "s390" ),

+                    }

+ if _ppc64_native_is_best:

+     multilibArches["ppc64"] = ( "ppc", "ppc64", "ppc64" )

+ 

+ arches = {

+     # ia32

+     "athlon": "i686",

+     "i686": "i586",

+     "geode": "i686",

+     "i586": "i486",

+     "i486": "i386",

+     "i386": "noarch",

+ 

+     # amd64

+     "x86_64": "athlon",

+     "amd64": "x86_64",

+     "ia32e": "x86_64",

+ 

+     #ppc64le

+     "ppc64le":  "noarch",

+ 

+     # ppc

+     "ppc64p7": "ppc64",

+     "ppc64pseries": "ppc64",

+     "ppc64iseries": "ppc64",

+     "ppc64": "ppc",

+     "ppc": "noarch",

+ 

+     # s390{,x}

+     "s390x": "s390",

+     "s390": "noarch",

+ 

+     # sparc

+     "sparc64v": "sparcv9v",

+     "sparc64": "sparcv9",

+     "sparcv9v": "sparcv9",

+     "sparcv9": "sparcv8",

+     "sparcv8": "sparc",

+     "sparc": "noarch",

+ 

+     # alpha

+     "alphaev7":   "alphaev68",

+     "alphaev68":  "alphaev67",

+     "alphaev67":  "alphaev6",

+     "alphaev6":   "alphapca56",

+     "alphapca56": "alphaev56",

+     "alphaev56":  "alphaev5",

+     "alphaev5":   "alphaev45",

+     "alphaev45":  "alphaev4",

+     "alphaev4":   "alpha",

+     "alpha":      "noarch",

+ 

+     # arm

+     "armv7l": "armv6l",

+     "armv6l": "armv5tejl",

+     "armv5tejl": "armv5tel",

+     "armv5tel": "noarch",

+ 

+     #arm hardware floating point

+     "armv7hnl": "armv7hl",

+     "armv7hl": "armv6hl",

+     "armv6hl": "noarch",

+ 

+     # arm64

+     "arm64": "noarch",

+ 

+     # super-h

+     "sh4a": "sh4",

+     "sh4": "noarch",

+     "sh3": "noarch",

+ 

+     #itanium

+     "ia64": "noarch",

+     }

+ 

+ #  Will contain information parsed from /proc/self/auxv via _parse_auxv().

+ # Should move into rpm really.

+ _aux_vector = {

+     "platform": "",

+     "hwcap": 0,

+     }

+ 

+ def legitMultiArchesInSameLib(arch=None):

+     # this is completely crackrock - if anyone has a better way I

+     # am all ears

+ 

+     arch = getBestArch(arch)

+     if isMultiLibArch(arch):

+         arch = getBaseArch(myarch=arch)

+ 

+     results = [arch]

+ 

+     if arch in ('x86_64', 'ppc64') or arch.startswith('sparcv9'):

+         for (k, v) in arches.items():

+             if v == arch:

+                 results.append(k)

+     return results

+ 

+ 

+ def canCoinstall(arch1, arch2):

+     """Take two arches and return True if it is possible that they can be

+        installed together with the same nevr. Ex: arch1=i386 and arch2=i686 then

+        it will return False. arch1=i386 and arch2=x86_64 will return True.

+        It does not determine whether or not the arches make any sense. Just whether

+        they could possibly install w/o conflict"""

+ 

+     # if both are multilib arches then we can't coinstall (x86_64, ia32e)

+     # if neither is a multilib arch then we can't coinstall (i386, i686)

+ 

+     if 'noarch' in [arch1, arch2]: # noarch can never coinstall

+         return False

+ 

+     if isMultiLibArch(arch=arch1) == isMultiLibArch(arch=arch2):

+         return False

+     # this section keeps arch1=x86_64 arch2=ppc from returning True

+     if arch1 in getArchList(arch2) or arch2 in getArchList(arch1):

+         return True

+     return False

+ 

+ # this computes the difference between myarch and targetarch

+ def archDifference(myarch, targetarch):

+     if myarch == targetarch:

+         return 1

+     if myarch in arches:

+         ret = archDifference(arches[myarch], targetarch)

+         if ret != 0:

+             return ret + 1

+         return 0

+     return 0

+ 

+ def score(arch):

+     return archDifference(canonArch, arch)

+ 

+ def isMultiLibArch(arch=None):

+     """returns true if arch is a multilib arch, false if not"""

+     if arch is None:

+         arch = canonArch

+ 

+     if arch not in arches: # or we could check if it is noarch

+         return 0

+ 

+     if arch in multilibArches:

+         return 1

+ 

+     if arches[arch] in multilibArches:

+         return 1

+ 

+     return 0

+ 

+ def getBestArchFromList(archlist, myarch=None):

+     """

+         return the best arch from the list for myarch; if myarch is not given,

+         return the best arch from the list for the canonArch.

+     """

+ 

+     if len(archlist) == 0:

+         return None

+ 

+     if myarch is None:

+         myarch = canonArch

+ 

+     mybestarch = getBestArch(myarch)

+ 

+     bestarch = getBestArch(myarch)

+     if bestarch != myarch:

+         bestarchchoice = getBestArchFromList(archlist, bestarch)

+         if bestarchchoice != None and bestarchchoice != "noarch":

+             return bestarchchoice

+ 

+     thisarch = archlist[0]

+     for arch in archlist[1:]:

+         val1 = archDifference(myarch, thisarch)

+         val2 = archDifference(myarch, arch)

+         if val1 == 0 and val2 == 0:

+             continue

+         if val1 < val2:

+             if val1 == 0:

+                 thisarch = arch

+         if val2 < val1:

+             if val2 != 0:

+                 thisarch = arch

+         if val1 == val2:

+             pass

+ 

+     # thisarch should now be our bestarch

+     # one final check to make sure we're not returning a bad arch

+     val = archDifference(myarch, thisarch)

+     if val == 0:

+         return None

+ 

+     return thisarch

+ 

+ 

+ def getArchList(thisarch=None):

+     # this returns a list of archs that are compatible with arch given

+     if not thisarch:

+         thisarch = canonArch

+ 

+     archlist = [thisarch]

+     while thisarch in arches:

+         thisarch = arches[thisarch]

+         archlist.append(thisarch)

+ 

+     # hack hack hack

+     # sparc64v is also sparc64 compat

+     if archlist[0] == "sparc64v":

+         archlist.insert(1,"sparc64")

+ 

+     # if we're a weirdo arch - add noarch on there.

+     if len(archlist) == 1 and archlist[0] == thisarch:

+         archlist.append('noarch')

+     return archlist

+ 

+ def _try_read_cpuinfo():

+     """ Try to read /proc/cpuinfo ... if we can't ignore errors (ie. proc not

+         mounted). """

+     try:

+         return open("/proc/cpuinfo", "r")

+     except:

+         return []

+ 

+ def _parse_auxv():

+     """ Read /proc/self/auxv and parse it into global dict for easier access

+         later on, very similar to what rpm does. """

+     # In case we can't open and read /proc/self/auxv, just return

+     try:

+         data = open("/proc/self/auxv", "rb").read()

+     except:

+         return

+ 

+     # Define values from /usr/include/elf.h

+     AT_PLATFORM = 15

+     AT_HWCAP = 16

+     fmtlen = struct.calcsize("LL")

+     offset = 0

+     platform = ctypes.c_char_p()

+ 

+     # Parse the data and fill in _aux_vector dict

+     while offset <= len(data) - fmtlen:

+         at_type, at_val = struct.unpack_from("LL", data, offset)

+         if at_type == AT_PLATFORM:

+             platform.value = at_val

+             _aux_vector["platform"] = platform.value

+         if at_type == AT_HWCAP:

+             _aux_vector["hwcap"] = at_val

+         offset = offset + fmtlen

+ 

+ def getCanonX86Arch(arch):

+     #

+     if arch == "i586":

+         for line in _try_read_cpuinfo():

+             if line.startswith("model name"):

+                 if line.find("Geode(TM)") != -1:

+                     return "geode"

+                 break

+         return arch

+     # only athlon vs i686 isn't handled with uname currently

+     if arch != "i686":

+         return arch

+ 

+     # if we're i686 and AuthenticAMD, then we should be an athlon

+     for line in _try_read_cpuinfo():

+         if line.startswith("vendor") and line.find("AuthenticAMD") != -1:

+             return "athlon"

+         # i686 doesn't guarantee cmov, but we depend on it

+         elif line.startswith("flags"):

+             if line.find("cmov") == -1:

+                 return "i586"

+             break

+ 

+     return arch

+ 

+ def getCanonARMArch(arch):

+     # the %{_target_arch} macro in rpm will let us know the abi we are using

+     target = rpm.expandMacro('%{_target_cpu}')

+     if target.startswith('armv6h'):

+         return target

+     if target.startswith('armv7h'):

+         return target

+     return arch

+ 

+ def getCanonPPCArch(arch):

+     # FIXME: should I do better handling for mac, etc?

+     if arch != "ppc64":

+         return arch

+ 

+     machine = None

+     for line in _try_read_cpuinfo():

+         if line.find("machine") != -1:

+             machine = line.split(':')[1]

+             break

+ 

+     platform = _aux_vector["platform"]

+     if machine is None and not platform:

+         return arch

+ 

+     try:

+         if platform.startswith("power") and int(platform[5:].rstrip('+')) >= 7:

+             return "ppc64p7"

+     except:

+         pass

+ 

+     if machine is None:

+         return arch

+ 

+     if machine.find("CHRP IBM") != -1:

+         return "ppc64pseries"

+     if machine.find("iSeries") != -1:

+         return "ppc64iseries"

+     return arch

+ 

+ def getCanonSPARCArch(arch):

+     # Deal with sun4v, sun4u, sun4m cases

+     SPARCtype = None

+     for line in _try_read_cpuinfo():

+         if line.startswith("type"):

+             SPARCtype = line.split(':')[1]

+             break

+     if SPARCtype is None:

+         return arch

+ 

+     if SPARCtype.find("sun4v") != -1:

+         if arch.startswith("sparc64"):

+             return "sparc64v"

+         else:

+             return "sparcv9v"

+     if SPARCtype.find("sun4u") != -1:

+         if arch.startswith("sparc64"):

+             return "sparc64"

+         else:

+             return "sparcv9"

+     if SPARCtype.find("sun4m") != -1:

+         return "sparcv8"

+     return arch

+ 

+ def getCanonX86_64Arch(arch):

+     if arch != "x86_64":

+         return arch

+ 

+     vendor = None

+     for line in _try_read_cpuinfo():

+         if line.startswith("vendor_id"):

+             vendor = line.split(':')[1]

+             break

+     if vendor is None:

+         return arch

+ 

+     if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:

+         return "amd64"

+     if vendor.find("GenuineIntel") != -1:

+         return "ia32e"

+     return arch

+ 

+ def getCanonArch(skipRpmPlatform = 0):

+     if not skipRpmPlatform and os.access("/etc/rpm/platform", os.R_OK):

+         try:

+             f = open("/etc/rpm/platform", "r")

+             line = f.readline()

+             f.close()

+             (arch, vendor, opersys) = line.split("-", 2)

+             return arch

+         except:

+             pass

+ 

+     arch = os.uname()[4]

+ 

+     _parse_auxv()

+ 

+     if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):

+         return getCanonX86Arch(arch)

+ 

+     if arch.startswith("arm"):

+         return getCanonARMArch(arch)

+     if arch.startswith("ppc"):

+         return getCanonPPCArch(arch)

+     if arch.startswith("sparc"):

+         return getCanonSPARCArch(arch)

+     if arch == "x86_64":

+         return getCanonX86_64Arch(arch)

+ 

+     return arch

+ 

+ canonArch = getCanonArch()

+ 

+ # this gets you the "compat" arch of a biarch pair

+ def getMultiArchInfo(arch = canonArch):

+     if arch in multilibArches:

+         return multilibArches[arch]

+     if arch in arches and arches[arch] != "noarch":

+         return getMultiArchInfo(arch = arches[arch])

+     return None

+ 

+ # get the best usual userspace arch for the arch we're on.  this is

+ # our arch unless we're on an arch that uses the secondary as its

+ # userspace (eg ppc64, sparc64)

+ def getBestArch(myarch=None):

+     if myarch:

+         arch = myarch

+     else:

+         arch = canonArch

+ 

+     if arch.startswith("sparc64"):

+         arch = multilibArches[arch][1]

+ 

+     if arch.startswith("ppc64") and not _ppc64_native_is_best and arch != "ppc64le":

+         arch = 'ppc'

+ 

+     return arch

+ 

+ def getBaseArch(myarch=None):

+     """returns 'base' arch for myarch, if specified, or canonArch if not.

+        base arch is the arch before noarch in the arches dict if myarch is not

+        a key in the multilibArches."""

+ 

+     if not myarch:

+         myarch = canonArch

+ 

+     if myarch not in arches: # this is dumb, but <shrug>

+         return myarch

+ 

+     if myarch.startswith("sparc64"):

+         return "sparc"

+     elif myarch == "ppc64le":

+         return "ppc64le"

+     elif myarch.startswith("ppc64") and not _ppc64_native_is_best:

+         return "ppc"

+     elif myarch.startswith("arm64"):

+         return "arm64"

+     elif myarch.startswith("armv6h"):

+         return "armhfp"

+     elif myarch.startswith("armv7h"):

+         return "armhfp"

+     elif myarch.startswith("arm"):

+         return "arm"

+ 

+     if isMultiLibArch(arch=myarch):

+         if myarch in multilibArches:

+             return myarch

+         else:

+             return arches[myarch]

+ 

+     if myarch in arches:

+         basearch = myarch

+         value = arches[basearch]

+         while value != 'noarch':

+             basearch = value

+             value = arches[basearch]

+ 

+         return basearch

+ 

+ 

+ class ArchStorage(object):

+     """class for keeping track of what arch we have set and doing various

+        permutations based on it"""

+     def __init__(self):

+         self.canonarch = None

+         self.basearch = None

+         self.bestarch = None

+         self.compatarches = []

+         self.archlist = []

+         self.multilib = False

+         self.setup_arch()

+ 

+     def setup_arch(self, arch=None, archlist_includes_compat_arch=True):

+         if arch:

+             self.canonarch = arch

+         else:

+             self.canonarch = canonArch

+ 

+         self.basearch = getBaseArch(myarch=self.canonarch)

+         self.archlist = getArchList(thisarch=self.canonarch)

+ 

+         if not archlist_includes_compat_arch: # - do we bother including i686 and below on x86_64

+             limit_archlist = []

+             for a in self.archlist:

+                 if isMultiLibArch(a) or a == 'noarch':

+                     limit_archlist.append(a)

+             self.archlist = limit_archlist

+ 

+         self.bestarch = getBestArch(myarch=self.canonarch)

+         self.compatarches = getMultiArchInfo(arch=self.canonarch)

+         self.multilib = isMultiLibArch(arch=self.canonarch)

+         self.legit_multi_arches = legitMultiArchesInSameLib(arch = self.canonarch)

+ 

+     def get_best_arch_from_list(self, archlist, fromarch=None):

+         if not fromarch:

+             fromarch = self.canonarch

+         return getBestArchFromList(archlist, myarch=fromarch)

+ 

+     def score(self, arch):

+         return archDifference(self.canonarch, arch)

+ 

+     def get_arch_list(self, arch):

+         if not arch:

+             return self.archlist

+         return getArchList(thisarch=arch)
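Aside: koji.arch carries these helpers over from rpmUtils.arch, so the kojid call sites above only change their import. A few representative values, following from the arches/multilibArches tables in this file:

    import koji.arch

    koji.arch.isMultiLibArch('x86_64')          # 1: has a 32-bit biarch personality
    koji.arch.isMultiLibArch('i686')            # 0
    koji.arch.getArchList('x86_64')             # ['x86_64', 'athlon', 'i686', ..., 'noarch']
    koji.arch.archDifference('x86_64', 'i386')  # 6: steps down the arches table
    koji.arch.archDifference('i386', 'x86_64')  # 0: no path in that direction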

file modified: koji/daemon.py
+16 -10
@@ -31,7 +31,6 @@ 
  import os
  import signal
  import logging
- from six.moves import urllib
  from fnmatch import fnmatch
  import base64
  import time
@@ -40,6 +39,7 @@ 
  import traceback
  import errno
  from six.moves import range
+ from six.moves import urllib
  import six
  
  
@@ -122,7 +122,10 @@ 
              if logerror:
                  os.dup2(fd, 2)
              # echo the command we're running into the logfile
-             os.write(fd, '$ %s\n' % ' '.join(args))
+             msg = '$ %s\n' % ' '.join(args)
+             if six.PY3:
+                 msg = msg.encode('utf-8')
+             os.write(fd, msg)
              environ = os.environ.copy()
              if env:
                  environ.update(env)
@@ -131,7 +134,10 @@ 
              msg = ''.join(traceback.format_exception(*sys.exc_info()))
              if fd:
                  try:
-                     os.write(fd, msg)
+                     if six.PY3:
+                         os.write(fd, msg.encode('utf-8'))
+                     else:
+                         os.write(fd, msg)
                      os.close(fd)
                  except:
                      pass
@@ -588,7 +594,7 @@ 
  
      def shutdown(self):
          """Attempt to shut down cleanly"""
-         for task_id in self.pids.keys():
+         for task_id in self.pids:
              self.cleanupTask(task_id)
          self.session.host.freeTasks(to_list(self.tasks.keys()))
          self.session.host.updateHost(task_load=0.0, ready=False)
@@ -628,7 +634,7 @@ 
              return
          local_br = self._scanLocalBuildroots()
          # get info on local_only buildroots (most likely expired)
-         local_only = [id for id in six.iterkeys(local_br) if id not in db_br]
+         local_only = [id for id in local_br if id not in db_br]
          if local_only:
              missed_br = self.session.listBuildroots(buildrootID=tuple(local_only))
              #get all the task info in one call
@@ -796,14 +802,14 @@ 
              #about). This will happen after a daemon restart, for example.
              self.logger.info("freeing stale tasks: %r" % stale)
              self.session.host.freeTasks(stale)
-         for id, pid in self.pids.items():
+         for id, pid in list(self.pids.items()):
              if self._waitTask(id, pid):
                  # the subprocess handles most everything, we just need to clear things out
                  if self.cleanupTask(id, wait=False):
                      del self.pids[id]
                  if id in self.tasks:
                      del self.tasks[id]
-         for id, pid in self.pids.items():
+         for id, pid in list(self.pids.items()):
              if id not in tasks:
                  # expected to happen when:
                  #  - we are in the narrow gap between the time the task
@@ -860,7 +866,7 @@ 
              # Note: we may still take an assigned task below
          #sort available capacities for each of our bins
          avail = {}
-         for bin in six.iterkeys(bins):
+         for bin in bins:
              avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]]
              avail[bin].sort()
              avail[bin].reverse()
@@ -893,7 +899,7 @@ 
                  #accept this task)
                  bin_avail = avail.get(bin, [0])
                  self.logger.debug("available capacities for bin: %r" % bin_avail)
-                 median = bin_avail[(len(bin_avail) - 1) // 2]
+                 median = bin_avail[int((len(bin_avail) - 1) // 2)]
                  self.logger.debug("ours: %.2f, median: %.2f" % (our_avail, median))
                  if not self.checkRelAvail(bin_avail, our_avail):
                      if self.checkAvailDelay(task):
@@ -944,7 +950,7 @@ 
          Check our available capacity against the capacity of other hosts in this bin.
          Return True if we should take a task, False otherwise.
          """
-         median = bin_avail[(len(bin_avail)-1)//2]
+         median = bin_avail[int((len(bin_avail) - 1) // 2)]
          self.logger.debug("ours: %.2f, median: %.2f" % (avail, median))
          if avail >= median:
              return True
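
The median lookup needs an integer index: under py3's true division a plain / yields a float, and floats cannot index a list. A minimal illustration (the int() wrapper above is belt-and-braces, since // already returns an int):

    bin_avail = [7.0, 5.5, 3.0]          # available capacities, high to low
    idx = (len(bin_avail) - 1) / 2       # py3: 1.0 -- TypeError as a list index
    idx = (len(bin_avail) - 1) // 2      # 1 on both py2 and py3
    median = bin_avail[idx]              # 5.5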

file modified
+9 -1
@@ -27,6 +27,12 @@ 
  import six
  from six.moves import zip
  
+ class BytesJSONEncoder(json.JSONEncoder):
+     def default(self, o):
+         if six.PY3 and isinstance(o, bytes):
+             return o.decode('utf-8')
+         return json.JSONEncoder.default(self, o)
+ 
  class Rpmdiff:
  
      # constants
@@ -227,5 +233,7 @@ 
              data = self.old_data
          if not data:
              raise ValueError("rpm header data are empty")
-         s = json.dumps(data, sort_keys=True)
+         s = json.dumps(data, sort_keys=True, cls=BytesJSONEncoder)
+         if six.PY3:
+             s = s.encode('utf-8')
          return hashlib.sha256(s).hexdigest()
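
A short self-contained sketch of how the encoder above behaves; the sample data is made up, standing in for the bytes values rpm headers return on py3:

    import hashlib
    import json

    class BytesJSONEncoder(json.JSONEncoder):
        def default(self, o):
            # decode bytes to str so the header blob can be serialized on py3
            if isinstance(o, bytes):
                return o.decode('utf-8')
            return json.JSONEncoder.default(self, o)

    data = {'name': 'bash', 'sourcerpm': b'bash-5.0-1.src.rpm'}
    s = json.dumps(data, sort_keys=True, cls=BytesJSONEncoder)
    print(hashlib.sha256(s.encode('utf-8')).hexdigest())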

file modified
+14 -12
@@ -19,22 +19,25 @@ 
  # Authors:
  #       Mike McLean <mikem@redhat.com>
  #       Mike Bonnet <mikeb@redhat.com>
- 
  from __future__ import absolute_import
- import koji
- import koji.plugin
- import koji.util
- import os
+ 
  import logging
- import six.moves.xmlrpc_client
- import signal
- import shutil
+ import os
+ import pprint
  import random
+ import shutil
+ import signal
  import time
- import pprint
+ 
+ import six.moves.xmlrpc_client
  import six.moves.urllib.request
  from six.moves import range
  
+ import koji
+ import koji.plugin
+ import koji.util
+ 
+ 
  def scan_mounts(topdir):
      """Search path for mountpoints"""
      mplist = []
@@ -476,10 +479,9 @@ 
              fsrc = six.moves.urllib.request.urlopen(url)
              if not os.path.exists(os.path.dirname(fn)):
                  os.makedirs(os.path.dirname(fn))
-             fdst = open(fn, 'w')
-             shutil.copyfileobj(fsrc, fdst)
+             with open(fn, 'wb') as fdst:
+                 shutil.copyfileobj(fsrc, fdst)
              fsrc.close()
-             fdst.close()
          else:
              fn = "%s/%s" % (self.options.topdir, relpath)
          return fn
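
Since urlopen yields a bytes stream on py3, the destination must be opened in binary mode ('wb'); a minimal sketch of the download pattern, with an illustrative URL and path:

    import shutil
    from six.moves import urllib

    fsrc = urllib.request.urlopen('https://example.com/repo/repomd.xml')
    with open('/tmp/repomd.xml', 'wb') as fdst:   # 'wb': copyfileobj writes bytes
        shutil.copyfileobj(fsrc, fdst)
    fsrc.close()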

file modified
+1 -1
@@ -75,7 +75,7 @@ 
      if isinstance(params, Fault):
          methodresponse = 1
      elif not isinstance(params, tuple):
-         raise TypeError('params must be a tuple of Fault instance')
+         raise TypeError('params must be a tuple or Fault instance')
      elif methodresponse and len(params) != 1:
          raise ValueError('response tuple must be a singleton')
  

file modified
+5 -3
@@ -1,12 +1,12 @@ 
  # kojid plugin
  
  from __future__ import absolute_import
- import commands
  import koji
  import six.moves.configparser
  import os
  import platform
  import re
+ import subprocess
  
  import koji.tasks
  from koji.tasks import scan_mounts
@@ -304,8 +304,10 @@ 
          failed = []
          self.logger.info("Unmounting (runroot): %s" % mounts)
          for dir in mounts:
-             (rv, output) = commands.getstatusoutput("umount -l '%s'" % dir)
-             if rv != 0:
+             proc = subprocess.Popen(["umount", "-l", dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+             if proc.wait() != 0:
+                 output = proc.stdout.read()
+                 output += proc.stderr.read()
                  failed.append("%s: %s" % (dir, output))
          if failed:
              msg = "Unable to unmount: %s" % ', '.join(failed)

@@ -6,7 +6,9 @@ 
  KOJID_FILENAME = os.path.dirname(__file__) + "/../../builder/kojid"
  if sys.version_info[0] >= 3:
      import importlib.util
-     spec = importlib.util.spec_from_file_location("koji_kojid", KOJID_FILENAME)
+     import importlib.machinery
+     loader = importlib.machinery.SourceFileLoader('koji_kojid', KOJID_FILENAME)
+     spec = importlib.util.spec_from_file_location("koji_kojid", loader=loader)
      kojid = importlib.util.module_from_spec(spec)
      spec.loader.exec_module(kojid)
  else:
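
spec_from_file_location() alone guesses the loader from the file suffix, so a suffix-less executable like kojid needs an explicit SourceFileLoader; a minimal sketch with a hypothetical path:

    import importlib.machinery
    import importlib.util

    path = '/usr/sbin/kojid'   # hypothetical: a Python script with no .py suffix
    loader = importlib.machinery.SourceFileLoader('kojid_module', path)
    spec = importlib.util.spec_from_file_location('kojid_module', loader=loader)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)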

@@ -2,7 +2,6 @@ 
  import json
  import mock
  import os
- import smtplib
  import tempfile
  try:
      import unittest2 as unittest
@@ -25,7 +24,7 @@ 
          fn = os.path.join(os.path.dirname(__file__), 'data/calls', name,'calls.json')
          with open(fn) as fp:
              data = json.load(fp)
-             data = koji.fixEncodingRecurse(data)
+             #data = koji.fixEncodingRecurse(data)
          for call in data:
              key = self._munge([call['method'], call['args'], call['kwargs']])
              self._testcalls[key] = call
@@ -101,6 +100,6 @@ 
          self.assertEqual(from_addr, "koji@example.com")
          self.assertEqual(recipients, ["user@example.com"])
          fn = os.path.join(os.path.dirname(__file__), 'data/calls', 'build_notif_1', 'message.txt')
-         with open(fn) as fp:
+         with open(fn, 'rb') as fp:
              msg_expect = fp.read()
          self.assertEqual(message, msg_expect)
@@ -84,7 +84,7 @@ 
  
      def test_volume_id_substitutions(self):
          """Check that the volume ID is shortened correctly by the shortenVolID method."""
-         for test_name, values in self.test_cases.iteritems():
+         for test_name, values in self.test_cases.items():
              name = values['name']
              expected_vol_id = values['expected-id']
              result_vol_id = self.handler._shortenVolID(name, self.version, self.release)
@@ -293,14 +293,14 @@ 
      @mock_open()
      @mock.patch('runroot.scan_mounts')
      @mock.patch('os.unlink')
-     @mock.patch('commands.getstatusoutput')
+     @mock.patch('subprocess.Popen')
      @mock.patch('os.path.exists')
-     def test_undo_mounts(self, path_exists, getstatusoutput, os_unlink, scan_mounts, m_open):
+     def test_undo_mounts(self, path_exists, popen, os_unlink, scan_mounts, m_open):
          self.t.logger = mock.MagicMock()
          scan_mounts.return_value = ['mount_1', 'mount_2']
  
          # correct
-         getstatusoutput.return_value = (0, 'ok')
+         popen.return_value.wait.return_value = 0
          path_exists.return_value = True
          m_open.return_value.__enter__.return_value.readlines.return_value = ['mountpoint']
          self.t.undo_mounts('rootdir')
@@ -312,7 +312,9 @@ 
  
          # fail
          os_unlink.reset_mock()
-         getstatusoutput.return_value = (1, 'error')
+         popen.return_value.wait.return_value = 1
+         popen.return_value.stdout.read.return_value = ''
+         popen.return_value.stderr.read.return_value = 'error'
          path_exists.return_value = True
          with self.assertRaises(koji.GenericError) as cm:
              self.t.undo_mounts('rootdir')

I have this WIP branch, so I'm putting it here for discussion and refinement.

I suggest dropping the YUM code here entirely, as librepo (with Python bindings) is available all the way back to EL6.

This logic implies we prefer YUM. If we want to keep YUM code at all (which I think we shouldn't), we should flip this logic so it tries librepo first, and only if that fails, try yum.
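
For illustration, a hedged sketch of that librepo-first fallback (the function name and paths are made up, not koji's API):

    try:
        import librepo
    except ImportError:
        librepo = None

    def fetch_repomd(url, destdir):
        # prefer librepo; only fall back to yum when it is unavailable
        if librepo is not None:
            h = librepo.Handle()
            h.urls = [url]
            h.repotype = librepo.LR_YUMREPO
            h.destdir = destdir            # must already exist
            h.yumdlist = []                # fetch repomd.xml only
            r = librepo.Result()
            h.perform(r)
            return r.yum_repomd
        from yum import repoMDObject
        return repoMDObject.RepoMD('repoid', destdir + '/repodata/repomd.xml')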

1 new commit added

  • fix runroot test
4 years ago

1 new commit added

  • use librepo instead of yum where possible
4 years ago

You're right. I've deleted the 'if yum_available' branch at that point. Nevertheless, dist-repo is still yum-dependent.

1 new commit added

  • make librepo optional
4 years ago

1 new commit added

  • fix typo
4 years ago

1 new commit added

  • distrepo via dnf
4 years ago

Rather than writing a config file and going that route, you can set these options through Python directly with dnf.conf objects. You can see an example of this here: https://pagure.io/releng/blob/master/f/scripts/spam-o-matic
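
For example, a minimal sketch of driving dnf through its API instead of a written config file (cache path and repo URL are illustrative):

    import dnf

    base = dnf.Base()
    base.conf.cachedir = '/tmp/koji-dnf-cache'
    base.conf.reposdir = []                # ignore the host's repo files
    base.repos.add_new_repo('buildroot-repo', base.conf,
                            baseurl=['https://example.com/repo/'])
    base.fill_sack(load_system_repo=False)
    print(base.sack.query().available().filter(name='bash'))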

I don't think you mean to use the dnf API for the yum implementation here?

rebased onto 1652442e6fef7c0ca855185aafa46a97791ad27a

4 years ago

I don't think you mean to use the dnf API for the yum implementation here?

This is more DNF API usage in the YUM functions...

rebased onto fc77341e41893f96221cca9a6668c99553854aa2

4 years ago

rebased onto cffff91

4 years ago

We shouldn't be using DNF APIs in the YUM function...

I don't think you mean to use the dnf API for the yum implementation here?

I concur. Perhaps this was a merge/rebase error along the way.

This change reverts that part, leaving it in line with the old yum code (apart from using koji.arch and with open()).

https://github.com/mikem23/koji-playground/commits/pagure/pr/1117

@mikem The diff I see here looks good to me: https://github.com/mikem23/koji-playground/compare/pagure/pr/1117

At this point, I think it's good to merge.

2 new commits added

  • Fix unit tests
  • drop dnf from yum handler
4 years ago

@tkopecek This needs rebasing. It might be better to also clean up the commit history so that the changes are more logically separated and make sense.

Merging this with a manual rebase and two small fixes

  • fixing a unit test that broke on py3
  • accounting for the license of the included code from yum

Commit 4ad8a0e fixes this pull-request

Pull-Request has been merged by mikem

4 years ago

@mikem Commit 96d3ee5 was somehow missed during merge.