@@ -35,12 +35,14 @@
 from koji.daemon import incremental_upload, log_output, TaskManager, SCM
 from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask
 from koji.util import parseStatus, isSuccess, dslice, dslice_ex
+import multilib
 import os
 import pwd
 import grp
 import random
 import re
 import rpm
+import rpmUtils.arch
 import shutil
 import signal
 import smtplib
@@ -59,6 +61,8 @@
 from optparse import OptionParser, SUPPRESS_HELP
 from StringIO import StringIO
 from yum import repoMDObject
+import yum.packages
+import yum.Errors
 
 #imports for LiveCD, LiveMedia, and Appliance handler
 image_enabled = False
@@ -3014,7 +3018,6 @@
         livemedia_log = '/tmp/lmc-logs/livemedia-out.log'
         resultdir = '/tmp/lmc'
 
-
         # Common LMC command setup, needs extending
         cmd = ['/sbin/livemedia-creator',
                '--ks', kskoji,
@@ -4721,8 +4724,8 @@
         if os.path.getsize(pkglist) == 0:
             pkglist = None
         self.create_local_repo(rinfo, arch, pkglist, groupdata, oldrepo)
-
-        external_repos = self.session.getExternalRepoList(rinfo['tag_id'], event=rinfo['create_event'])
+        external_repos = self.session.getExternalRepoList(
+            rinfo['tag_id'], event=rinfo['create_event'])
         if external_repos:
             self.merge_repos(external_repos, arch, groupdata)
         elif pkglist is None:
@@ -4735,10 +4738,9 @@
         for f in os.listdir(self.datadir):
             files.append(f)
             self.session.uploadWrapper('%s/%s' % (self.datadir, f), uploadpath, f)
-
         return [uploadpath, files]
 
-    def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo):
+    def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo, drpms=False):
         koji.ensuredir(self.outdir)
         if self.options.use_createrepo_c:
             cmd = ['/usr/bin/createrepo_c']
@@ -4750,7 +4752,9 @@
         if os.path.isfile(groupdata):
             cmd.extend(['-g', groupdata])
         #attempt to recycle repodata from last repo
-        if pkglist and oldrepo and self.options.createrepo_update:
+        if pkglist and oldrepo and self.options.createrepo_update and not drpms:
+            # signed repos overload the meaning of "oldrepo", so the conditional
+            # explicitly makes sure this does not get executed in that case
             oldpath = self.pathinfo.repo(oldrepo['id'], rinfo['tag_name'])
             olddatadir = '%s/%s/repodata' % (oldpath, arch)
             if not os.path.isdir(olddatadir):
@@ -4765,6 +4769,11 @@
                 cmd.append('--update')
                 if self.options.createrepo_skip_stat:
                     cmd.append('--skip-stat')
+        if drpms:
+            # generate delta-rpms
+            cmd.append('--deltas')
+            for repo in oldrepo:
+                cmd.extend(['--oldpackagedirs', repo])
         # note: we can't easily use a cachedir because we do not have write
         # permission. The good news is that with --update we won't need to
         # be scanning many rpms.
@@ -4809,6 +4818,339 @@
             raise koji.GenericError, 'failed to merge repos: %s' \
                 % parseStatus(status, ' '.join(cmd))
 
+
+class NewSignedRepoTask(BaseTaskHandler):
+    Methods = ['signedRepo']
+    _taskWeight = 0.1
+
+    def handler(self, tag, repo_id, keys, task_opts):
+        tinfo = self.session.getTag(tag, strict=True, event=task_opts['event'])
+        path = koji.pathinfo.signedrepo(repo_id, tinfo['name'])
+        if len(task_opts['arch']) == 0:
+            task_opts['arch'] = tinfo['arches'].split()
+        if len(task_opts['arch']) == 0:
+            raise koji.GenericError('No arches specified and none configured for the tag')
+        subtasks = {}
+        # weed out subarchitectures
+        canonArches = set()
+        for arch in task_opts['arch']:
+            canonArches.add(koji.canonArch(arch))
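+        # arches yum does not treat as multilib; build their repos first so
+        # the multilib arches can later pull 32-bit packages from them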
+        arch32s = set()
+        for arch in canonArches:
+            if not rpmUtils.arch.isMultiLibArch(arch):
+                arch32s.add(arch)
+        for arch in arch32s:
+            # we do 32-bit multilib arches first so the 64-bit ones can
+            # get a task ID and wait for them to complete
+            arglist = [tag, repo_id, arch, keys, task_opts]
+            subtasks[arch] = self.session.host.subtask(
+                method='createsignedrepo', arglist=arglist, label=arch,
+                parent=self.id, arch='noarch')
+        if len(subtasks) > 0 and task_opts['multilib']:
+            results = self.wait(subtasks.values(), all=True, failany=True)
+            for arch in arch32s:
+                # move the 32-bit task output to the final resting place
+                # so the 64-bit arches can use it for multilib
+                upload, files, keypaths = results[subtasks[arch]]
+                self.session.host.signedRepoMove(
+                    repo_id, upload, files, arch, keypaths)
+        for arch in canonArches:
+            # do the other arches
+            if arch not in arch32s:
+                arglist = [tag, repo_id, arch, keys, task_opts]
+                subtasks[arch] = self.session.host.subtask(
+                    method='createsignedrepo', arglist=arglist, label=arch,
+                    parent=self.id, arch='noarch')
+        # wait for 64-bit subtasks to finish
+        data = {}
+        results = self.wait(subtasks.values(), all=True, failany=True)
+        for (arch, task_id) in subtasks.iteritems():
+            data[arch] = results[task_id]
+            self.logger.debug("DEBUG: %r : %r " % (arch, data[arch]))
+            if task_opts['multilib']:
+                # we moved the 32-bit results before, do the 64-bit
+                if arch not in arch32s:
+                    upload, files, keypaths = results[subtasks[arch]]
+                    self.session.host.signedRepoMove(
+                        repo_id, upload, files, arch, keypaths)
+            else:
+                upload, files, keypaths = results[subtasks[arch]]
+                self.session.host.signedRepoMove(
+                    repo_id, upload, files, arch, keypaths)
+        self.session.host.repoDone(repo_id, data, expire=False, signed=True)
+        return 'Signed repository #%s successfully generated' % repo_id
+
+
+class createSignedRepoTask(CreaterepoTask):
+    Methods = ['createsignedrepo']
+    _taskWeight = 1.5
+
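+    # for multilib, map a 64-bit arch to the arch that supplies its 32-bit packages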
+    archmap = {'s390x': 's390', 'ppc64': 'ppc', 'x86_64': 'i686'}
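+    # yum-style compatible-arch tuples, most specific arch first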
+    compat = {"i386": ("athlon", "i686", "i586", "i486", "i386", "noarch"),
+              "x86_64": ("amd64", "ia32e", "x86_64", "noarch"),
+              "ia64": ("ia64", "noarch"),
+              "ppc": ("ppc", "noarch"),
+              "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),
+              "ppc64le": ("ppc64le", "noarch"),
+              "s390": ("s390", "noarch"),
+              "s390x": ("s390x", "noarch"),
+              "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),
+              "sparc64": ("sparc64v", "sparc64", "noarch"),
+              "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),
+              "arm": ("arm", "armv4l", "armv4tl", "armv5tel", "armv5tejl", "armv6l", "armv7l", "noarch"),
+              "armhfp": ("armv7hl", "armv7hnl", "noarch"),
+              "aarch64": ("aarch64", "noarch"),
+              "src": ("src",)
+              }
+
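+    # counterpart arch whose compat list is consulted when depsolving multilib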
+    biarch = {"ppc": "ppc64", "x86_64": "i386", "sparc": "sparc64",
+              "s390x": "s390", "ppc64": "ppc"}
+
+    def handler(self, tag, repo_id, arch, keys, opts):
+        #arch is the arch of the repo, not the task
+        self.rinfo = self.session.repoInfo(repo_id, strict=True)
+        if self.rinfo['state'] != koji.REPO_INIT:
+            raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo
+        self.repo_id = self.rinfo['id']
+        self.pathinfo = koji.PathInfo(self.options.topdir)
+        groupdata = os.path.join(
+            self.pathinfo.signedrepo(repo_id, self.rinfo['tag_name']),
+            'groups', 'comps.xml')
+        #set up our output dir
+        self.repodir = '%s/repo' % self.workdir
+        koji.ensuredir(self.repodir)
+        self.outdir = self.repodir  # workaround: create_local_repo writes to self.outdir
+        self.datadir = '%s/repodata' % self.repodir
+        self.keypaths = {}
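+        # opts['delta'] lists older repo dirs to use as drpm "oldpackagedirs"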
+        if len(opts['delta']) > 0:
+            for path in opts['delta']:
+                if not os.path.exists(path):
+                    raise koji.GenericError(
+                        'drpm path %s does not exist!' % path)
+        self.uploadpath = self.getUploadDir()
+        self.pkglist = self.make_pkglist(tag, arch, keys, opts)
+        if opts['multilib'] and rpmUtils.arch.isMultiLibArch(arch):
+            self.do_multilib(arch, self.archmap[arch], opts['multilib'])
+        self.logger.debug('package list is %s' % self.pkglist)
+        self.session.uploadWrapper(self.pkglist, self.uploadpath,
+                                   os.path.basename(self.pkglist))
+        if os.path.getsize(self.pkglist) == 0:
+            self.pkglist = None
+        if len(opts['delta']) > 0:
+            do_drpms = True
+        else:
+            do_drpms = False
+        self.create_local_repo(self.rinfo, arch, self.pkglist, groupdata,
+                               opts['delta'], drpms=do_drpms)
+        if self.pkglist is None:
+            fo = file(os.path.join(self.datadir, "EMPTY_REPO"), 'w')
+            fo.write("This repo is empty because its tag has no content for this arch\n")
+            fo.close()
+        files = ['pkglist']
+        for f in os.listdir(self.datadir):
+            files.append(f)
+            self.session.uploadWrapper('%s/%s' % (self.datadir, f),
+                                       self.uploadpath, f)
+        if opts['delta']:
+            ddir = os.path.join(self.repodir, 'drpms')
+            for f in os.listdir(ddir):
+                files.append(f)
+                self.session.uploadWrapper('%s/%s' % (ddir, f),
+                                           self.uploadpath, f)
+        return [self.uploadpath, files, self.keypaths]
+
+    def do_multilib(self, arch, ml_arch, conf):
+        self.repo_id = self.rinfo['id']
+        pathinfo = koji.PathInfo(self.options.topdir)
+        repodir = pathinfo.signedrepo(self.rinfo['id'], self.rinfo['tag_name'])
+        mldir = os.path.join(repodir, koji.canonArch(ml_arch))
+        ml_true = set()  # multilib packages we need to include before depsolve
+        ml_conf = os.path.join(self.pathinfo.work(), conf)
+
+        # step 1: figure out which packages are multilib (should already exist)
+        mlm = multilib.DevelMultilibMethod(ml_conf)
+        fs_missing = set()
+        with open(self.pkglist) as pkglist:
+            for pkg in pkglist:
+                ppath = os.path.join(self.repodir, pkg.strip())
+                po = yum.packages.YumLocalPackage(filename=ppath)
+                if mlm.select(po) and self.archmap.has_key(arch):
+                    # we need a multilib package to be included
+                    # we assume the same signature level is available
+                    # XXX: what if a subarchitecture is the right answer?
+                    pl_path = pkg.replace(arch, self.archmap[arch]).strip()
+                    # assume this exists in the task results for the ml arch
+                    real_path = os.path.join(mldir, pl_path)
+                    ml_true.add(real_path)
+                    if not os.path.exists(real_path):
+                        self.logger.error('%s (multilib) is not on the filesystem' % real_path)
+                        fs_missing.add(real_path)
+
+        # step 2: set up architectures for yum configuration
+        self.logger.info("Resolving multilib for %s using method devel" % arch)
+        yumbase = yum.YumBase()
+        yumbase.verbose_logger.setLevel(logging.ERROR)
+        yumdir = os.path.join(self.workdir, 'yum')
+        # TODO: unwind this arch mess
+        archlist = (arch, 'noarch')
+        transaction_arch = arch
+        archlist = archlist + self.compat[self.biarch[arch]]
+        best_compat = self.compat[self.biarch[arch]][0]
+        if rpmUtils.arch.archDifference(best_compat, arch) > 0:
+            transaction_arch = best_compat
+        if hasattr(rpmUtils.arch, 'ArchStorage'):
+            yumbase.preconf.arch = transaction_arch
+        else:
+            rpmUtils.arch.canonArch = transaction_arch
+
+        yconfig = """
+[main]
+debuglevel=2
+pkgpolicy=newest
+exactarch=1
+gpgcheck=0
+reposdir=/dev/null
+cachedir=/yumcache
+installroot=%s
+logfile=/yum.log
+
+[koji-%s]
+name=koji multilib task
+baseurl=file://%s
+enabled=1
+
+""" % (yumdir, self.id, mldir)
+        os.makedirs(os.path.join(yumdir, "yumcache"))
+        os.makedirs(os.path.join(yumdir, 'var/lib/rpm'))
+
+        # step 3: proceed with yum config and set up
+        yconfig_path = os.path.join(yumdir, 'yum.conf-koji-%s' % arch)
+        f = open(yconfig_path, 'w')
+        f.write(yconfig)
+        f.close()
+        self.session.uploadWrapper(yconfig_path, self.uploadpath,
+                                   os.path.basename(yconfig_path))
+        yumbase.doConfigSetup(fn=yconfig_path)
+        yumbase.conf.cache = 0
+        yumbase.doRepoSetup()
+        yumbase.doTsSetup()
+        yumbase.doRpmDBSetup()
+        # we trust Koji's files, so skip verifying sigs and digests
+        yumbase.ts.pushVSFlags(
+            (rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))
+        yumbase.doSackSetup(archlist=archlist, thisrepo='koji-%s' % arch)
+        yumbase.doSackFilelistPopulate()
+        for pkg in ml_true:
+            # TODO: store packages by first letter
+            # ppath = os.path.join(pkgdir, pkg.name[0].lower(), pname)
+            po = yum.packages.YumLocalPackage(filename=pkg)
+            yumbase.tsInfo.addInstall(po)
+
+        # step 4: execute yum transaction to get dependencies
+        self.logger.info("Resolving dependencies for arch %s" % arch)
+        rc, errors = yumbase.resolveDeps()
+        ml_needed = set()
+        for f in yumbase.tsInfo.getMembers():
+            bnp = os.path.basename(f.po.localPkg())
+            dep_path = os.path.join(mldir, bnp[0].lower(), bnp)
+            ml_needed.add(dep_path)
+            self.logger.debug("added %s" % dep_path)
+            if not os.path.exists(dep_path):
+                self.logger.error('%s (multilib dep) not on filesystem' % dep_path)
+                fs_missing.add(dep_path)
+        self.logger.info('yum return code: %s' % rc)
+        if not rc:
+            self.logger.error('yum depsolve was unsuccessful')
+            raise koji.GenericError(errors)
+        if len(fs_missing) > 0:
+            raise koji.GenericError('multilib packages missing:\n' +
+                                    '\n'.join(fs_missing))
+
+        # step 5: add dependencies to our package list
+        pkgwriter = open(self.pkglist, 'a')
+        for ml_pkg in ml_needed:
+            bnp = os.path.basename(ml_pkg)
+            bnplet = bnp[0].lower()
+            pkgwriter.write(bnplet + '/' + bnp + '\n')
+            koji.ensuredir(os.path.join(self.repodir, bnplet))
+            os.symlink(ml_pkg, os.path.join(self.repodir, bnplet, bnp))
+            self.keypaths[bnp] = ml_pkg
+
+
+    def make_pkglist(self, tag_id, arch, keys, opts):
+
+        # Need to pass event_id because even though this is a single trans,
+        # it is possible to see the results of other committed transactions
+        rpms = []
+        builddirs = {}
+        for a in self.compat[arch] + ('noarch',):
+            rpm_iter, builds = self.session.listTaggedRPMS(tag_id,
+                event=opts['event'], arch=a, latest=opts['latest'],
+                inherit=opts['inherit'], rpmsigs=True)
+            for build in builds:
+                builddirs[build['id']] = self.pathinfo.build(build)
+            rpms += list(rpm_iter)
+        #get build dirs
+        need = set(['%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % r for r in rpms])
+        #generate pkglist files
+        pkgfile = os.path.join(self.repodir, 'pkglist')
+        pkglist = file(pkgfile, 'w')
+        preferred = {}
+        if opts['unsigned']:
+            keys.append('')  # make unsigned rpms the least preferred
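+        # keys is ordered most-preferred first, so a lower index wins below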
+        for rpminfo in rpms:
+            if rpminfo['sigkey'] == '' and not opts['unsigned']:
+                # skip, this is the unsigned rpminfo
+                continue
+            if rpminfo['sigkey'] not in keys:
+                # skip, not a key we are looking for
+                continue
+            idx = keys.index(rpminfo['sigkey'])
+            if preferred.has_key(rpminfo['id']):
+                if keys.index(preferred[rpminfo['id']]['sigkey']) <= idx:
+                    # the key already seen for this rpm is preferred over this one
+                    continue
+            preferred[rpminfo['id']] = rpminfo
+        seen = set()
+        fs_missing = set()
+        for rpminfo in preferred.values():
+            if rpminfo['sigkey'] == '':
+                # we're taking an unsigned rpm (--allow-unsigned)
+                pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],
+                                     self.pathinfo.rpm(rpminfo))
+            else:
+                pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],
+                                     self.pathinfo.signed(rpminfo, rpminfo['sigkey']))
+            seen.add(os.path.basename(pkgpath))
+            if not os.path.exists(pkgpath):
+                fs_missing.add(pkgpath)
+            else:
+                bnp = os.path.basename(pkgpath)
+                bnplet = bnp[0].lower()
+                pkglist.write(bnplet + '/' + bnp + '\n')
+                koji.ensuredir(os.path.join(self.repodir, bnplet))
+                self.keypaths[bnp] = pkgpath
+                os.symlink(pkgpath, os.path.join(self.repodir, bnplet, bnp))
+        pkglist.close()
+        if len(fs_missing) > 0:
+            raise koji.GenericError('Packages missing from the filesystem:\n' +
+                                    '\n'.join(fs_missing))
+        if not opts['skip']:
+            missing = list(need - seen)
+            if len(missing) != 0:
+                missing.sort()
+                raise koji.GenericError('Unsigned packages found: ' +
+                                        '\n'.join(missing))
+        return pkgfile
+
+
 class WaitrepoTask(BaseTaskHandler):
 
     Methods = ['waitrepo']