#318 Signed repos, take two
Merged 7 years ago by mikem. Opened 7 years ago by mikem.

file modified
+433 -4
@@ -30,17 +30,23 @@ 

  import koji.util

  import koji.tasks

  import glob

+ try:

+     import json

+ except ImportError:  # pragma: no cover

+     import simplejson as json

  import logging

  import logging.handlers

  from koji.daemon import incremental_upload, log_output, TaskManager, SCM

  from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask

  from koji.util import parseStatus, isSuccess, dslice, dslice_ex

+ import multilib.multilib as multilib

  import os

  import pwd

  import grp

  import random

  import re

  import rpm

+ import rpmUtils.arch

  import shutil

  import signal

  import smtplib
@@ -58,6 +64,8 @@ 

  from gzip import GzipFile

  from optparse import OptionParser, SUPPRESS_HELP

  from yum import repoMDObject

+ import yum.packages

+ import yum.Errors

  

  #imports for LiveCD, LiveMedia, and Appliance handler

  image_enabled = False
@@ -4831,8 +4839,8 @@ 

          if os.path.getsize(pkglist) == 0:

              pkglist = None

          self.create_local_repo(rinfo, arch, pkglist, groupdata, oldrepo)

- 

-         external_repos = self.session.getExternalRepoList(rinfo['tag_id'], event=rinfo['create_event'])

+         external_repos = self.session.getExternalRepoList(

+             rinfo['tag_id'], event=rinfo['create_event'])

          if external_repos:

              self.merge_repos(external_repos, arch, groupdata)

          elif pkglist is None:
@@ -4845,10 +4853,9 @@ 

          for f in os.listdir(self.datadir):

              files.append(f)

              self.session.uploadWrapper('%s/%s' % (self.datadir, f), uploadpath, f)

- 

          return [uploadpath, files]

  

-     def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo):

+     def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo, oldpkgs=None):

          koji.ensuredir(self.outdir)

          if self.options.use_createrepo_c:

              cmd = ['/usr/bin/createrepo_c']
@@ -4875,6 +4882,11 @@ 

                  cmd.append('--update')

                  if self.options.createrepo_skip_stat:

                      cmd.append('--skip-stat')

+         if oldpkgs is not None:

+             # generate delta-rpms

+             cmd.append('--deltas')

+             for op_dir in oldpkgs:

+                 cmd.extend(['--oldpackagedirs', op_dir])

          # note: we can't easily use a cachedir because we do not have write

          # permission. The good news is that with --update we won't need to

          # be scanning many rpms.
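For reference, a sketch of what the new deltas branch contributes to the assembled command; the old-package directories below are hypothetical dist-repo arch dirs:

```python
# Illustration only; the oldpkgs paths are made-up examples.
cmd = ['/usr/bin/createrepo_c']
oldpkgs = ['/mnt/koji/repos-dist/my-tag/40/x86_64',
           '/mnt/koji/repos-dist/my-tag/45/x86_64']
cmd.append('--deltas')
for op_dir in oldpkgs:
    cmd.extend(['--oldpackagedirs', op_dir])
# cmd -> ['/usr/bin/createrepo_c', '--deltas',
#         '--oldpackagedirs', '/mnt/koji/repos-dist/my-tag/40/x86_64',
#         '--oldpackagedirs', '/mnt/koji/repos-dist/my-tag/45/x86_64']
```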
@@ -4920,6 +4932,423 @@ 

              raise koji.GenericError('failed to merge repos: %s' \

                  % parseStatus(status, ' '.join(cmd)))

  

+ 

+ class NewDistRepoTask(BaseTaskHandler):

+     Methods = ['distRepo']

+     _taskWeight = 0.1

+ 

+     def handler(self, tag, repo_id, keys, task_opts):

+         tinfo = self.session.getTag(tag, strict=True, event=task_opts['event'])

+         path = koji.pathinfo.distrepo(repo_id, tinfo['name'])

+         if len(task_opts['arch']) == 0:

+             arches = tinfo['arches'] or ''

+             task_opts['arch'] = arches.split()

+         if len(task_opts['arch']) == 0:

+             raise koji.GenericError('No arches specified and none associated with the tag')

+         subtasks = {}

+         # weed out subarchitectures

+         canonArches = set()

+         for arch in task_opts['arch']:

+             canonArches.add(koji.canonArch(arch))

+         arch32s = set()

+         for arch in canonArches:

+             if not rpmUtils.arch.isMultiLibArch(arch):

+                 arch32s.add(arch)

+         for arch in arch32s:

+             # we do 32-bit multilib arches first so the 64-bit ones can

+             # get a task ID and wait for them to complete

+             arglist = [tag, repo_id, arch, keys, task_opts]

+             subtasks[arch] = self.session.host.subtask(

+                 method='createdistrepo', arglist=arglist, label=arch,

+                 parent=self.id, arch='noarch')

+         if len(subtasks) > 0 and task_opts['multilib']:

+             results = self.wait(subtasks.values(), all=True, failany=True)

+             for arch in arch32s:

+                 # move the 32-bit task output to the final resting place

+                 # so the 64-bit arches can use it for multilib

+                 upload, files, sigmap = results[subtasks[arch]]

+                 self.session.host.distRepoMove(

+                     repo_id, upload, files, arch, sigmap)

+         for arch in canonArches:

+             # do the other arches

+             if arch not in arch32s:

+                 arglist = [tag, repo_id, arch, keys, task_opts]

+                 subtasks[arch] = self.session.host.subtask(

+                     method='createdistrepo', arglist=arglist, label=arch,

+                     parent=self.id, arch='noarch')

+         # wait for 64-bit subtasks to finish

+         data = {}

+         results = self.wait(subtasks.values(), all=True, failany=True)

+         for (arch, task_id) in subtasks.iteritems():

+             data[arch] = results[task_id]

+             self.logger.debug("DEBUG: %r : %r " % (arch, data[arch]))

+             if task_opts['multilib'] and arch in arch32s:

+                 # already moved above

+                 continue

+             #else

+             upload, files, sigmap = results[subtasks[arch]]

+             self.session.host.distRepoMove(

+                 repo_id, upload, files, arch, sigmap)

+         self.session.host.repoDone(repo_id, data, expire=False)

+         return 'Dist repository #%s successfully generated' % repo_id
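To make the two-phase scheduling concrete, a small trace of the arch weeding above, assuming the usual koji.canonArch mappings and yum's rpmUtils.arch.isMultiLibArch (input arches hypothetical):

```python
# task_opts['arch'] = ['i686', 'athlon', 'x86_64']
# koji.canonArch('i686') == koji.canonArch('athlon') == 'i386'
# canonArches -> {'i386', 'x86_64'}
# isMultiLibArch('i386') is False; isMultiLibArch('x86_64') is True
# arch32s -> {'i386'}, which is scheduled first so the x86_64 subtask
# can later resolve multilib packages against its moved output.
```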

+ 

+ 

+ class createDistRepoTask(CreaterepoTask):

+     Methods = ['createdistrepo']

+     _taskWeight = 1.5

+ 

+     archmap = {'s390x': 's390', 'ppc64': 'ppc', 'x86_64': 'i686'}

+     compat = {"i386": ("athlon", "i686", "i586", "i486", "i386", "noarch"),

+           "x86_64": ("amd64", "ia32e", "x86_64", "noarch"),

+           "ia64": ("ia64", "noarch"),

+           "ppc": ("ppc", "noarch"),

+           "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),

+           "ppc64le": ("ppc64le", "noarch"),

+           "s390": ("s390", "noarch"),

+           "s390x": ("s390x",  "noarch"),

+           "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),

+           "sparc64": ("sparc64v", "sparc64", "noarch"),

+           "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),

+           "arm": ("arm", "armv4l", "armv4tl", "armv5tel", "armv5tejl", "armv6l", "armv7l", "noarch"),

+           "armhfp": ("armv7hl", "armv7hnl", "noarch"),

+           "aarch64": ("aarch64", "noarch"),

+           "src": ("src",)

+           }

+ 

+     biarch = {"ppc": "ppc64", "x86_64": "i386", "sparc":

+           "sparc64", "s390x": "s390", "ppc64": "ppc"}

+ 

+     def handler(self, tag, repo_id, arch, keys, opts):

+         #arch is the arch of the repo, not the task

+         self.rinfo = self.session.repoInfo(repo_id, strict=True)

+         if self.rinfo['state'] != koji.REPO_INIT:

+             raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo)

+         self.repo_id = self.rinfo['id']

+         self.pathinfo = koji.PathInfo(self.options.topdir)

+         groupdata = os.path.join(

+             self.pathinfo.distrepo(repo_id, self.rinfo['tag_name']),

+             'groups', 'comps.xml')

+         #set up our output dir

+         self.repodir = '%s/repo' % self.workdir

+         koji.ensuredir(self.repodir)

+         self.outdir = self.repodir # workaround create_local_repo use

+         self.datadir = '%s/repodata' % self.repodir

+         self.sigmap = {}

+         oldpkgs = []

+         if opts.get('delta'):

+             # should be a list of repo ids to delta against

+             for old_repo_id in opts['delta']:

+                 oldrepo = self.session.repoInfo(old_repo_id, strict=True)

+                 if not oldrepo['dist']:

+                     # regular repos don't actually have rpms, just a pkglist

+                     raise koji.GenericError("Base repo for deltas must also "

+                             "be a dist repo")

+                 path = koji.pathinfo.distrepo(old_repo_id, oldrepo['tag_name'])

+                 if not os.path.exists(path):

+                     raise koji.GenericError('Base drpm repo missing: %s' % path)

+                 oldpkgs.append(path)

+         self.uploadpath = self.getUploadDir()

+         self.pkglist = self.make_pkglist(tag, arch, keys, opts)

+         if opts['multilib'] and rpmUtils.arch.isMultiLibArch(arch):

+             self.do_multilib(arch, self.archmap[arch], opts['multilib'])

+         self.write_kojipkgs()

+         self.logger.debug('package list is %s' % self.pkglist)

+         self.session.uploadWrapper(self.pkglist, self.uploadpath,

+             os.path.basename(self.pkglist))

+         if os.path.getsize(self.pkglist) == 0:

+             self.pkglist = None

+         self.create_local_repo(self.rinfo, arch, self.pkglist, groupdata, None, oldpkgs=oldpkgs)

+         if self.pkglist is None:

+             fo = file(os.path.join(self.datadir, "EMPTY_REPO"), 'w')

+             fo.write("This repo is empty because its tag has no content for this arch\n")

+             fo.close()

+         files = ['pkglist', 'kojipkgs']

+         for f in os.listdir(self.datadir):

+             files.append(f)

+             self.session.uploadWrapper('%s/%s' % (self.datadir, f),

+                 self.uploadpath, f)

+         if opts['delta']:

+             ddir = os.path.join(self.repodir, 'drpms')

+             for f in os.listdir(ddir):

+                 files.append(f)

+                 self.session.uploadWrapper('%s/%s' % (ddir, f),

+                     self.uploadpath, f)

+         return [self.uploadpath, files, self.sigmap.items()]

+ 

+     def do_multilib(self, arch, ml_arch, conf):

+         self.repo_id = self.rinfo['id']

+         pathinfo = koji.PathInfo(self.options.topdir)

+         repodir = pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])

+         mldir = os.path.join(repodir, koji.canonArch(ml_arch))

+         ml_true = set() # multilib packages we need to include before depsolve

+         ml_conf = os.path.join(self.pathinfo.work(), conf)

+ 

+         # step 1: figure out which packages are multilib (should already exist)

+         mlm = multilib.DevelMultilibMethod(ml_conf)

+         fs_missing = set()

+         with open(self.pkglist) as pkglist:

+             for pkg in pkglist:

+                 ppath = os.path.join(self.repodir, pkg.strip())

+                 po = yum.packages.YumLocalPackage(filename=ppath)

+                 if mlm.select(po) and arch in self.archmap:

+                     # we need a multilib package to be included

+                     # we assume the same signature level is available

+                     # XXX: what if a subarchitecture is the right answer?

+                     pl_path = pkg.replace(arch, self.archmap[arch]).strip()

+                     # assume this exists in the task results for the ml arch

+                     real_path = os.path.join(mldir, pl_path)

+                     if not os.path.exists(real_path):

+                         self.logger.error('%s (multilib) is not on the filesystem' % real_path)

+                         fs_missing.add(real_path)

+                         # we defer failure so can report all the missing deps

+                         continue

+                     ml_true.add(real_path)

+ 

+         # step 2: set up architectures for yum configuration

+         self.logger.info("Resolving multilib for %s using method devel" % arch)

+         yumbase = yum.YumBase()

+         yumbase.verbose_logger.setLevel(logging.ERROR)

+         yumdir = os.path.join(self.workdir, 'yum')

+         # TODO: unwind this arch mess

+         archlist = (arch, 'noarch')

+         transaction_arch = arch

+         archlist = archlist + self.compat[self.biarch[arch]]

+         best_compat = self.compat[self.biarch[arch]][0]

+         if rpmUtils.arch.archDifference(best_compat, arch) > 0:

+             transaction_arch = best_compat

+         if hasattr(rpmUtils.arch, 'ArchStorage'):

+             yumbase.preconf.arch = transaction_arch

+         else:

+             rpmUtils.arch.canonArch = transaction_arch

+ 

+         yconfig = """

+ [main]

+ debuglevel=2

+ pkgpolicy=newest

+ exactarch=1

+ gpgcheck=0

+ reposdir=/dev/null

+ cachedir=/yumcache

+ installroot=%s

+ logfile=/yum.log

+ 

+ [koji-%s]

+ name=koji multilib task

+ baseurl=file://%s

+ enabled=1

+ 

+ """ % (yumdir, self.id, mldir)

+         os.makedirs(os.path.join(yumdir, "yumcache"))

+         os.makedirs(os.path.join(yumdir, 'var/lib/rpm'))

+ 

+         # step 3: proceed with yum config and set up

+         yconfig_path = os.path.join(yumdir, 'yum.conf-koji-%s' % arch)

+         f = open(yconfig_path, 'w')

+         f.write(yconfig)

+         f.close()

+         self.session.uploadWrapper(yconfig_path, self.uploadpath,

+             os.path.basename(yconfig_path))

+         yumbase.doConfigSetup(fn=yconfig_path)

+         yumbase.conf.cache = 0

+         yumbase.doRepoSetup()

+         yumbase.doTsSetup()

+         yumbase.doRpmDBSetup()

+         # we trust Koji's files, so skip verifying sigs and digests

+         yumbase.ts.pushVSFlags(

+             (rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))

+         yumbase.doSackSetup(archlist=archlist, thisrepo='koji-%s' % arch)

+         yumbase.doSackFilelistPopulate()

+         for pkg in ml_true:

+             # TODO: store packages by first letter

+             # ppath = os.path.join(pkgdir, pkg.name[0].lower(), pname)

+             po = yum.packages.YumLocalPackage(filename=pkg)

+             yumbase.tsInfo.addInstall(po)

+ 

+         # step 4: execute yum transaction to get dependencies

+         self.logger.info("Resolving depenencies for arch %s" % arch)

+         rc, errors = yumbase.resolveDeps()

+         ml_needed = {}

+         for tspkg in yumbase.tsInfo.getMembers():

+             bnp = os.path.basename(tspkg.po.localPkg())

+             dep_path = os.path.join(mldir, bnp[0].lower(), bnp)

+             ml_needed[dep_path] = tspkg

+             self.logger.debug("added %s" % dep_path)

+             if not os.path.exists(dep_path):

+                 self.logger.error('%s (multilib dep) not on filesystem' % dep_path)

+                 fs_missing.add(dep_path)

+         self.logger.info('yum return code: %s' % rc)

+         if not rc:

+             self.logger.error('yum depsolve was unsuccessful')

+             raise koji.GenericError(errors)

+         if len(fs_missing) > 0:

+             missing_log = os.path.join(self.workdir, 'missing_multilib.log')

+             outfile = open(missing_log, 'w')

+             outfile.write('The following multilib files were missing:\n')

+             for ml_path in fs_missing:

+                 outfile.write(ml_path)

+                 outfile.write('\n')

+             outfile.close()

+             self.session.uploadWrapper(missing_log, self.uploadpath)

+             raise koji.GenericError('multilib packages missing. '

+                     'See missing_multilib.log')

+ 

+         # get rpm ids for ml pkgs

+         kpkgfile = os.path.join(mldir, 'kojipkgs')

+         kojipkgs = json.load(open(kpkgfile, 'r'))

+ 

+         # step 5: add dependencies to our package list

+         pkgwriter = open(self.pkglist, 'a')

+         for dep_path in ml_needed:

+             tspkg = ml_needed[dep_path]

+             bnp = os.path.basename(dep_path)

+             bnplet = bnp[0].lower()

+             koji.ensuredir(os.path.join(self.repodir, bnplet))

+             dst = os.path.join(self.repodir, bnplet, bnp)

+             if os.path.exists(dst):

+                 # we expect duplication with noarch, but not other arches

+                 if tspkg.arch != 'noarch':

+                     self.logger.warning("Path exists: %r", dst)

+                 continue

+             pkgwriter.write(bnplet + '/' + bnp + '\n')

+             self.logger.debug("os.symlink(%r, %r)", dep_path, dst)

+             os.symlink(dep_path, dst)

+             rpminfo = kojipkgs[bnp]

+             self.sigmap[rpminfo['id']] = rpminfo['sigkey']

+ 

+ 

+     def pick_key(self, keys, avail_keys):

+         best = None

+         best_idx = None

+         for sigkey in avail_keys:

+             if sigkey not in keys:

+                 # skip, not a key we are looking for

+                 continue

+             idx = keys.index(sigkey)

+             # lower idx (earlier in the list) is preferred

+             if best is None or best_idx > idx:

+                 best = sigkey

+                 best_idx = idx

+         return best
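A short trace of pick_key's preference semantics (key ids hypothetical):

```python
keys = ['deadbeef', '4c8da725']        # preference order from the caller
avail_keys = ['4c8da725', 'cafebabe']  # keys on file for a given rpm
# pick_key(keys, avail_keys) -> '4c8da725', the only requested key present
# pick_key(keys, ['cafebabe']) -> None; make_pkglist then records the rpm
# in sig_missing and, if allowed, falls back to the primary copy.
```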

+ 

+ 

+     def make_pkglist(self, tag_id, arch, keys, opts):

+         # get the rpm data

+         rpms = []

+         builddirs = {}

+         for a in self.compat[arch] + ('noarch',):

+             rpm_iter, builds = self.session.listTaggedRPMS(tag_id,

+                 event=opts['event'], arch=a, latest=opts['latest'],

+                 inherit=opts['inherit'], rpmsigs=True)

+             for build in builds:

+                 builddirs[build['id']] = self.pathinfo.build(build)

+             rpms += list(rpm_iter)

+ 

+         # index by id and key

+         preferred = {}

+         rpm_idx = {}

+         for rpminfo in rpms:

+             sigidx = rpm_idx.setdefault(rpminfo['id'], {})

+             sigidx[rpminfo['sigkey']] = rpminfo

+ 

+         # select our rpms

+         selected = {}

+         for rpm_id in rpm_idx:

+             avail_keys = rpm_idx[rpm_id].keys()

+             best_key = self.pick_key(keys, avail_keys)

+             if best_key is None:

+                 # we lack a matching key for this rpm

+                 fallback = avail_keys[0]

+                 rpminfo = rpm_idx[rpm_id][fallback].copy()

+                 rpminfo['sigkey'] = None

+                 selected[rpm_id] = rpminfo

+             else:

+                 selected[rpm_id] = rpm_idx[rpm_id][best_key]

+ 

+         #generate pkglist files

+         pkgfile = os.path.join(self.repodir, 'pkglist')

+         pkglist = file(pkgfile, 'w')

+         fs_missing = []

+         sig_missing = []

+         kojipkgs = {}

+         for rpm_id in selected:

+             rpminfo = selected[rpm_id]

+             if rpminfo['sigkey'] is None:

+                 sig_missing.append(rpm_id)

+                 if opts['skip_missing_signatures']:

+                     continue

+                 # use the primary copy, if allowed (checked below)

+                 pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

+                     self.pathinfo.rpm(rpminfo))

+             else:

+                 # use the signed copy

+                 pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

+                     self.pathinfo.signed(rpminfo, rpminfo['sigkey']))

+             if not os.path.exists(pkgpath):

+                 fs_missing.append(pkgpath)

+                 # we'll raise an error below

+             else:

+                 bnp = os.path.basename(pkgpath)

+                 bnplet = bnp[0].lower()

+                 pkglist.write(bnplet + '/' + bnp + '\n')

+                 koji.ensuredir(os.path.join(self.repodir, bnplet))

+                 self.sigmap[rpminfo['id']] = rpminfo['sigkey']

+                 dst = os.path.join(self.repodir, bnplet, bnp)

+                 self.logger.debug("os.symlink(%r, %r(", pkgpath, dst)

+                 os.symlink(pkgpath, dst)

+                 kojipkgs[bnp] = rpminfo

+         pkglist.close()

+         self.kojipkgs = kojipkgs

+ 

+         # report problems

+         if len(fs_missing) > 0:

+             missing_log = os.path.join(self.workdir, 'missing_files.log')

+             outfile = open(missing_log, 'w')

+             outfile.write('Some rpm files were missing.\n'

+                 'Most likely, you want to create these signed copies.\n\n'

+                 'Missing files:\n')

+             for pkgpath in sorted(fs_missing):

+                 outfile.write(pkgpath)

+                 outfile.write('\n')

+             outfile.close()

+             self.session.uploadWrapper(missing_log, self.uploadpath)

+             raise koji.GenericError('Packages missing from the filesystem. '

+                     'See missing_files.log.')

+         if sig_missing:

+             # log missing signatures and possibly error

+             missing_log = os.path.join(self.workdir, 'missing_signatures.log')

+             outfile = open(missing_log, 'w')

+             outfile.write('Some rpms were missing requested signatures.\n')

+             if opts['skip_missing_signatures']:

+                 outfile.write('The skip_missing_signatures option was specified, so '

+                         'these files were excluded.\n')

+             outfile.write('Acceptable keys: %r\n\n' % keys)

+             outfile.write('# RPM name: available keys\n')

+             fmt = '%(name)s-%(version)s-%(release)s.%(arch)s'

+             filenames = [[fmt % selected[r], r] for r in sig_missing]

+             for fname, rpm_id in sorted(filenames):

+                 avail = rpm_idx.get(rpm_id, {}).keys()

+                 outfile.write('%s: %r\n' % (fname, avail))

+             outfile.close()

+             self.session.uploadWrapper(missing_log, self.uploadpath)

+             if (not opts['skip_missing_signatures']

+                         and not opts['allow_missing_signatures']):

+                 raise koji.GenericError('Unsigned packages found. See '

+                         'missing_signatures.log')

+         return pkgfile

+ 

+ 

+     def write_kojipkgs(self):

+         filename = os.path.join(self.repodir, 'kojipkgs')

+         datafile = file(filename, 'w')

+         try:

+             json.dump(self.kojipkgs, datafile, indent=4)

+         finally:

+             datafile.close()

+         # and upload too

+         self.session.uploadWrapper(filename, self.uploadpath, 'kojipkgs')

+ 

+ 

+ 

  class WaitrepoTask(BaseTaskHandler):

  

      Methods = ['waitrepo']

file modified
+146 -11
@@ -1872,6 +1872,8 @@ 

      parser = OptionParser(usage=usage)

      parser.add_option("--with-unsigned", action="store_true",

                        help=_("Also import unsigned sig headers"))

+     parser.add_option("--write", action="store_true",

+                       help=_("Also write the signed copies"))

      parser.add_option("--test", action="store_true",

                        help=_("Test mode -- don't actually import"))

      (options, args) = parser.parse_args(args)
@@ -1921,6 +1923,10 @@ 

          print(_("Importing signature [key %s] from %s...") % (sigkey, path))

          if not options.test:

              session.addRPMSig(rinfo['id'], base64.encodestring(sighdr))

+         print(_("Writing signed copy"))

+         if not options.test:

+             session.writeSignedRPM(rinfo['id'], sigkey)

+ 

  

  def handle_write_signed_rpm(options, session, args):

      "[admin] Write signed RPMs to disk"
@@ -1940,21 +1946,31 @@ 

      activate_session(session)

      if options.all:

          rpms = session.queryRPMSigs(sigkey=key)

-         count = 1

-         for rpm in rpms:

-             print("%d/%d" % (count, len(rpms)))

-             count += 1

-             session.writeSignedRPM(rpm['rpm_id'], key)

+         rpms = [session.getRPM(r['rpm_id']) for r in rpms]

      elif options.buildid:

          rpms = session.listRPMs(int(options.buildid))

-         for rpm in rpms:

-             session.writeSignedRPM(rpm['id'], key)

      else:

-         for nvr in args:

+         rpms = []

+         bad = []

+         for nvra in args:

+             try:

+                 koji.parse_NVRA(nvra)

+                 rinfo = session.getRPM(nvra, strict=True)

+                 if rinfo:

+                     rpms.append(rinfo)

+             except koji.GenericError:

+                 bad.append(nvra)

+         # for historical reasons, we also accept nvrs

+         for nvr in bad:

              build = session.getBuild(nvr)

-             rpms = session.listRPMs(buildID=build['id'])

-             for rpm in rpms:

-                 session.writeSignedRPM(rpm['id'], key)

+             if not build:

+                 raise koji.GenericError("No such rpm or build: %s" % nvr)

+             rpms.extend(session.listRPMs(buildID=build['id']))

+     for i, rpminfo in enumerate(rpms):

+         nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpminfo

+         print("[%d/%d] %s" % (i+1, len(rpms), nvra))

+         session.writeSignedRPM(rpminfo['id'], key)
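With this rework, write-signed-rpm accepts rpm NVRAs directly, e.g. koji write-signed-rpm 4c8da725 foo-1.0-1.x86_64 (the key and NVRA here are hypothetical); arguments that do not resolve as an NVRA are retried as build NVRs for backward compatibility.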

+ 

  

  def handle_prune_signed_copies(options, session, args):

      "[admin] Prune signed copies"
@@ -7074,6 +7090,125 @@ 

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet)

  

+ def handle_dist_repo(options, session, args):

+     """Create a yum repo with distribution options"""

+     usage = _("usage: %prog dist-repo [options] tag keyID [keyID...]")

+     usage += _("\n(Specify the --help option for a list of other options)")

+     parser = OptionParser(usage=usage)

+     parser.add_option('--allow-missing-signatures', action='store_true',

+         default=False,

+         help=_('For RPMs not signed with a desired key, fall back to the '

+             'primary copy'))

+     parser.add_option("--arch", action='append', default=[],

+         help=_("Indicate an architecture to consider. The default is all " +

+             "architectures associated with the given tag. This option may " +

+             "be specified multiple times."))

+     parser.add_option('--comps', help='Include a comps file in the repodata')

+     parser.add_option('--delta-rpms', metavar='REPO', default=[],

+         action='append',

+         help=_('Create delta rpms. REPO can be the id of another dist repo '

+             'or the name of a tag that has a dist repo. May be specified '

+             'multiple times.'))

+     parser.add_option('--event', type='int',

+         help=_('create a dist repository based on a koji event'))

+     parser.add_option('--non-latest', dest='latest', default=True,

+         action='store_false', help='Include older builds, not just the latest')

+     parser.add_option('--multilib', default=None, metavar="CONFIG",

+         help=_('Include multilib packages in the repository using the given '

+             'config file'))

+     parser.add_option("--noinherit", action='store_true', default=False,

+         help=_('Do not consider tag inheritance'))

+     parser.add_option("--nowait", action='store_true', default=False,

+         help=_('Do not wait for the task to complete'))

+     parser.add_option('--skip-missing-signatures', action='store_true', default=False,

+         help=_('Skip RPMs not signed with the desired key(s)'))

+     task_opts, args = parser.parse_args(args)

+     if len(args) < 1:

+         parser.error(_('You must provide a tag to generate the repo from'))

+     if len(args) < 2 and not task_opts.allow_missing_signatures:

+         parser.error(_('Please specify one or more GPG key IDs (or '

+                 '--allow-missing-signatures)'))

+     if task_opts.allow_missing_signatures and task_opts.skip_missing_signatures:

+         parser.error(_('--allow-missing-signatures and --skip-missing-signatures '

+                 'are mutually exclusive'))

+     activate_session(session)

+     stuffdir = _unique_path('cli-dist-repo')

+     if task_opts.comps:

+         if not os.path.exists(task_opts.comps):

+             parser.error(_('could not find %s') % task_opts.comps)

+         session.uploadWrapper(task_opts.comps, stuffdir,

+             callback=_progress_callback)

+         print('')

+         task_opts.comps = os.path.join(stuffdir,

+             os.path.basename(task_opts.comps))

+     old_repos = []

+     if len(task_opts.delta_rpms) > 0:

+         for repo in task_opts.delta_rpms:

+             if repo.isdigit():

+                 rinfo = session.repoInfo(int(repo), strict=True)

+             else:

+                 # get dist repo for tag

+                 rinfo = session.getRepo(repo, dist=True)

+                 if not rinfo:

+                     # maybe there is an expired one

+                     rinfo = session.getRepo(repo,

+                             state=koji.REPO_STATES['EXPIRED'], dist=True)

+                 if not rinfo:

+                     parser.error(_("Can't find repo for tag: %s") % repo)

+             old_repos.append(rinfo['id'])

+     tag = args[0]

+     keys = args[1:]

+     taginfo = session.getTag(tag)

+     if not taginfo:

+         parser.error(_('unknown tag %s') % tag)

+     if len(task_opts.arch) == 0:

+         arches = taginfo['arches'] or ''

+         task_opts.arch = arches.split()

+         if not task_opts.arch:

+             parser.error(_('No arches given and no arches associated with tag'))

+     else:

+         for a in task_opts.arch:

+             if not taginfo['arches'] or a not in taginfo['arches']:

+                 print(_('Warning: %s is not in the list of tag arches') % a)

+     if task_opts.multilib:

+         if not os.path.exists(task_opts.multilib):

+             parser.error(_('could not find %s') % task_opts.multilib)

+         if 'x86_64' in task_opts.arch and 'i686' not in task_opts.arch:

+             parser.error(_('The multilib arch (i686) must be included'))

+         if 's390x' in task_opts.arch and 's390' not in task_opts.arch:

+             parser.error(_('The multilib arch (s390) must be included'))

+         if 'ppc64' in task_opts.arch and 'ppc' not in task_opts.arch:

+             parser.error(_('The multilib arch (ppc) must be included'))

+         session.uploadWrapper(task_opts.multilib, stuffdir,

+             callback=_progress_callback)

+         task_opts.multilib = os.path.join(stuffdir,

+             os.path.basename(task_opts.multilib))

+         print('')

+     # 'noarch' and 'src' are handled specially, not as repo arches

+     for spec_arch in ('noarch', 'src'):

+         if spec_arch in task_opts.arch:

+             task_opts.arch.remove(spec_arch)

+     opts = {

+         'arch': task_opts.arch,

+         'comps': task_opts.comps,

+         'delta': old_repos,

+         'event': task_opts.event,

+         'inherit': not task_opts.noinherit,

+         'latest': task_opts.latest,

+         'multilib': task_opts.multilib,

+         'skip_missing_signatures': task_opts.skip_missing_signatures,

+         'allow_missing_signatures': task_opts.allow_missing_signatures

+     }

+     task_id = session.distRepo(tag, keys, **opts)

+     print("Creating dist repo for tag " + tag)

+     if _running_in_bg() or task_opts.nowait:

+         return

+     else:

+         session.logout()

+         return watch_tasks(session, [task_id], quiet=options.quiet)
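A hypothetical invocation of the new command: koji dist-repo my-tag 4c8da725 --arch x86_64 --arch i686 --multilib ml.conf, where the tag, key id, and multilib config are made-up examples. Keys are preferred in the order given, matching pick_key in the builder code above.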

+ 

+ 

  def anon_handle_search(options, session, args):

      "[search] Search the system"

      usage = _("usage: %prog search [options] search_type pattern")

@@ -0,0 +1,7 @@ 

+ # schema updates for dist repo feature

+ # to be merged into schema upgrade script for next release

+ 

+ INSERT INTO permissions (name) VALUES ('image');

+ 

+ ALTER TABLE repo ADD COLUMN dist BOOLEAN DEFAULT 'false';

+ 

file modified
+3 -1
@@ -51,6 +51,7 @@ 

  INSERT INTO permissions (name) VALUES ('admin');

  INSERT INTO permissions (name) VALUES ('build');

  INSERT INTO permissions (name) VALUES ('repo');

+ INSERT INTO permissions (name) VALUES ('image');

  INSERT INTO permissions (name) VALUES ('livecd');

  INSERT INTO permissions (name) VALUES ('maven-import');

  INSERT INTO permissions (name) VALUES ('win-import');
@@ -409,7 +410,8 @@ 

  	id SERIAL NOT NULL PRIMARY KEY,

  	create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),

  	tag_id INTEGER NOT NULL REFERENCES tag(id),

- 	state INTEGER

+ 	state INTEGER,

+ 	dist BOOLEAN DEFAULT 'false'

  ) WITHOUT OIDS;

  

  -- external yum repos

file modified
+178 -17
@@ -2442,6 +2442,39 @@ 

      mdfile.close()

      _generate_maven_metadata(destdir)

  

+ def dist_repo_init(tag, keys, task_opts):

+     """Create a new repo entry in the INIT state, return full repo data"""

+     state = koji.REPO_INIT

+     tinfo = get_tag(tag, strict=True)

+     tag_id = tinfo['id']

+     event = task_opts.get('event')

+     arches = set([koji.canonArch(a) for a in task_opts['arch']])

+     # note: we need to match args from the other preRepoInit callback

+     koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=False,

+             with_debuginfo=False, event=event, repo_id=None,

+             dist=True, keys=keys, arches=arches, task_opts=task_opts)

+     if not event:

+         event = get_event()

+     repo_id = nextval('repo_id_seq')

+     insert = InsertProcessor('repo')

+     insert.set(id=repo_id, create_event=event, tag_id=tag_id,

+         state=state, dist=True)

+     insert.execute()

+     repodir = koji.pathinfo.distrepo(repo_id, tinfo['name'])

+     for arch in arches:

+         koji.ensuredir(os.path.join(repodir, arch))

+     # handle comps

+     if task_opts.get('comps'):

+         groupsdir = os.path.join(repodir, 'groups')

+         koji.ensuredir(groupsdir)

+         shutil.copyfile(os.path.join(koji.pathinfo.work(),

+             task_opts['comps']), groupsdir + '/comps.xml')

+     # note: we need to match args from the other postRepoInit callback

+     koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=False,

+             with_debuginfo=False, event=event, repo_id=repo_id)

+     return repo_id, event
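A minimal call, mirroring the unit test added at the end of this PR:

```python
# Creates the repo row in INIT state plus the on-disk arch dirs;
# returns the new repo id and the event it is pinned to.
repo_id, event = dist_repo_init('tag', ['key'], {'arch': ['x86_64']})
```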

+ 

+ 

  def repo_set_state(repo_id, state, check=True):

      """Set repo state"""

      if check:
@@ -2463,6 +2496,7 @@ 

          ('EXTRACT(EPOCH FROM events.time)', 'create_ts'),

          ('repo.tag_id', 'tag_id'),

          ('tag.name', 'tag_name'),

+         ('repo.dist', 'dist'),

      )

      q = """SELECT %s FROM repo

      JOIN tag ON tag_id=tag.id
@@ -7348,6 +7382,12 @@ 

      return event_id

  

  

+ def nextval(sequence):

+     """Get the next value for the given sequence"""

+     data = {'sequence': sequence}

+     return _singleValue("SELECT nextval(%(sequence)s)", data, strict=True)

+ 

+ 

  def parse_json(value, desc=None, errstr=None):

      if value is None:

          return value
@@ -10069,16 +10109,20 @@ 

                      taginfo['extra'][key] = ancestor['extra'][key]

          return taginfo

  

-     def getRepo(self, tag, state=None, event=None):

+     def getRepo(self, tag, state=None, event=None, dist=False):

          if isinstance(tag, (int, long)):

              id = tag

          else:

              id = get_tag_id(tag, strict=True)

  

-         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)']

-         aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts']

+         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)', 'repo.dist']

+         aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts', 'dist']

          joins = ['events ON repo.create_event = events.id']

          clauses = ['repo.tag_id = %(id)i']

+         if dist:

+             clauses.append('repo.dist is true')

+         else:

+             clauses.append('repo.dist is false')

          if event:

              # the repo table doesn't have all the fields of a _config table, just create_event

              clauses.append('create_event <= %(event)i')
@@ -10096,6 +10140,13 @@ 

      repoInfo = staticmethod(repo_info)

      getActiveRepos = staticmethod(get_active_repos)

  

+     def distRepo(self, tag, keys, **task_opts):

+         """Create a dist-repo task. returns task id"""

+         context.session.assertPerm('dist-repo')

+         repo_id, event_id = dist_repo_init(tag, keys, task_opts)

+         task_opts['event'] = event_id

+         return make_task('distRepo', [tag, repo_id, keys, task_opts], priority=15, channel='createrepo')

+ 

      def newRepo(self, tag, event=None, src=False, debuginfo=False):

          """Create a newRepo task. returns task id"""

          if context.session.hasPerm('regen-repo'):
@@ -12226,6 +12277,9 @@ 

          data: a dictionary of the form { arch: (uploadpath, files), ...}

          expire(optional): if set to true, mark the repo expired immediately*

  

+         If this is a dist repo, also hardlink the rpms in the final

+         directory.

+ 

          * This is used when a repo from an older event is generated

          """

          host = Host()
@@ -12236,18 +12290,22 @@ 

              raise koji.GenericError("Repo %(id)s not in INIT state (got %(state)s)" % rinfo)

          repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name'])

          workdir = koji.pathinfo.work()

-         for arch, (uploadpath, files) in data.iteritems():

-             archdir = "%s/%s" % (repodir, arch)

-             if not os.path.isdir(archdir):

-                 raise koji.GenericError("Repo arch directory missing: %s" % archdir)

-             datadir = "%s/repodata" % archdir

-             koji.ensuredir(datadir)

-             for fn in files:

-                 src = "%s/%s/%s" % (workdir, uploadpath, fn)

-                 dst = "%s/%s" % (datadir, fn)

-                 if not os.path.exists(src):

-                     raise koji.GenericError("uploaded file missing: %s" % src)

-                 safer_move(src, dst)

+         if not rinfo['dist']:

+             for arch, (uploadpath, files) in data.iteritems():

+                 archdir = "%s/%s" % (repodir, koji.canonArch(arch))

+                 if not os.path.isdir(archdir):

+                     raise koji.GenericError("Repo arch directory missing: %s" % archdir)

+                 datadir = "%s/repodata" % archdir

+                 koji.ensuredir(datadir)

+                 for fn in files:

+                     src = "%s/%s/%s" % (workdir, uploadpath, fn)

+                     if fn.endswith('pkglist'):

+                         dst = '%s/%s' % (archdir, fn)

+                     else:

+                         dst = "%s/%s" % (datadir, fn)

+                     if not os.path.exists(src):

+                         raise koji.GenericError("uploaded file missing: %s" % src)

+                     safer_move(src, dst)

          if expire:

              repo_expire(repo_id)

              koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)
@@ -12255,9 +12313,13 @@ 

          #else:

          repo_ready(repo_id)

          repo_expire_older(rinfo['tag_id'], rinfo['create_event'])

+ 

          #make a latest link

-         latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])

-         #XXX - this is a slight abuse of pathinfo

+         if rinfo['dist']:

+             latestrepolink = koji.pathinfo.distrepo('latest', rinfo['tag_name'])

+         else:

+             latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])

+             #XXX - this is a slight abuse of pathinfo

          try:

              if os.path.lexists(latestrepolink):

                  os.unlink(latestrepolink)
@@ -12267,6 +12329,105 @@ 

              log_error("Unable to create latest link for repo: %s" % repodir)

          koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)

  

+ 

+     def distRepoMove(self, repo_id, uploadpath, files, arch, sigmap):

+         """

+         Move a dist repo into its final location

+ 

+         Unlike normal repos (which are moved into place by repoDone), dist

+         repos have all their content linked (or copied) into place.

+ 

+         repo_id - the repo to move

+         uploadpath - where the uploaded files are

+         files - a list of the uploaded file names

+         arch - the arch of the repo

+         sigmap - a list of [rpm_id, sig] pairs

+ 

+         The rpms from sigmap should match the contents of the uploaded pkglist

+         file.

+ 

+         In sigmap, use sig=None to use the primary copy of the rpm instead of a

+         signed copy.

+         """

+         workdir = koji.pathinfo.work()

+         rinfo = repo_info(repo_id, strict=True)

+         repodir = koji.pathinfo.distrepo(repo_id, rinfo['tag_name'])

+         archdir = "%s/%s" % (repodir, koji.canonArch(arch))

+         if not os.path.isdir(archdir):

+             raise koji.GenericError("Repo arch directory missing: %s" % archdir)

+         datadir = "%s/repodata" % archdir

+         koji.ensuredir(datadir)

+ 

+         pkglist = set()

+         for fn in files:

+             src = "%s/%s/%s" % (workdir, uploadpath, fn)

+             if fn.endswith('.drpm'):

+                 koji.ensuredir(os.path.join(archdir, 'drpms'))

+                 dst = "%s/drpms/%s" % (archdir, fn)

+             elif fn.endswith('pkglist') or fn.endswith('kojipkgs'):

+                 dst = '%s/%s' % (archdir, fn)

+             else:

+                 dst = "%s/%s" % (datadir, fn)

+             if not os.path.exists(src):

+                 raise koji.GenericError("uploaded file missing: %s" % src)

+             if fn.endswith('pkglist'):

+                 with open(src) as pkgfile:

+                     for pkg in pkgfile:

+                         pkg = os.path.basename(pkg.strip())

+                         pkglist.add(pkg)

+             safer_move(src, dst)

+ 

+         # get rpms

+         build_dirs = {}

+         rpmdata = {}

+         for rpm_id, sigkey in sigmap:

+             rpminfo = get_rpm(rpm_id, strict=True)

+             if sigkey is None or sigkey == '':

+                 relpath = koji.pathinfo.rpm(rpminfo)

+             else:

+                 relpath = koji.pathinfo.signed(rpminfo, sigkey)

+             rpminfo['_relpath'] = relpath

+             if rpminfo['build_id'] in build_dirs:

+                 builddir = build_dirs[rpminfo['build_id']]

+             else:

+                 binfo = get_build(rpminfo['build_id'])

+                 builddir = koji.pathinfo.build(binfo)

+                 build_dirs[rpminfo['build_id']] = builddir

+             rpminfo['_fullpath'] = os.path.join(builddir, relpath)

+             basename = os.path.basename(relpath)

+             rpmdata[basename] = rpminfo

+ 

+         # sanity check

+         for fn in rpmdata:

+             if fn not in pkglist:

+                 raise koji.GenericError("RPM missing from pkglist: %s" % fn)

+         for fn in pkglist:

+             if fn not in rpmdata:

+                 raise koji.GenericError("No signature data for: %s" % fn)

+ 

+         for fn in rpmdata:

+             # hardlink or copy the rpms into the final repodir

+             # TODO: properly consider split-volume functionality

+             rpminfo = rpmdata[fn]

+             rpmpath = rpminfo['_fullpath']

+             bnp = fn

+             bnplet = bnp[0].lower()

+             koji.ensuredir(os.path.join(archdir, bnplet))

+             l_dst = os.path.join(archdir, bnplet, bnp)

+             if os.path.exists(l_dst):

+                 raise koji.GenericError("File already in repo: %s", l_dst)

+             logger.debug("os.link(%r, %r)", rpmpath, l_dst)

+             try:

+                 os.link(rpmpath, l_dst)

+             except OSError, ose:

+                 if ose.errno == 18:

+                     shutil.copy2(

+                         rpmpath, os.path.join(archdir, bnplet, bnp))

+                 else:

+                     raise
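For clarity, a hypothetical sigmap as sent by the builder's distRepoMove call earlier in this diff (the rpm ids and key are made up):

```python
# Each entry is [rpm_id, sigkey]; None (or '') selects the primary,
# unsigned copy, anything else the signed copy for that key.
sigmap = [
    [20001, '4c8da725'],
    [20002, None],
]
```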

+ 

+ 

      def isEnabled(self):

          host = Host()

          host.verify()

file modified
+1 -1
@@ -64,7 +64,7 @@ 

  - refactor uploads

  - more flexible gc

  - introduce an ORM to do away with raw SQL queries.

- - know how to manage signed repositories of RPMs

+ - know how to manage dist repositories of RPMs

  - know how to build installation media

  - more granular access control/groups

    - things like Read, Execute, Execute scratch, Delete, Tag, so we can delegate

file modified
+2
@@ -98,6 +98,7 @@ 

  Requires: mock >= 0.9.14

  Requires(pre): /usr/sbin/useradd

  Requires: squashfs-tools

+ Requires: python2-multilib

  %if %{use_systemd}

  Requires(post): systemd

  Requires(preun): systemd
@@ -116,6 +117,7 @@ 

  Requires: createrepo >= 0.4.11-2

  Requires: python-hashlib

  Requires: python-createrepo

+ Requires: python-simplejson

  %endif

  %if 0%{?fedora} >= 9

  Requires: createrepo >= 0.9.2

file modified
+10 -1
@@ -1815,6 +1815,10 @@ 

          """Return the directory where a repo belongs"""

          return self.topdir + ("/repos/%(tag_str)s/%(repo_id)s" % locals())

  

+     def distrepo(self, repo_id, tag):

+         """Return the directory with a dist repo lives"""

+         return os.path.join(self.topdir, 'repos-dist', tag, str(repo_id))
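Assuming the conventional /mnt/koji topdir, this yields layouts like the following (illustrative):

```python
# koji.pathinfo.distrepo(47, 'my-tag') -> '/mnt/koji/repos-dist/my-tag/47'
```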

+ 

      def repocache(self, tag_str):

          """Return the directory where a repo belongs"""

          return self.topdir + ("/repos/%(tag_str)s/cache" % locals())
@@ -2788,7 +2792,7 @@ 

          if 'request' in taskInfo:

              build = taskInfo['request'][1]

              extra = buildLabel(build)

-     elif method == 'newRepo':

+     elif method in ('newRepo', 'distRepo'):

          if 'request' in taskInfo:

              extra = str(taskInfo['request'][0])

      elif method in ('tagBuild', 'tagNotification'):
@@ -2803,6 +2807,11 @@ 

          if 'request' in taskInfo:

              arch = taskInfo['request'][1]

              extra = arch

+     elif method == 'createdistrepo':

+         if 'request' in taskInfo:

+             repo_id = taskInfo['request'][1]

+             arch = taskInfo['request'][2]

+             extra = '%s, %s' % (repo_id, arch)

      elif method == 'dependantTask':

          if 'request' in taskInfo:

              extra = ', '.join([subtask[0] for subtask in taskInfo['request'][1]])

@@ -116,6 +116,7 @@ 

  

  miscellaneous commands:

          call                      Execute an arbitrary XML-RPC call

+         dist-repo                 Create a yum repo with distribution options

          import-comps              Import group/package information from a comps file

          moshimoshi                Introduce yourself

          save-failed-tree          Create tarball with whole buildtree

@@ -0,0 +1,202 @@ 

+ 

+ import unittest

+ import mock

+ import os

+ import shutil

+ import tempfile

+ 

+ import koji

+ import kojihub

+ from koji.util import dslice_ex

+ 

+ IP = kojihub.InsertProcessor

+ 

+ 

+ class TestDistRepoInit(unittest.TestCase):

+ 

+ 

+     def getInsert(self, *args, **kwargs):

+         insert = IP(*args, **kwargs)

+         insert.execute = mock.MagicMock()

+         self.inserts.append(insert)

+         return insert

+ 

+ 

+     def setUp(self):

+         self.InsertProcessor = mock.patch('kojihub.InsertProcessor',

+                 side_effect=self.getInsert).start()

+         self.inserts = []

+  

+         self.get_tag = mock.patch('kojihub.get_tag').start()

+         self.get_event = mock.patch('kojihub.get_event').start()

+         self.nextval = mock.patch('kojihub.nextval').start()

+         self.ensuredir = mock.patch('koji.ensuredir').start()

+         self.copyfile = mock.patch('shutil.copyfile').start()

+ 

+         self.get_tag.return_value = {'id': 42, 'name': 'tag'}

+         self.get_event.return_value = 12345

+         self.nextval.return_value = 99

+ 

+ 

+     def tearDown(self):

+         mock.patch.stopall()

+ 

+ 

+     def test_simple_dist_repo_init(self):

+ 

+         # simple case

+         kojihub.dist_repo_init('tag', ['key'], {'arch': ['x86_64']})

+         self.InsertProcessor.assert_called_once()

+ 

+         ip = self.inserts[0]

+         self.assertEquals(ip.table, 'repo')

+         data = {'dist': True, 'create_event': 12345, 'tag_id': 42, 'id': 99,

+                     'state': koji.REPO_STATES['INIT']}

+         self.assertEquals(ip.data, data)

+         self.assertEquals(ip.rawdata, {})

+ 

+         # no comps option

+         self.copyfile.assert_not_called()

+ 

+ 

+     def test_dist_repo_init_with_comps(self):

+ 

+         # case with a comps file

+         kojihub.dist_repo_init('tag', ['key'], {'arch': ['x86_64'],

+                     'comps': 'COMPSFILE'})

+         self.InsertProcessor.assert_called_once()

+ 

+         ip = self.inserts[0]

+         self.assertEquals(ip.table, 'repo')

+         data = {'dist': True, 'create_event': 12345, 'tag_id': 42, 'id': 99,

+                     'state': koji.REPO_STATES['INIT']}

+         self.assertEquals(ip.data, data)

+         self.assertEquals(ip.rawdata, {})

+ 

+         # comps option was given

+         self.copyfile.assert_called_once()

+ 

+ 

+ class TestDistRepo(unittest.TestCase):

+ 

+     @mock.patch('kojihub.dist_repo_init')

+     @mock.patch('kojihub.make_task')

+     def test_DistRepo(self, make_task, dist_repo_init):

+         session = kojihub.context.session = mock.MagicMock()

+         # It seems MagicMock will not automatically handle attributes that

+         # start with "assert"

+         session.assertPerm = mock.MagicMock()

+         dist_repo_init.return_value = ('repo_id', 'event_id')

+         make_task.return_value = 'task_id'

+ 

+         exports = kojihub.RootExports()

+         ret = exports.distRepo('tag', 'keys')

+         session.assertPerm.assert_called_once_with('dist-repo')

+         dist_repo_init.assert_called_once()

+         make_task.assert_called_once()

+         self.assertEquals(ret, make_task.return_value)

+ 

+ 

+ class TestDistRepoMove(unittest.TestCase):

+ 

+     def setUp(self):

+         self.topdir = tempfile.mkdtemp()

+         self.rinfo = {

+             'create_event': 2915,

+             'create_ts': 1487256924.72718,

+             'creation_time': '2017-02-16 14:55:24.727181',

+             'id': 47,

+             'state': 1,

+             'tag_id': 2,

+             'tag_name': 'my-tag'}

+         self.arch = 'x86_64'

+ 

+         # set up a fake koji topdir

+         # koji.pathinfo._topdir = self.topdir

+         mock.patch('koji.pathinfo._topdir', new=self.topdir).start()

+         repodir = koji.pathinfo.distrepo(self.rinfo['id'], self.rinfo['tag_name'])

+         archdir = "%s/%s" % (repodir, koji.canonArch(self.arch))

+         os.makedirs(archdir)

+         self.uploadpath = 'UNITTEST'

+         workdir = koji.pathinfo.work()

+         uploaddir = "%s/%s" % (workdir, self.uploadpath)

+         os.makedirs(uploaddir)

+ 

+         # place some test files

+         self.files = ['foo.drpm', 'repomd.xml']

+         self.expected = ['x86_64/drpms/foo.drpm', 'x86_64/repodata/repomd.xml']

+         for fn in self.files:

+             path = os.path.join(uploaddir, fn)

+             koji.ensuredir(os.path.dirname(path))

+             with open(path, 'w') as fo:

+                 fo.write('%s' % fn)

+ 

+         # generate pkglist file and sigmap

+         self.files.append('pkglist')

+         plist = os.path.join(uploaddir, 'pkglist')

+         nvrs = ['aaa-1.0-2', 'bbb-3.0-5', 'ccc-8.0-13', 'ddd-21.0-34']

+         self.sigmap = []

+         self.rpms = {}

+         self.builds = {}

+         self.key = '4c8da725'

+         with open(plist, 'w') as f_pkglist:

+             for nvr in nvrs:

+                 binfo = koji.parse_NVR(nvr)

+                 rpminfo = binfo.copy()

+                 rpminfo['arch'] = 'x86_64'

+                 builddir = koji.pathinfo.build(binfo)

+                 relpath = koji.pathinfo.signed(rpminfo, self.key)

+                 path = os.path.join(builddir, relpath)

+                 koji.ensuredir(os.path.dirname(path))

+                 basename = os.path.basename(path)

+                 with open(path, 'w') as fo:

+                     fo.write('%s' % basename)

+                 f_pkglist.write(path)

+                 f_pkglist.write('\n')

+                 self.expected.append('x86_64/%s/%s' % (basename[0], basename))

+                 build_id = len(self.builds) + 10000

+                 rpm_id = len(self.rpms) + 20000

+                 binfo['id'] = build_id

+                 rpminfo['build_id'] = build_id

+                 rpminfo['id'] = rpm_id

+                 self.builds[build_id] = binfo

+                 self.rpms[rpm_id] = rpminfo

+                 self.sigmap.append([rpm_id, self.key])

+ 

+         # mocks

+         self.repo_info = mock.patch('kojihub.repo_info').start()

+         self.repo_info.return_value = self.rinfo.copy()

+         self.get_rpm = mock.patch('kojihub.get_rpm').start()

+         self.get_build = mock.patch('kojihub.get_build').start()

+         self.get_rpm.side_effect = self.our_get_rpm

+         self.get_build.side_effect = self.our_get_build

+ 

+ 

+     def tearDown(self):

+         mock.patch.stopall()

+         shutil.rmtree(self.topdir)

+ 

+ 

+     def our_get_rpm(self, rpminfo, strict=False, multi=False):

+         return self.rpms[rpminfo]

+ 

+ 

+     def our_get_build(self, buildInfo, strict=False):

+         return self.builds[buildInfo]

+ 

+ 

+     def test_distRepoMove(self):

+         exports = kojihub.HostExports()

+         exports.distRepoMove(self.rinfo['id'], self.uploadpath,

+                 list(self.files), self.arch, self.sigmap)

+         # check result

+         repodir = self.topdir + '/repos-dist/%(tag_name)s/%(id)s' % self.rinfo

+         for relpath in self.expected:

+             path = os.path.join(repodir, relpath)

+             basename = os.path.basename(path)

+             if not os.path.exists(path):

+                 raise Exception("Missing file: %s" % path)

+             data = open(path).read()

+             data = data.strip()

+             self.assertEquals(data, basename)

+ 

file modified
+32 -13
@@ -134,7 +134,14 @@ 
                              (self.tag_id, self.repo_id))
             return False
         tag_name = tag_info['name']
-         path = pathinfo.repo(self.repo_id, tag_name)
+         rinfo = self.session.repoInfo(self.repo_id, strict=True)
+         if rinfo['dist']:
+             path = pathinfo.distrepo(self.repo_id, tag_name)
+             lifetime = self.options.dist_repo_lifetime
+         else:
+             path = pathinfo.repo(self.repo_id, tag_name)
+             lifetime = self.options.deleted_repo_lifetime
+             # (should really be called expired_repo_lifetime)
         try:
             #also check dir age. We do this because a repo can be created from an older event
             #and should not be removed based solely on that event's timestamp.
@@ -152,8 +159,8 @@ 
             times = [self.event_ts, mtime, self.first_seen, self.expire_ts]
             times = [ts for ts in times if ts is not None]
             age = time.time() - max(times)
-             if age < self.options.deleted_repo_lifetime:
-                 #XXX should really be called expired_repo_lifetime
+             self.logger.debug("Repo %s (%s) age: %i sec", self.repo_id, path, age)
+             if age < lifetime:
                 return False
         self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
         if self.state != koji.REPO_EXPIRED:
@@ -333,41 +340,49 @@ 
         finally:
             session.logout()
 
-     def pruneLocalRepos(self):
+     def pruneLocalRepos(self, topdir, timername):
         """Scan filesystem for repos and remove any deleted ones
 
         Also, warn about any oddities"""
         if self.delete_pids:
             #skip
             return
-         self.logger.debug("Scanning filesystem for repos")
-         topdir = "%s/repos" % pathinfo.topdir
+         self.logger.debug("Scanning %s for repos", topdir)
+         self.logger.debug('max age allowed: %s seconds (from %s)',
+                 getattr(self.options, timername), timername)
         for tag in os.listdir(topdir):
             tagdir = "%s/%s" % (topdir, tag)
             if not os.path.isdir(tagdir):
+                 self.logger.debug("%s is not a directory, skipping", tagdir)
                 continue
             for repo_id in os.listdir(tagdir):
                 try:
                     repo_id = int(repo_id)
                 except ValueError:
+                     self.logger.debug("%s/%s not an int, skipping", tagdir, repo_id)
                     continue
                 repodir = "%s/%s" % (tagdir, repo_id)
                 if not os.path.isdir(repodir):
+                     self.logger.debug("%s not a directory, skipping", repodir)
                     continue
                 if repo_id in self.repos:
                     #we're already managing it, no need to deal with it here
+                     self.logger.debug("seen %s already, skipping", repodir)
                     continue
                 try:
                     dir_ts = os.stat(repodir).st_mtime
                 except OSError:
                     #just in case something deletes the repo out from under us
+                     self.logger.debug("%s deleted already?!", repodir)
                     continue
                 rinfo = self.session.repoInfo(repo_id)
                 if rinfo is None:
                     if not self.options.ignore_stray_repos:
                         age = time.time() - dir_ts
-                         if age > self.options.deleted_repo_lifetime:
-                             self.logger.info("Removing unexpected directory (no such repo): %s" % repodir)
+                         self.logger.debug("did not expect %s; age: %s",
+                                 repodir, age)
+                         if age > getattr(self.options, timername):
+                             self.logger.info("Removing unexpected directory (no such repo): %s", repodir)
                             self.rmtree(repodir)
                     continue
                 if rinfo['tag_name'] != tag:
@@ -375,11 +390,10 @@ 
                     continue
                 if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):
                     age = time.time() - max(rinfo['create_ts'], dir_ts)
-                     if age > self.options.deleted_repo_lifetime:
-                         #XXX should really be called expired_repo_lifetime
+                     self.logger.debug("potential removal candidate: %s; age: %s" % (repodir, age))
+                     if age > getattr(self.options, timername):
                         logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))
                         self.rmtree(repodir)
-                         pass
 
     def tagUseStats(self, tag_id):
         stats = self.tag_use_stats.get(tag_id)
@@ -627,12 +641,15 @@ 
     curr_chk_thread = start_currency_checker(session, repomgr)
     # TODO also move rmtree jobs to threads
     logger.info("Entering main loop")
+     repodir = "%s/repos" % pathinfo.topdir
+     distrepodir = "%s/repos-dist" % pathinfo.topdir
     while True:
         try:
             repomgr.updateRepos()
             repomgr.checkQueue()
             repomgr.printState()
-             repomgr.pruneLocalRepos()
+             repomgr.pruneLocalRepos(repodir, 'deleted_repo_lifetime')
+             repomgr.pruneLocalRepos(distrepodir, 'dist_repo_lifetime')
             if not curr_chk_thread.isAlive():
                 logger.error("Currency checker thread died. Restarting it.")
                 curr_chk_thread = start_currency_checker(session, repomgr)
@@ -728,6 +745,7 @@ 
                 'delete_batch_size' : 3,
                 'deleted_repo_lifetime': 7*24*3600,
                 #XXX should really be called expired_repo_lifetime
+                 'dist_repo_lifetime': 7*24*3600,
                 'sleeptime' : 15,
                 'cert': None,
                 'ca': '',  # FIXME: unused, remove in next major release
@@ -736,7 +754,8 @@ 
     if config.has_section(section):
         int_opts = ('deleted_repo_lifetime', 'max_repo_tasks', 'repo_tasks_limit',
                     'retry_interval', 'max_retries', 'offline_retry_interval',
-                     'max_delete_processes', 'max_repo_tasks_maven', 'delete_batch_size', )
+                     'max_delete_processes', 'max_repo_tasks_maven',
+                     'delete_batch_size', 'dist_repo_lifetime')
         str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',
                     'cert', 'ca', 'serverca', 'debuginfo_tags', 'source_tags')  # FIXME: remove ca here
         bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry',

file modified
+9
@@ -39,3 +39,12 @@ 
 
 ;certificate of the CA that issued the HTTP server certificate
 ;serverca = /etc/kojira/serverca.crt
+ 
+ ;how soon (in seconds) to clean up expired repositories. 1 week default
+ ;deleted_repo_lifetime = 604800
+ 
+ ;how soon (in seconds) to clean up dist repositories. 1 week default here too
+ ;dist_repo_lifetime = 604800
+ 
+ ;turn on debugging statements in the log
+ ;debug = false

file modified
+5
@@ -42,3 +42,8 @@ 
 # ToplevelTasks = 
 # Tasks that can have children
 # ParentTasks = 
+ 
+ # Uncommenting this will show python tracebacks in the webUI, but they are the
+ # same as what you will see in apache's error_log.
+ # Not for production use
+ # PythonDebug = True

file modified
+5 -3
@@ -431,6 +431,8 @@ 
           'tagBuild',
           'newRepo',
           'createrepo',
+           'distRepo',
+           'createdistrepo',
           'buildNotification',
           'tagNotification',
           'dependantTask',
@@ -444,9 +446,9 @@ 
           'livemedia',
           'createLiveMedia']
 # Tasks that can exist without a parent
- _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image', 'livemedia']
+ _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'distRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image', 'livemedia']
 # Tasks that can have children
- _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']
+ _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'distRepo', 'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']
 
 def tasks(environ, owner=None, state='active', view='tree', method='all', hostID=None, channelID=None, start=None, order='-id'):
     values = _initValues(environ, 'Tasks', 'tasks')
@@ -623,7 +625,7 @@ 
         build = server.getBuild(params[1])
         values['destTag'] = destTag
         values['build'] = build
-     elif task['method'] == 'newRepo':
+     elif task['method'] in ('newRepo', 'distRepo', 'createdistrepo'):
         tag = server.getTag(params[0])
         values['tag'] = tag
     elif task['method'] == 'tagNotification':

file modified
+16 -5
@@ -221,8 +221,13 @@ 
         #elif $task.method == 'newRepo'
         <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>
         #if $len($params) > 1
-         $printOpts($params[1])
+           $printOpts($params[1])
         #end if
+         #elif $task.method == 'distRepo'
+         <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>
+         <strong>Repo ID:</strong> $params[1]<br/>
+         <strong>Keys:</strong> $printValue(0, $params[2])<br/>
+         $printOpts($params[3])
         #elif $task.method == 'prepRepo'
         <strong>Tag:</strong> <a href="taginfo?tagID=$params[0].id">$params[0].name</a>
         #elif $task.method == 'createrepo'
@@ -230,12 +235,18 @@ 
         <strong>Arch:</strong> $params[1]<br/>
         #set $oldrepo = $params[2]
         #if $oldrepo
-         <strong>Old Repo ID:</strong> $oldrepo.id<br/>
-         <strong>Old Repo Creation:</strong> $koji.formatTimeLong($oldrepo.creation_time)<br/>
+             <strong>Old Repo ID:</strong> $oldrepo.id<br/>
+             <strong>Old Repo Creation:</strong> $koji.formatTimeLong($oldrepo.creation_time)<br/>
         #end if
-         #if $len($params) > 3
-         <strong>External Repos:</strong> $printValue(None, [ext['external_repo_name'] for ext in $params[3]])<br/>
+         #if $len($params) > 4 and $params[4]
+             <strong>External Repos:</strong> $printValue(None, [ext['external_repo_name'] for ext in $params[3]])<br/>
         #end if
+         #elif $task.method == 'createdistrepo'
+         <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>
+         <strong>Repo ID:</strong> $params[1]<br/>
+         <strong>Arch:</strong> $printValue(0, $params[2])<br/>
+         <strong>Keys:</strong> $printValue(0, $params[3])<br/>
+         <strong>Options:</strong> $printMap($params[4], '&nbsp;&nbsp;&nbsp;&nbsp;')
         #elif $task.method == 'dependantTask'
         <strong>Dependant Tasks:</strong><br/>
         #for $dep in $deps

Based on PR #54
Rebased, with several fixes and adjustments

Since pagure#1866 is not fixed, I am adding a comment so that I get notifications from this PR.

Sorry for spam, subscribing by commenting...

this probably should be removed

I believe this is to avoid overwriting the progress bar from the earlier upload.
However, looking at the kojid code, it doesn't look like this option is ever used as anything other than a boolean. I'll post an update in a few

Wait, nm, I see where the conf is used

Hmm, --multilib seems to be giving me some trouble

Yeah, it is trying to double link the noarch rpms. I've got an ugly workaround here, but I think it needs a proper fix.
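
Roughly the shape of the workaround (a sketch with hypothetical names, not the actual patch): guard the hard link so a noarch rpm that shows up in more than one arch list is only linked once.

import os

def link_pkg(src, destdir):
    dst = os.path.join(destdir, os.path.basename(src))
    # noarch rpms can appear under several arches; only link the first time
    if not os.path.exists(dst):
        os.link(src, dst)
    return dst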

rebased

7 years ago

Updated and rebased:

  • fix multilib import
  • avoid duplicate hard linking
  • avoid noarch duplication

Generating a multilib signed repo now works for me locally. E.g.

lkoji signed-repo f24-repo --multilib /etc/multilib.conf --allow-unsigned 4c8da725

This might be ready to merge

What about not bloating koji further with yum code?

dnf is not everywhere yet. Plenty of folks are running koji builders on rhel or centos, probably more than on Fedora actually.

@mikem What I wanted to say is that if we don't support both, then we can't have new features like rich dependencies.

I'm sure we'll support both eventually, but I'm starting with the original work that was submitted.

How would rich dependencies affect generating multilib repos?

@mikem I already have a patch for python-multilib, so if signed repos are going into the F26 cycle I would be happier if we supported DNF here as well.

@mikem Basically, yum just crashes when it sees a rich dependency.

I'm not going to hold this PR up over dnf, but I would certainly be happy to see a follow up that adds dnf support.

@mikem Then merge it so the work can be started.

rebased

7 years ago

I've been adding some unit tests for these changes and this revealed a design issue with the new signedRepoMove hub call. I'll need to rework that a bit.

5 new commits added

  • upload kojipkgs data
  • handle unsigned rpms in signedRepoMove
  • no integer keys in xmlrpc
  • rework signedRepoMove api a bit
  • update exception syntax in signed-repo code
7 years ago

The python2-multilib requirement is missing from the spec.

1 new commit added

  • require python2-multilib on builder
7 years ago

Filed #342 (hit with an empty pkglist). It is questionable whether the task should fail or create an empty repo when there is nothing to include.

@mikem How is this looking now, against current master?

rebased

7 years ago

The delta rpms part of this is what's holding it back now. I just had a talk with Jay and Dennis about this and I think we have a plan for fixing that.

4 new commits added

  • command help adjustments
  • adjust cli handling of --delta-rpms arg
  • first stab at fixing delta rpm behavior for signed repos
  • add builder json requirement for rhel5
7 years ago

I've modified the existing --delta-rpms arg to take a signed repo id (or the name of a tag with a signed repo). I think that is the last red flag for me.
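
E.g. (the repo id and the tag/key names here are made up):

koji signed-repo f24-repo --delta-rpms 57 4c8da725
koji signed-repo f24-repo --delta-rpms f23-repo 4c8da725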

Working locally for me.

The code looks alright to me. The only problem I have with it now is that I think it's inappropriately named. This code does everything but sign the repodata, so it's really a misnomer...

@ngompa It makes repos consisting of signed rpms. Can you suggest better terminology?

Actually, another issue is that the signed rpms are hardly the only difference between these repos and the other koji repos. Other differences are:

  • multilib rpms
  • delta rpms
  • stored in different location
  • hardlinked rpms (so the repo can easily be copied elsewhere)

Granted, several of these are optional, while the command insists on being given at least one key. Though, you can specify a bogus key and use --allow-unsigned.
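
E.g. (the key id here is deliberately bogus):

koji signed-repo mytag --allow-unsigned 00000000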

Maybe "full repos" (Fedora definition), "enhanced repos" (repositories with extra bits), or "export repos" (repos suitable for external usage)?

cobrien just suggested "dist repo" or "distribution repo." Has a nice ring to it, and seems fairly accurate.

rebranding this fully would involve changing:

  • command name
  • task method names
  • task class names
  • rpc method names
  • the 'signed' field in the repo table in the db
  • the PathInfo method
  • some help text in the cli handler
  • maybe a comment or two
  • cli output
  • some function names on the hub
  • callback args
  • option name for some calls
  • the name of the permission used to govern access
  • unit test names
  • signed_repo_lifetime option in kojira
  • a variable name or two

so... yeah....

Coming in from left field here... I'd just like to say that if the current naming is "confusing", then doing a rename now, vs. explaining things to people for years, might be worth it.

I like "dist repo"/"distribution repo". It's very clear and allows the wide range of functionality to fall neatly under a single umbrella term.

4 new commits added

  • last bit of renaming
  • rename files
  • more renaming
  • first stab at renaming signed repos to dist repos
7 years ago

Renaming wasn't too bad. Sed went a long way.

Running it against a build tag created a broken 'latest' link at repos/tag/latest, as the new repo was created in repos-dist/tag/60:

$ koji dist-repo test-f25-build --skip-unsigned 81B46521
$ ls -l /mnt/koji/repos/test-f25-build
total 0
drwxr-xr-x 4 apache apache 34 Mar 20 13:09 56
lrwxrwxrwx 1 apache apache  2 Mar 27 12:00 latest -> 60
$ ls -l /mnt/koji/repos-dist/test-f25-build/
total 0
drwxr-xr-x 3 apache apache 20 Mar 27 11:59 60

Cosmetic: usage of deprecated py2 constructs (.has_key, print without parentheses).

If taginfo['arches'] is None, split() will fail, so the next line with the error message will never be reached:

Traceback (most recent call last):
  File "/usr/bin/koji", line 7433, in <module>
    rv = locals()[command].call(options, session, args)
  File "/usr/bin/koji", line 7131, in handle_dist_repo
    task_opts.arch = taginfo['arches'].split()
AttributeError: 'NoneType' object has no attribute 'split'
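
The guard is presumably something along these lines (a sketch; the exact error text is my assumption):

arches = taginfo['arches'] or ''
task_opts.arch = arches.split()
if not task_opts.arch:
    error('No arches given and no arches associated with tag')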

Would it make sense to extend the 'Unsigned packages found' message with the signatures actually found in these rpms? It could be helpful in debugging (whether the packages really are unsigned, have a beta key, etc.).

That would be helpful, but we should probably get all of this out of the exception text. There could easily be 1000s of rpms in this list if the wrong args are given.
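
Something like this shape, perhaps (a sketch, not the merged code; 'missing' is a hypothetical list of (nvra, key) pairs): log each rpm individually and keep the exception itself short.

for nvra, key in missing:
    self.logger.error("No signature for %s with key %s", nvra, key)
if missing:
    raise koji.GenericError("Unsigned packages found: %i (see log for details)" % len(missing))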

7 new commits added

  • log missing multilib files
  • saner error on missing multilib files
  • propagate the full name of the skip_unsigned option
  • log missing signatures even if allowing unsigned
  • log missing files and signatures for dist repos
  • handle case where tag archlist is None
  • cleanup: has_key and print
7 years ago

rebased

7 years ago

I would change this to --allow-unsigned-rpms, as we may also want to support signed repodata (as SUSE does), and so this could get confusing if we're not specific.

Like the other option, I'd clarify this. --skip-unsigned-rpms makes it more obvious.

@mikem Overall, it looks good to me, but the last bit about clarifying the options is because I fully expect that Koji will need to support signed repodata, too, since CentOS has been doing it since CentOS 7.3 and Fedora does it for the OpenH264 repository. The only reason we don't do it generally in Fedora is because it's not automatic.

Validated that my previous issues are fixed.

What remains for me is the unclear usage of --allow-unsigned (and +1 for ngompa's renaming) with differently signed rpms - maybe just extend the command's help to note that it will skip an rpm if there is no unsigned version?

And maybe some more clarification about sigkey priority. It is not clear that the keys are prioritized. Maybe it doesn't need to be in the command's help, but it could be a section in the docs?

If we later add support for signed metadata, that will work completely differently. Koji will have to generate that signature somehow during the process. There will be no corresponding --allow-X or --skip-X options for that. Koji will either sign the metadata or it won't. I'd expect something like --sign-metadata.

That said, the options are probably misnamed, but for a different reason. When a desired signature is missing, the task will fall back to the primary copy. While this is often the same as 'unsigned', that need not be the case. If an rpm is initially imported with a signature (via a manual import, or perhaps a plugin that signs rpms at build time), then the primary copy will be signed. Unfortunately the data model doesn't track whether the primary copy is signed.

At any rate, I'm changing the options to --allow-missing-signatures and --skip-missing-signatures, to avoid the possibly incorrect term "unsigned".

Some fallout of this is that the code was dealing with these options all wrong (assuming that unsigned == primary copy), so I had to clean up make_pkglist() quite a bit. Will update shortly.
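
The per-rpm decision then looks roughly like this (a sketch; the option keys, variable names, and loop structure are assumptions, not the exact patch):

# builddir = koji.pathinfo.build(binfo); keys in the order given on the command line
for rpminfo in rpms:
    signed = None
    for key in keys:
        path = os.path.join(builddir, koji.pathinfo.signed(rpminfo, key))
        if os.path.exists(path):
            # first key with a signed copy wins
            signed = path
            break
    if signed:
        pkglist.append(signed)
    elif opts['skip_missing_signatures']:
        continue  # leave this rpm out of the repo entirely
    elif opts['allow_missing_signatures']:
        # fall back to the primary copy, which may or may not be signed
        pkglist.append(os.path.join(builddir, koji.pathinfo.rpm(rpminfo)))
    else:
        raise koji.GenericError('no signature for rpm %(id)s' % rpminfo)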

4 new commits added

  • fix
  • rename some options for clarity
  • fix arg sanity check
  • deal with missing signatures more correctly
7 years ago

@mikem Could you rebase this against master?

rebased

7 years ago

Commit c41f6cc fixes this pull-request

Pull-Request has been merged by mikem@redhat.com

7 years ago