#54 Signed Repositories
Closed 8 years ago by mikem. Opened 8 years ago by jgreguske.
jgreguske/koji master into master

lmc: add --project to livemedia-creator
Dennis Gilmore • 8 years ago  
builder: Add LMC task handler
Jon Disnard • 8 years ago  
Optparsing for LMC
Jon Disnard • 8 years ago  
file modified
+341 -6
@@ -35,12 +35,14 @@ 

  from koji.daemon import incremental_upload, log_output, TaskManager, SCM

  from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask

  from koji.util import parseStatus, isSuccess, dslice, dslice_ex

+ import multilib

  import os

  import pwd

  import grp

  import random

  import re

  import rpm

+ import rpmUtils.arch

  import shutil

  import signal

  import smtplib
@@ -59,6 +61,8 @@ 

  from optparse import OptionParser, SUPPRESS_HELP

  from StringIO import StringIO

  from yum import repoMDObject

+ import yum.packages

+ import yum.Errors

  

  #imports for LiveCD, LiveMedia, and Appliance handler

  image_enabled = False
@@ -3014,7 +3018,6 @@ 

          livemedia_log = '/tmp/lmc-logs/livemedia-out.log'

          resultdir = '/tmp/lmc'

  

- 

          # Common LMC command setup, needs extending

          cmd = ['/sbin/livemedia-creator',

                 '--ks', kskoji,
@@ -4721,8 +4724,8 @@ 

          if os.path.getsize(pkglist) == 0:

              pkglist = None

          self.create_local_repo(rinfo, arch, pkglist, groupdata, oldrepo)

- 

-         external_repos = self.session.getExternalRepoList(rinfo['tag_id'], event=rinfo['create_event'])

+         external_repos = self.session.getExternalRepoList(

+             rinfo['tag_id'], event=rinfo['create_event'])

          if external_repos:

              self.merge_repos(external_repos, arch, groupdata)

          elif pkglist is None:
@@ -4735,10 +4738,9 @@ 

          for f in os.listdir(self.datadir):

              files.append(f)

              self.session.uploadWrapper('%s/%s' % (self.datadir, f), uploadpath, f)

- 

          return [uploadpath, files]

  

-     def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo):

+     def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo, drpms=False):

          koji.ensuredir(self.outdir)

          if self.options.use_createrepo_c:

              cmd = ['/usr/bin/createrepo_c']
@@ -4750,7 +4752,9 @@ 

          if os.path.isfile(groupdata):

              cmd.extend(['-g', groupdata])

          #attempt to recycle repodata from last repo

-         if pkglist and oldrepo and self.options.createrepo_update:

+         if pkglist and oldrepo and self.options.createrepo_update and not drpms:

+             # signed repos overload the use of "oldrepo", so the conditional

+             # explicitly makes sure this branch is skipped when drpms is on

              oldpath = self.pathinfo.repo(oldrepo['id'], rinfo['tag_name'])

              olddatadir = '%s/%s/repodata' % (oldpath, arch)

              if not os.path.isdir(olddatadir):
@@ -4765,6 +4769,11 @@ 

                  cmd.append('--update')

                  if self.options.createrepo_skip_stat:

                      cmd.append('--skip-stat')

+         if drpms:

+             # generate delta-rpms

+             cmd.append('--deltas')

+             for repo in oldrepo:

+                 cmd.extend(['--oldpackagedirs', repo])

          # note: we can't easily use a cachedir because we do not have write

          # permission. The good news is that with --update we won't need to

          # be scanning many rpms.
@@ -4809,6 +4818,332 @@ 

              raise koji.GenericError, 'failed to merge repos: %s' \

                  % parseStatus(status, ' '.join(cmd))

  

+ 

+ class NewSignedRepoTask(BaseTaskHandler):

+     Methods = ['signedRepo']

+     _taskWeight = 0.1

+ 

+     def handler(self, tag, repo_id, keys, task_opts):

+         tinfo = self.session.getTag(tag, strict=True, event=task_opts['event'])

+         path = koji.pathinfo.signedrepo(repo_id, tinfo['name'])

+         if len(task_opts['arch']) == 0:

+             task_opts['arch'] = tinfo['arches'].split()

+         if len(task_opts['arch']) == 0:

+             raise koji.GenericError('No arches specified and none set for the tag')

+         subtasks = {}

+         # weed out subarchitectures

+         canonArches = set()

+         for arch in task_opts['arch']:

+             canonArches.add(koji.canonArch(arch))

+         arch32s = set()

+         for arch in canonArches:

+             if not rpmUtils.arch.isMultiLibArch(arch):

+                 arch32s.add(arch)

+         for arch in arch32s:

+             # we do 32-bit multilib arches first so the 64-bit ones can

+             # get a task ID and wait for them to complete

+             arglist = [tag, repo_id, arch, keys, task_opts]

+             subtasks[arch] = self.session.host.subtask(

+                 method='createsignedrepo', arglist=arglist, label=arch,

+                 parent=self.id, arch='noarch')

+         if len(subtasks) > 0 and task_opts['multilib']:

+             results = self.wait(subtasks.values(), all=True, failany=True)

+             for arch in arch32s:

+                 # move the 32-bit task output to the final resting place

+                 # so the 64-bit arches can use it for multilib

+                 upload, files, keypaths = results[subtasks[arch]]

+                 self.session.host.signedRepoMove(

+                     repo_id, upload, files, arch, keypaths)

+         for arch in canonArches:

+             # do the other arches

+             if arch not in arch32s:

+                 arglist = [tag, repo_id, arch, keys, task_opts]

+                 subtasks[arch] = self.session.host.subtask(

+                     method='createsignedrepo', arglist=arglist, label=arch,

+                     parent=self.id, arch='noarch')

+         # wait for 64-bit subtasks to finish

+         data = {}

+         results = self.wait(subtasks.values(), all=True, failany=True)

+         for (arch, task_id) in subtasks.iteritems():

+             data[arch] = results[task_id]

+             self.logger.debug("DEBUG: %r : %r " % (arch, data[arch]))

+             if task_opts['multilib']:

+                 # we moved the 32-bit results before, do the 64-bit

+                 if arch not in arch32s:

+                     upload, files, keypaths = results[subtasks[arch]]

+                     self.session.host.signedRepoMove(

+                         repo_id, upload, files, arch, keypaths)

+             else:

+                 upload, files, keypaths = results[subtasks[arch]]

+                 self.session.host.signedRepoMove(

+                     repo_id, upload, files, arch, keypaths)

+         self.session.host.repoDone(repo_id, data, expire=False, signed=True)

+         return 'Signed repository #%s successfully generated' % repo_id

+ 

+ 

+ class createSignedRepoTask(CreaterepoTask):

+     Methods = ['createsignedrepo']

+     _taskWeight = 1.5

+ 

+     archmap = {'s390x': 's390', 'ppc64': 'ppc', 'x86_64': 'i686'}

+     compat = {"i386": ("athlon", "i686", "i586", "i486", "i386", "noarch"),

+           "x86_64": ("amd64", "ia32e", "x86_64", "noarch"),

+           "ia64": ("ia64", "noarch"),

+           "ppc": ("ppc", "noarch"),

+           "ppc64": ("ppc64p7", "ppc64pseries", "ppc64iseries", "ppc64", "noarch"),

+           "ppc64le": ("ppc64le", "noarch"),

+           "s390": ("s390", "noarch"),

+           "s390x": ("s390x", "noarch"),

+           "sparc": ("sparcv9v", "sparcv9", "sparcv8", "sparc", "noarch"),

+           "sparc64": ("sparc64v", "sparc64", "noarch"),

+           "alpha": ("alphaev6", "alphaev56", "alphaev5", "alpha", "noarch"),

+           "arm": ("arm", "armv4l", "armv4tl", "armv5tel", "armv5tejl", "armv6l", "armv7l", "noarch"),

+           "armhfp": ("armv7hl", "armv7hnl", "noarch"),

+           "aarch64": ("aarch64", "noarch"),

+           "src": ("src",)

+           }

+ 

+     biarch = {"ppc": "ppc64", "x86_64": "i386", "sparc": "sparc64",

+               "s390x": "s390", "ppc64": "ppc"}

+ 

+     def handler(self, tag, repo_id, arch, keys, opts):

+         #arch is the arch of the repo, not the task

+         self.rinfo = self.session.repoInfo(repo_id, strict=True)

+         if self.rinfo['state'] != koji.REPO_INIT:

+             raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % self.rinfo

+         self.repo_id = self.rinfo['id']

+         self.pathinfo = koji.PathInfo(self.options.topdir)

+         groupdata = os.path.join(

+             self.pathinfo.signedrepo(repo_id, self.rinfo['tag_name']),

+             'groups', 'comps.xml')

+         #set up our output dir

+         self.repodir = '%s/repo' % self.workdir

+         koji.ensuredir(self.repodir)

+         self.outdir = self.repodir # workaround create_local_repo use

+         self.datadir = '%s/repodata' % self.repodir

+         self.keypaths = {}

+         if len(opts['delta']) > 0:

+             for path in opts['delta']:

+                 if not os.path.exists(path):

+                     raise koji.GenericError(

+                         'drpm path %s does not exist!' % path)

+         self.uploadpath = self.getUploadDir()

+         self.pkglist = self.make_pkglist(tag, arch, keys, opts)

+         if opts['multilib'] and rpmUtils.arch.isMultiLibArch(arch):

+             self.do_multilib(arch, self.archmap[arch], opts['multilib'])

+         self.logger.debug('package list is %s' % self.pkglist)

+         self.session.uploadWrapper(self.pkglist, self.uploadpath,

+             os.path.basename(self.pkglist))

+         if os.path.getsize(self.pkglist) == 0:

+             self.pkglist = None

+         if len(opts['delta']) > 0:

+             do_drpms = True

+         else:

+             do_drpms = False

+         self.create_local_repo(self.rinfo, arch, self.pkglist, groupdata,

+             opts['delta'], drpms=do_drpms)

+         if self.pkglist is None:

+             fo = file(os.path.join(self.datadir, "EMPTY_REPO"), 'w')

+             fo.write("This repo is empty because its tag has no content for this arch\n")

+             fo.close()

+         files = ['pkglist']

+         for f in os.listdir(self.datadir):

+             files.append(f)

+             self.session.uploadWrapper('%s/%s' % (self.datadir, f),

+                 self.uploadpath, f)

+         if opts['delta']:

+             ddir = os.path.join(self.repodir, 'drpms')

+             for f in os.listdir(ddir):

+                 files.append(f)

+                 self.session.uploadWrapper('%s/%s' % (ddir, f),

+                     self.uploadpath, f)

+         return [self.uploadpath, files, self.keypaths]

+ 

+     def do_multilib(self, arch, ml_arch, conf):

+         self.repo_id = self.rinfo['id']

+         pathinfo = koji.PathInfo(self.options.topdir)

+         repodir = pathinfo.signedrepo(self.rinfo['id'], self.rinfo['tag_name'])

+         mldir = os.path.join(repodir, koji.canonArch(ml_arch))

+         ml_true = set() # multilib packages we need to include before depsolve

+         ml_conf = os.path.join(self.pathinfo.work(), conf)

+ 

+         # step 1: figure out which packages are multilib (should already exist)

+         mlm = multilib.DevelMultilibMethod(ml_conf)

+         fs_missing = set()

+         with open(self.pkglist) as pkglist:

+             for pkg in pkglist:

+                 ppath = os.path.join(self.repodir, pkg.strip())

+                 po = yum.packages.YumLocalPackage(filename=ppath)

+                 if mlm.select(po) and self.archmap.has_key(arch):

+                     # we need a multilib package to be included

+                     # we assume the same signature level is available

+                     # XXX: what if a subarchitecture is the right answer?

+                     pl_path = pkg.replace(arch, self.archmap[arch]).strip()

+                     # assume this exists in the task results for the ml arch

+                     real_path = os.path.join(mldir, pl_path)

+                     ml_true.add(real_path)

+                     if not os.path.exists(real_path):

+                         self.logger.error('%s (multilib) is not on the filesystem' % real_path)

+                         fs_missing.add(real_path)

+ 

+         # step 2: set up architectures for yum configuration

+         self.logger.info("Resolving multilib for %s using method devel" % arch)

+         yumbase = yum.YumBase()

+         yumbase.verbose_logger.setLevel(logging.ERROR)

+         yumdir = os.path.join(self.workdir, 'yum')

+         # TODO: unwind this arch mess

+         archlist = (arch, 'noarch')

+         transaction_arch = arch

+         archlist = archlist + self.compat[self.biarch[arch]]

+         best_compat = self.compat[self.biarch[arch]][0]

+         if rpmUtils.arch.archDifference(best_compat, arch) > 0:

+             transaction_arch = best_compat

+         if hasattr(rpmUtils.arch, 'ArchStorage'):

+             yumbase.preconf.arch = transaction_arch

+         else:

+             rpmUtils.arch.canonArch = transaction_arch

+ 

+         yconfig = """

+ [main]

+ debuglevel=2

+ pkgpolicy=newest

+ exactarch=1

+ gpgcheck=0

+ reposdir=/dev/null

+ cachedir=/yumcache

+ installroot=%s

+ logfile=/yum.log

+ 

+ [koji-%s]

+ name=koji multilib task

+ baseurl=file://%s

+ enabled=1

+ 

+ """ % (yumdir, self.id, mldir)

+         os.makedirs(os.path.join(yumdir, "yumcache"))

+         os.makedirs(os.path.join(yumdir, 'var/lib/rpm'))

+ 

+         # step 3: proceed with yum config and set up

+         yconfig_path = os.path.join(yumdir, 'yum.conf-koji-%s' % arch)

+         f = open(yconfig_path, 'w')

+         f.write(yconfig)

+         f.close()

+         self.session.uploadWrapper(yconfig_path, self.uploadpath,

+             os.path.basename(yconfig_path))

+         yumbase.doConfigSetup(fn=yconfig_path)

+         yumbase.conf.cache = 0

+         yumbase.doRepoSetup()

+         yumbase.doTsSetup()

+         yumbase.doRpmDBSetup()

+         # we trust Koji's files, so skip verifying sigs and digests

+         yumbase.ts.pushVSFlags(

+             (rpm._RPMVSF_NOSIGNATURES | rpm._RPMVSF_NODIGESTS))

+         yumbase.doSackSetup(archlist=archlist, thisrepo='koji-%s' % arch)

+         yumbase.doSackFilelistPopulate()

+         for pkg in ml_true:

+             # TODO: store packages by first letter

+             # ppath = os.path.join(pkgdir, pkg.name[0].lower(), pname)

+             po = yum.packages.YumLocalPackage(filename=pkg)

+             yumbase.tsInfo.addInstall(po)

+ 

+         # step 4: execute yum transaction to get dependencies

+         self.logger.info("Resolving dependencies for arch %s" % arch)

+         rc, errors = yumbase.resolveDeps()

+         ml_needed = set()

+         for f in yumbase.tsInfo.getMembers():

+             bnp = os.path.basename(f.po.localPkg())

+             dep_path = os.path.join(mldir, bnp[0].lower(), bnp)

+             ml_needed.add(dep_path)

+             self.logger.debug("added %s" % dep_path)

+             if not os.path.exists(dep_path):

+                 self.logger.error('%s (multilib dep) not on filesystem' % dep_path)

+                 fs_missing.add(dep_path)

+         self.logger.info('yum return code: %s' % rc)

+         if not rc:

+             self.logger.error('yum depsolve was unsuccessful')

+             raise koji.GenericError(errors)

+         if len(fs_missing) > 0:

+             raise koji.GenericError('multilib packages missing:\n' +

+                 '\n'.join(fs_missing))

+ 

+         # step 5: add dependencies to our package list

+         pkgwriter = open(self.pkglist, 'a')

+         for ml_pkg in ml_needed:

+             bnp = os.path.basename(ml_pkg)

+             bnplet = bnp[0].lower()

+             pkgwriter.write(bnplet + '/' + bnp + '\n')

+             koji.ensuredir(os.path.join(self.repodir, bnplet))

+             os.symlink(ml_pkg, os.path.join(self.repodir, bnplet, bnp))

+             self.keypaths[bnp] = ml_pkg

+         pkgwriter.close()

+ 

+ 

+     def make_pkglist(self, tag_id, arch, keys, opts):

+ 

+         # Need to pass event_id because even though this is a single trans,

+         # it is possible to see the results of other committed transactions

+         rpms = []

+         builddirs = {}

+         for a in self.compat[arch] + ('noarch',):

+             rpm_iter, builds = self.session.listTaggedRPMS(tag_id,

+                 event=opts['event'], arch=a, latest=opts['latest'],

+                 inherit=opts['inherit'], rpmsigs=True)

+             for build in builds:

+                 builddirs[build['id']] = self.pathinfo.build(build)

+             rpms += list(rpm_iter)

+         #get build dirs

+         need = set(['%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % r for r in rpms])

+         #generate pkglist files

+         pkgfile = os.path.join(self.repodir, 'pkglist')

+         pkglist = file(pkgfile, 'w')

+         preferred = {}

+         if opts['unsigned']:

+             keys.append('') # make unsigned rpms the least preferred

+         for rpminfo in rpms:

+             if rpminfo['sigkey'] == '' and not opts['unsigned']:

+                 # skip, this is the unsigned rpminfo

+                 continue

+             if rpminfo['sigkey'] not in keys:

+                 # skip, not a key we are looking for

+                 continue

+             idx = keys.index(rpminfo['sigkey'])

+             if preferred.has_key(rpminfo['id']):

+                 if keys.index(preferred[rpminfo['id']]['sigkey']) <= idx:

+                     # key for this is not as preferable as what has been seen

+                     continue

+             preferred[rpminfo['id']] = rpminfo

+         seen = set()

+         fs_missing = set()

+         for rpminfo in preferred.values():

+             if rpminfo['sigkey'] == '':

+                 # we're taking an unsigned rpm (--allow-unsigned)

+                 pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

+                     self.pathinfo.rpm(rpminfo))

+             else:

+                 pkgpath = '%s/%s' % (builddirs[rpminfo['build_id']],

+                     self.pathinfo.signed(rpminfo, rpminfo['sigkey']))

+             seen.add(os.path.basename(pkgpath))

+             if not os.path.exists(pkgpath):

+                 fs_missing.add(pkgpath)

+             else:

+                 bnp = os.path.basename(pkgpath)

+                 bnplet = bnp[0].lower()

+                 pkglist.write(bnplet + '/' + bnp + '\n')

+                 koji.ensuredir(os.path.join(self.repodir, bnplet))

+                 self.keypaths[bnp] = pkgpath

+                 os.symlink(pkgpath, os.path.join(self.repodir, bnplet, bnp))

+         pkglist.close()

+         if len(fs_missing) > 0:

+             raise koji.GenericError('Packages missing from the filesystem:\n' +

+                 '\n'.join(fs_missing))

+         if not opts['skip']:

+             missing = list(need - seen)

+             if len(missing) != 0:

+                 missing.sort()

+                 raise koji.GenericError('Unsigned packages found: ' +

+                     '\n'.join(missing))

+         return pkgfile

+ 

+ 

  class WaitrepoTask(BaseTaskHandler):

  

      Methods = ['waitrepo']

file modified
+100
@@ -6777,6 +6777,106 @@ 

          session.logout()

          return watch_tasks(session, [task_id], quiet=options.quiet)

  

+ def handle_signed_repo(options, session, args):

+     """create a yum repo of GPG signed RPMs"""

+     usage = _("usage: %prog signed-repo [options] tag keyID [keyID...]")

+     usage += _("\n(Specify the --help option for a list of other options)")

+     parser = OptionParser(usage=usage)

+     parser.add_option('--allow-unsigned', action='store_true', default=False,

+         help=_('Use unsigned RPMs if none are available with the right key'))

+     parser.add_option("--arch", action='append', default=[],

+         help=_("Indicate an architecture to consider. The default is all " +

+             "architectures associated with the given tag. This option may " +

+             "be specified multiple times."))

+     parser.add_option('--comps', help='Include a comps file in the repodata')

+     parser.add_option('--delta-rpms', metavar='PATH', default=[],

+         action='append',

+         help=_('Create delta-rpms. PATH points to (older) rpms to generate against. May be specified multiple times. These have to be reachable by the builder too, so the path needs to be on shared storage.'))

+     parser.add_option('--event', type='int',

+         help=_('create a signed repository based on a Koji event'))

+     parser.add_option('--non-latest', dest='latest', default=True,

+         action='store_false', help='Include older builds, not just the latest')

+     parser.add_option('--multilib', default=None,

+         help=_('Include multilib packages in the repository using a config'))

+     parser.add_option("--noinherit", action='store_true', default=False,

+         help=_('Do not consider tag inheritance'))

+     parser.add_option("--nowait", action='store_true', default=False,

+         help=_('Do not wait for the task to complete'))

+     parser.add_option('--skip-unsigned', action='store_true', default=False,

+         help=_('Skip RPMs not signed with the desired key(s)'))

+     task_opts, args = parser.parse_args(args)

+     if len(args) < 2:

+         parser.error(_('You must provide a tag and 1 or more GPG key IDs'))

+     if task_opts.allow_unsigned and task_opts.skip_unsigned:

+         parser.error(_('--allow-unsigned and --skip-unsigned are mutually exclusive'))

+     activate_session(session)

+     stuffdir = _unique_path('cli-signed')

+     if task_opts.comps:

+         if not os.path.exists(task_opts.comps):

+             parser.error(_('could not find %s' % task_opts.comps))

+         session.uploadWrapper(task_opts.comps, stuffdir,

+             callback=_progress_callback)

+         print

+         task_opts.comps = os.path.join(stuffdir,

+             os.path.basename(task_opts.comps))

+     if len(task_opts.delta_rpms) > 0:

+         for path in task_opts.delta_rpms:

+             if not os.path.exists(path):

+                 print _("Warning: %s is not reachable locally. If this" % path)

+                 print _("  host does not have access to Koji's shared storage")

+                 print _("  this can be ignored.")

+     tag = args[0]

+     keys = args[1:]

+     taginfo = session.getTag(tag)

+     if not taginfo:

+         parser.error(_('unknown tag %s' % tag))

+     if len(task_opts.arch) == 0:

+         if taginfo['arches'] is None:

+             parser.error(_('No arches given and no arches associated with tag'))

+         task_opts.arch = taginfo['arches'].split()

+     else:

+         for a in task_opts.arch:

+             if not taginfo['arches'] or a not in taginfo['arches']:

+                 print _('Warning: %s is not in the list of tag arches' % a)

+     if task_opts.multilib:

+         if not os.path.exists(task_opts.multilib):

+             parser.error(_('could not find %s' % task_opts.multilib))

+         if 'x86_64' in task_opts.arch and not 'i686' in task_opts.arch:

+             parser.error(_('The multilib arch (i686) must be included'))

+         if 's390x' in task_opts.arch and not 's390' in task_opts.arch:

+             parser.error(_('The multilib arch (s390) must be included'))

+         if 'ppc64' in task_opts.arch and not 'ppc' in task_opts.arch:

+             parser.error(_('The multilib arch (ppc) must be included'))

+         session.uploadWrapper(task_opts.multilib, stuffdir,

+             callback=_progress_callback)

+         task_opts.multilib = os.path.join(stuffdir,

+             os.path.basename(task_opts.multilib))

+         print

+     for skip_arch in ('noarch', 'src'):

+         try:

+             task_opts.arch.remove(skip_arch) # handled separately

+         except ValueError:

+             pass

+     opts = {

+         'arch': task_opts.arch,

+         'comps': task_opts.comps,

+         'delta': task_opts.delta_rpms,

+         'event': task_opts.event,

+         'inherit': not task_opts.noinherit,

+         'latest': task_opts.latest,

+         'multilib': task_opts.multilib,

+         'skip': task_opts.skip_unsigned,

+         'unsigned': task_opts.allow_unsigned

+     }

+     task_id = session.signedRepo(tag, keys, **opts)

+     print "Creating signed repo for tag " + tag

+     if _running_in_bg() or task_opts.nowait:

+         return

+     else:

+         session.logout()

+         return watch_tasks(session, [task_id], quiet=options.quiet)

+ 

+ 

  def anon_handle_search(options, session, args):

      "[search] Search the system"

      usage = _("usage: %prog search [options] search_type pattern")

file modified
+3 -1
@@ -51,6 +51,7 @@ 

  INSERT INTO permissions (name) VALUES ('admin');

  INSERT INTO permissions (name) VALUES ('build');

  INSERT INTO permissions (name) VALUES ('repo');

+ INSERT INTO permissions (name) VALUES ('image');

  INSERT INTO permissions (name) VALUES ('livecd');

  INSERT INTO permissions (name) VALUES ('maven-import');

  INSERT INTO permissions (name) VALUES ('win-import');
@@ -388,7 +389,8 @@ 

  	id SERIAL NOT NULL PRIMARY KEY,

  	create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),

  	tag_id INTEGER NOT NULL REFERENCES tag(id),

- 	state INTEGER

+ 	state INTEGER,

+ 	signed BOOLEAN DEFAULT 'false'

  ) WITHOUT OIDS;

  

  -- external yum repos

file modified
+112 -19
@@ -2334,6 +2334,38 @@ 

      mdfile.close()

      _generate_maven_metadata(destdir)

  

+ def signed_repo_init(tag, keys, task_opts):

+     """Create a new repo entry in the INIT state, return full repo data"""

+     logger = logging.getLogger("koji.hub.signed_repo_init")

+     state = koji.REPO_INIT

+     tinfo = get_tag(tag, strict=True)

+     koji.plugin.run_callbacks('preRepoInit', tag=tinfo, keys=keys, repo_id=None)

+     tag_id = tinfo['id']

+     repo_id = _singleValue("SELECT nextval('repo_id_seq')")

+     repo_arches = task_opts['arch']

+     arches = set([])

+     for arch in repo_arches:

+         arches.add(koji.canonArch(arch))

+     if not task_opts['event']:

+         task_opts['event'] = _singleValue("SELECT get_event()")

+     insert = InsertProcessor('repo')

+     insert.set(id=repo_id, create_event=task_opts['event'], tag_id=tag_id,

+         state=state, signed=True)

+     insert.execute()

+     repodir = koji.pathinfo.signedrepo(repo_id, tinfo['name'])

+     for arch in arches:

+         koji.ensuredir(os.path.join(repodir, arch))

+     # handle comps

+     if task_opts['comps']:

+         groupsdir = os.path.join(repodir, 'groups')

+         koji.ensuredir(groupsdir)

+         shutil.copyfile(os.path.join(koji.pathinfo.work(),

+             task_opts['comps']), groupsdir + '/comps.xml')

+     koji.plugin.run_callbacks('postRepoInit', tag=tinfo,

+         event=task_opts['event'], repo_id=repo_id)

+     return repo_id, task_opts['event']

+ 

+ 

  def repo_set_state(repo_id, state, check=True):

      """Set repo state"""

      if check:
@@ -2355,6 +2387,7 @@ 

          ('EXTRACT(EPOCH FROM events.time)','create_ts'),

          ('repo.tag_id', 'tag_id'),

          ('tag.name', 'tag_name'),

+         ('repo.signed', 'signed'),

      )

      q = """SELECT %s FROM repo

      JOIN tag ON tag_id=tag.id
@@ -9591,16 +9624,20 @@ 

                      taginfo['extra'][key] = ancestor['extra'][key]

          return taginfo

  

-     def getRepo(self,tag,state=None,event=None):

-         if isinstance(tag,int):

+     def getRepo(self, tag, state=None, event=None, signed=False):

+         if isinstance(tag, int):

              id = tag

          else:

-             id = get_tag_id(tag,strict=True)

+             id = get_tag_id(tag, strict=True)

  

-         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)']

-         aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts']

+         fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)', 'repo.signed']

+         aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts', 'signed']

          joins = ['events ON repo.create_event = events.id']

          clauses = ['repo.tag_id = %(id)i']

+         if signed:

+             clauses.append('repo.signed is true')

+         else:

+             clauses.append('repo.signed is false')

          if event:

              # the repo table doesn't have all the fields of a _config table, just create_event

              clauses.append('create_event <= %(event)i')
@@ -9618,6 +9655,13 @@ 

      repoInfo = staticmethod(repo_info)

      getActiveRepos = staticmethod(get_active_repos)

  

+     def signedRepo(self, tag, keys, **task_opts):

+         """Create a signed-repo task. returns task id"""

+         context.session.assertPerm('signed-repo')

+         repo_id, event_id = signed_repo_init(tag, keys, task_opts)

+         task_opts['event'] = event_id

+         return make_task('signedRepo', [tag, repo_id, keys, task_opts], priority=15, channel='createrepo')

+ 

      def newRepo(self, tag, event=None, src=False, debuginfo=False):

          """Create a newRepo task. returns task id"""

          if context.session.hasPerm('regen-repo'):
@@ -11696,12 +11740,13 @@ 

              else:

                  os.link(filepath, dst)

  

-     def repoDone(self, repo_id, data, expire=False):

+     def repoDone(self, repo_id, data, expire=False, signed=False):

          """Move repo data into place, mark as ready, and expire earlier repos

  

          repo_id: the id of the repo

          data: a dictionary of the form { arch: (uploadpath, files), ...}

          expire(optional): if set to true, mark the repo expired immediately*

+         signed(optional): if true, skip the move (already done by signedRepoMove)

  

          * This is used when a repo from an older event is generated

          """
@@ -11713,19 +11758,23 @@ 

              raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo

          repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name'])

          workdir = koji.pathinfo.work()

-         for arch, (uploadpath, files) in data.iteritems():

-             archdir = "%s/%s" % (repodir, arch)

-             if not os.path.isdir(archdir):

-                 raise koji.GenericError, "Repo arch directory missing: %s" % archdir

-             datadir = "%s/repodata" % archdir

-             koji.ensuredir(datadir)

-             for fn in files:

-                 src = "%s/%s/%s" % (workdir,uploadpath, fn)

-                 dst = "%s/%s" % (datadir, fn)

-                 if not os.path.exists(src):

-                     raise koji.GenericError, "uploaded file missing: %s" % src

-                 os.link(src, dst)

-                 os.unlink(src)

+         if not signed:

+             for arch, (uploadpath, files) in data.iteritems():

+                 archdir = "%s/%s" % (repodir, koji.canonArch(arch))

+                 if not os.path.isdir(archdir):

+                     raise koji.GenericError, "Repo arch directory missing: %s" % archdir

+                 datadir = "%s/repodata" % archdir

+                 koji.ensuredir(datadir)

+                 for fn in files:

+                     src = "%s/%s/%s" % (workdir, uploadpath, fn)

+                     if fn.endswith('pkglist'):

+                         dst = '%s/%s' % (archdir, fn)

+                     else:

+                         dst = "%s/%s" % (datadir, fn)

+                     if not os.path.exists(src):

+                         raise koji.GenericError, "uploaded file missing: %s" % src

+                     os.link(src, dst)

+                     os.unlink(src)

          if expire:

              repo_expire(repo_id)

              koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)
@@ -11745,6 +11794,50 @@ 

              log_error("Unable to create latest link for repo: %s" % repodir)

          koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)

  

+     def signedRepoMove(self, repo_id, uploadpath, files, arch, fullpaths):

+         """

+         Very similar to repoDone, except only the uploads are completed.

+         fullpaths is a dict like so: rpm file name -> full path to that rpm"""

+         workdir = koji.pathinfo.work()

+         rinfo = repo_info(repo_id, strict=True)

+         repodir = koji.pathinfo.signedrepo(repo_id, rinfo['tag_name'])

+         archdir = "%s/%s" % (repodir, koji.canonArch(arch))

+         if not os.path.isdir(archdir):

+             raise koji.GenericError, "Repo arch directory missing: %s" % archdir

+         datadir = "%s/repodata" % archdir

+         koji.ensuredir(datadir)

+         for fn in files:

+             src = "%s/%s/%s" % (workdir, uploadpath, fn)

+             if fn.endswith('.drpm'):

+                 koji.ensuredir(os.path.join(archdir, 'drpms'))

+                 dst = "%s/drpms/%s" % (archdir, fn)

+             elif fn.endswith('pkglist'):

+                 dst = '%s/%s' % (archdir, fn)

+             else:

+                 dst = "%s/%s" % (datadir, fn)

+             if not os.path.exists(src):

+                 raise koji.GenericError, "uploaded file missing: %s" % src

+             os.link(src, dst)

+             if fn.endswith('pkglist'):

+                 # hardlink the found rpms into the final repodir

+                 # TODO: properly consider split-volume functionality

+                 with open(src) as pkgfile:

+                     for pkg in pkgfile:

+                         pkg = os.path.basename(pkg.strip())

+                         rpmpath = fullpaths[pkg]

+                         bnp = os.path.basename(rpmpath)

+                         bnplet = bnp[0].lower()

+                         koji.ensuredir(os.path.join(archdir, bnplet))

+                         try:

+                             os.link(rpmpath, os.path.join(archdir, bnplet, bnp))

+                         except OSError, ose:

+                             if ose.errno == 18:

+                                 shutil.copy2(

+                                     rpmpath, os.path.join(archdir, bnplet, bnp))

+                             else:

+                                 raise ose

+             os.unlink(src)

+ 

      def isEnabled(self):

          host = Host()

          host.verify()

file modified
+7 -7
@@ -204,13 +204,6 @@ 

  %dir %{_sysconfdir}/koji-hub/plugins

  %config(noreplace) %{_sysconfdir}/koji-hub/plugins/*.conf

  

- %files builder-plugins

- %defattr(-,root,root)

- %dir %{_sysconfdir}/kojid/plugins

- %config(noreplace) %{_sysconfdir}/kojid/plugins/*.conf

- %dir %{_prefix}/lib/koji-builder-plugins

- %{_prefix}/lib/koji-builder-plugins/*.py*

- 

  %files utils

  %defattr(-,root,root)

  %{_sbindir}/kojira
@@ -252,6 +245,13 @@ 

  %config(noreplace) %{_sysconfdir}/kojid/kojid.conf

  %attr(-,kojibuilder,kojibuilder) %{_sysconfdir}/mock/koji

  

+ %files builder-plugins

+ %defattr(-,root,root)

+ %dir %{_sysconfdir}/kojid/plugins

+ %config(noreplace) %{_sysconfdir}/kojid/plugins/*.conf

+ %dir %{_prefix}/lib/koji-builder-plugins

+ %{_prefix}/lib/koji-builder-plugins/*.py*

+ 

  %pre builder

  /usr/sbin/useradd -r -s /bin/bash -G mock -d /builddir -M kojibuilder 2>/dev/null ||:

  

file modified
+11 -2
@@ -1694,6 +1694,10 @@ 

          """Return the directory where a repo belongs"""

          return self.topdir + ("/repos/%(tag_str)s/%(repo_id)s" % locals())

  

+     def signedrepo(self, repo_id, tag):

+         """Return the directory where a signed repo lives"""

+         return os.path.join(self.topdir, 'repos', 'signed', tag, str(repo_id))

+ 

      def repocache(self,tag_str):

          """Return the directory where a repo belongs"""

          return self.topdir + ("/repos/%(tag_str)s/cache" % locals())
@@ -2519,7 +2523,7 @@ 

          if taskInfo.has_key('request'):

              build = taskInfo['request'][1]

              extra = buildLabel(build)

-     elif method == 'newRepo':

+     elif method in ('newRepo', 'signedRepo'):

          if taskInfo.has_key('request'):

              extra = str(taskInfo['request'][0])

      elif method in ('tagBuild', 'tagNotification'):
@@ -2530,10 +2534,15 @@ 

          if taskInfo.has_key('request'):

              tagInfo = taskInfo['request'][0]

              extra = tagInfo['name']

-     elif method == 'createrepo':

+     elif method in ('createrepo',):

          if taskInfo.has_key('request'):

              arch = taskInfo['request'][1]

              extra = arch

+     elif method in ('createsignedrepo',):

+         if taskInfo.has_key('request'):

+             repo_id = taskInfo['request'][1]

+             arch = taskInfo['request'][2]

+             extra = '%s, %s' % (repo_id, arch)

      elif method == 'dependantTask':

          if taskInfo.has_key('request'):

              extra = ', '.join([subtask[0] for subtask in taskInfo['request'][1]])

file modified
+28 -9
@@ -134,7 +134,11 @@ 

                               (self.tag_id, self.repo_id))

              return False

          tag_name = tag_info['name']

-         path = pathinfo.repo(self.repo_id, tag_name)

+         rinfo = self.session.repoInfo(self.repo_id, strict=True)

+         if rinfo['signed']:

+             path = pathinfo.signedrepo(self.repo_id, tag_name)

+         else:

+             path = pathinfo.repo(self.repo_id, tag_name)

          try:

              #also check dir age. We do this because a repo can be created from an older event

              #and should not be removed based solely on that event's timestamp.
@@ -333,40 +337,51 @@ 

          finally:

              session.logout()

  

-     def pruneLocalRepos(self):

+     def pruneLocalRepos(self, topdir, timername):

          """Scan filesystem for repos and remove any deleted ones

  

          Also, warn about any oddities"""

          if self.delete_pids:

              #skip

              return

-         self.logger.debug("Scanning filesystem for repos")

-         topdir = "%s/repos" % pathinfo.topdir

+         self.logger.debug("Scanning %s for repos" % topdir)

+         self.logger.debug('max age allowed: %s seconds (from %s)' %

+             (getattr(self.options, timername), timername))

          for tag in os.listdir(topdir):

              tagdir = "%s/%s" % (topdir, tag)

              if not os.path.isdir(tagdir):

+                 self.logger.debug("%s is not a directory, skipping" % tagdir)

                  continue

              for repo_id in os.listdir(tagdir):

                  try:

                      repo_id = int(repo_id)

                  except ValueError:

+                     self.logger.debug("%s/%s not an int, skipping" % (tagdir, repo_id))

+                     # This check also keeps the first call to this method from

+                     # touching signed repos: under repos/signed the next level

+                     # is tag names, which are not ints. Although, if someone

+                     # has tags that are just integers, that could be a problem.

                      continue

                  repodir = "%s/%s" % (tagdir, repo_id)

                  if not os.path.isdir(repodir):

+                     self.logger.debug("%s not a directory, skipping" % repodir)

                      continue

                  if self.repos.has_key(repo_id):

                      #we're already managing it, no need to deal with it here

+                     self.logger.debug("seen %s already, skipping" % repodir)

                      continue

                  try:

                      dir_ts = os.stat(repodir).st_mtime

                  except OSError:

                      #just in case something deletes the repo out from under us

+                     self.logger.debug("%s deleted already?!" % repodir)

                      continue

                  rinfo = self.session.repoInfo(repo_id)

                  if rinfo is None:

                      if not self.options.ignore_stray_repos:

                          age = time.time() - dir_ts

-                         if age > self.options.deleted_repo_lifetime:

+                         self.logger.debug("did not expect %s; age: %s" %

+                             (repodir, age))

+                         if age > getattr(self.options, timername):

                              self.logger.info("Removing unexpected directory (no such repo): %s" % repodir)

                              self.rmtree(repodir)

                      continue
@@ -375,11 +390,11 @@ 

                      continue

                  if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):

                      age = time.time() - max(rinfo['create_ts'], dir_ts)

-                     if age > self.options.deleted_repo_lifetime:

+                     self.logger.debug("potential removal candidate: %s; age: %s" % (repodir, age))

+                     if age > getattr(self.options, timername):

                          #XXX should really be called expired_repo_lifetime

                          logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))

                          self.rmtree(repodir)

-                         pass

  

      def tagUseStats(self, tag_id):

          stats = self.tag_use_stats.get(tag_id)
@@ -632,7 +647,9 @@ 

              repomgr.updateRepos()

              repomgr.checkQueue()

              repomgr.printState()

-             repomgr.pruneLocalRepos()

+             repodir = "%s/repos" % pathinfo.topdir

+             repomgr.pruneLocalRepos(repodir, 'deleted_repo_lifetime')

+             repomgr.pruneLocalRepos(repodir + '/signed', 'signed_repo_lifetime')

              if not curr_chk_thread.isAlive():

                  logger.error("Currency checker thread died. Restarting it.")

                  curr_chk_thread = start_currency_checker(session, repomgr)
@@ -726,6 +743,7 @@ 

                  'delete_batch_size' : 3,

                  'deleted_repo_lifetime': 7*24*3600,

                  #XXX should really be called expired_repo_lifetime

+                 'signed_repo_lifetime': 7*24*3600,

                  'sleeptime' : 15,

                  'cert': '/etc/kojira/client.crt',

                  'ca': '',  # FIXME: unused, remove in next major release
@@ -734,7 +752,8 @@ 

      if config.has_section(section):

          int_opts = ('deleted_repo_lifetime', 'max_repo_tasks', 'repo_tasks_limit',

                      'retry_interval', 'max_retries', 'offline_retry_interval',

-                     'max_delete_processes', 'max_repo_tasks_maven', 'delete_batch_size', )

+                     'max_delete_processes', 'max_repo_tasks_maven',

+                     'delete_batch_size', 'signed_repo_lifetime')

          str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',

                      'cert', 'ca', 'serverca', 'debuginfo_tags', 'source_tags')  # FIXME: remove ca here

          bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry', 'krb_rdns')

file modified
+9
@@ -39,3 +39,12 @@ 

  

  ;certificate of the CA that issued the HTTP server certificate

  ;serverca = /etc/kojira/serverca.crt

+ 

+ ;how soon (in seconds) to clean up expired repositories. 1 week default

+ ;deleted_repo_lifetime = 604800

+ 

+ ;how soon (in seconds) to clean up signed repositories. 1 week default here too

+ ;signed_repo_lifetime = 604800

+ 

+ ;turn on debugging statements in the log

+ ;debug = false

file modified
+5
@@ -33,3 +33,8 @@ 

  # to hide from tasks listed on the front page. You might want to, for instance,

  # hide the activity of an account used for continuous integration.

  #HiddenUsers = 5372 1234

+ 

+ # Uncommenting this will show python tracebacks in the webUI, but they are the

+ # same as what you will see in apache's error_log.

+ # Not for production use

+ #PythonDebug = True

file modified
+5 -3
@@ -436,6 +436,8 @@ 

            'tagBuild',

            'newRepo',

            'createrepo',

+           'signedRepo',

+           'createsignedrepo',

            'buildNotification',

            'tagNotification',

            'dependantTask',
@@ -449,9 +451,9 @@ 

            'livemedia',

            'createLiveMedia']

  # Tasks that can exist without a parent

- _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image', 'livemedia']

+ _TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'signedRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image', 'livemedia']

  # Tasks that can have children

- _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']

+ _PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'signedRepo', 'wrapperRPM', 'livecd', 'appliance', 'image', 'livemedia']

  

  def tasks(environ, owner=None, state='active', view='tree', method='all', hostID=None, channelID=None, start=None, order='-id'):

      values = _initValues(environ, 'Tasks', 'tasks')
@@ -628,7 +630,7 @@ 

          build = server.getBuild(params[1])

          values['destTag'] = destTag

          values['build'] = build

-     elif task['method'] == 'newRepo':

+     elif task['method'] in ('newRepo', 'signedRepo', 'createsignedrepo'):

          tag = server.getTag(params[0])

          values['tag'] = tag

      elif task['method'] == 'tagNotification':

file modified
+16 -5
@@ -221,8 +221,13 @@ 

          #elif $task.method == 'newRepo'

          <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>

          #if $len($params) > 1

-         $printOpts($params[1])

+           $printOpts($params[1])

          #end if

+         #elif $task.method == 'signedRepo'

+         <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>

+         <strong>Repo ID:</strong> $params[1]<br/>

+         <strong>Keys:</strong> $printValue(0, $params[2])<br/>

+         $printOpts($params[3])

          #elif $task.method == 'prepRepo'

          <strong>Tag:</strong> <a href="taginfo?tagID=$params[0].id">$params[0].name</a>

          #elif $task.method == 'createrepo'
@@ -230,12 +235,18 @@ 

          <strong>Arch:</strong> $params[1]<br/>

          #set $oldrepo = $params[2]

          #if $oldrepo

-         <strong>Old Repo ID:</strong> $oldrepo.id<br/>

-         <strong>Old Repo Creation:</strong> $koji.formatTimeLong($oldrepo.creation_time)<br/>

+             <strong>Old Repo ID:</strong> $oldrepo.id<br/>

+             <strong>Old Repo Creation:</strong> $koji.formatTimeLong($oldrepo.creation_time)<br/>

          #end if

-         #if $len($params) > 3

-         <strong>External Repos:</strong> $printValue(None, [ext['external_repo_name'] for ext in $params[3]])<br/>

+         #if $len($params) > 4 and $params[4]

+             <strong>External Repos:</strong> $printValue(None, [ext['external_repo_name'] for ext in $params[4]])<br/>

          #end if

+         #elif $task.method == 'createsignedrepo'

+         <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>

+         <strong>Repo ID:</strong> $params[1]<br/>

+         <strong>Arch:</strong> $printValue(0, $params[2])<br/>

+         <strong>Keys:</strong> $printValue(0, $params[3])<br/>

+         <strong>Options:</strong> $printMap($params[4], '&nbsp;&nbsp;&nbsp;&nbsp;')

          #elif $task.method == 'dependantTask'

          <strong>Dependant Tasks:</strong><br/>

          #for $dep in $deps

no initial comment

PR #3 that fixes channel use and copying if hard-linking is not possible
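
For reference, the copy-if-hardlink-fails behavior mentioned here is the usual fallback pattern, sketched below; errno.EXDEV is the symbolic name for the cross-device error that the diff tests as the literal 18.

import errno
import os
import shutil

def link_or_copy(src, dst):
    # prefer a cheap hardlink; fall back to a real copy when src and
    # dst live on different filesystems (EXDEV, 'Invalid cross-device link')
    try:
        os.link(src, dst)
    except OSError, ose:
        if ose.errno == errno.EXDEV:
            shutil.copy2(src, dst)
        else:
            raise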

A note - @ausil is rebasing this and testing it out in Fedora stg. We can do code review after that work is submitted back.

A few questions and comments...

This PR adds new imports of yum lib. I know koji is guilty of importing yum elsewhere (for the mergerepos script that ext repos rely on, and for importing comps in the cli), but we may soon be forced to drop those. Not to say we can't use yum for this now, but I'd like to at least consider how much work the eventual port to another lib is going to be.

The data model implementation puts these new repos in the regular repo table. Should they really be there? I.e.

  • should they be eligible to be used to make buildroots?
  • do we expect them to continue to be defined the same way (create_event, tag_id)

More comments pending inline....

what's this doing here?
also, maybe add signed-repo perm?
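
For context: the hub handler added here calls context.session.assertPerm('signed-repo'), but the schema hunk only inserts 'image'. Presumably a matching row would be needed alongside the existing inserts, e.g.:

INSERT INTO permissions (name) VALUES ('signed-repo');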

The comparison to repoDone is somewhat confusing. This call only covers a single arch,
doesn't have the callbacks that repoDone does, and the calling task also calls repoDone.

No real behavior change here, but the comment is a bit confusing and suggests that
maybe we need some more straightforward logic

the default is the same as for regular repos? Does that fit the intended use case?

That was my intent, I'll correct it. I think I noticed 'image' was missing from long ago, so I added that and forgot about my original intent.

Other methods use:
host = Host()
host.verify()

Is that sufficient?

What's confusing? Are you looking for a change from me?

Having a separate timer for normal repos and signed repos is the important thing. I suspect policies for how long a signed repo will stay around will vary wildly from deployment to deployment.
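
For example, in kojira.conf (the 604800 default is from this PR; the two-week value is just an illustration of a deployment-specific choice):

deleted_repo_lifetime = 604800
signed_repo_lifetime = 1209600   ; e.g. keep signed repos around for two weeks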

rebased

8 years ago

The rebased branch only differs from the previous (apart from being rebased onto master) by the last two commits that seem to duplicate part of https://pagure.io/koji/pull-request/65
Since this is your master branch, I'm not sure if that was intentional.

Seems like accidental logic. The existing behavior was there to avoid clearing anything koji
didn't put there. Now it's accidentally serving a different purpose. It seems like this ought
to be more explicit. For example, we could skip 'signed' in the outer loop.
It also exposes the namespace overlap issue. repos/signed/1000 could be repo #1000 for
a tag named 'signed' or a directory of signed repos for a tag named 1000.
I wonder if we should consider a different path.
Perhaps repos/sometag/signed/NNN
Or for that matter, do we really need a different path?
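
For illustration, the repos/sometag/signed/NNN idea would only change the new pathinfo helper; a hypothetical variant, not what this PR implements:

def signedrepo(self, repo_id, tag):
    # per-tag layout: repos/<tag>/signed/<repo_id>
    return os.path.join(self.topdir, 'repos', tag, 'signed', str(repo_id))

Nesting 'signed' under each tag directory removes the ambiguity, since a tag named 'signed' or '1000' can no longer collide with the signed-repo namespace.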

2 new commits added

  • fixes from testing and upstream comments
  • fix builder-plugins spec file and add --non-latest
8 years ago

1 new commit added

  • make the src arch work in signed repos
8 years ago

rebased

8 years ago

Testing the current code, the createrepo task appears to have the correct rpms for the arch, but the final repo as written out had no rpms in it for armhfp and i386. The issues are partially fixed.

1 new commit added

  • lowercase directories in signed repos
8 years ago

My understanding is the aforementioned problem was determined to be the result of a full filesystem in stage, so I have made no changes to address that. However, in IRC it was observed that capital letters were still being used in the repo paths (the single letter directories), so I've pushed a change to address that.
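
Concretely, the pushed change derives the one-letter bucket from the lowercased basename; a small illustration, with a made-up package name:

bnp = os.path.basename(pkgpath)           # e.g. 'ImageMagick-6.9-1.x86_64.rpm'
bnplet = bnp[0].lower()                   # 'i', not 'I'
dst = os.path.join(archdir, bnplet, bnp)  # <arch>/i/ImageMagick-6.9-1.x86_64.rpm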

rebased

8 years ago

Rebased for the 3rd time >_>

1 new commit added

  • accidentally wiped out changes in conflict resolution
8 years ago

Is there any progress here?

Haven't seen any progress. Since upstream hasn't been involved in 3 months, I'm inclined to withdraw the installation media feature change proposed for Fedora which builds upon this work. I need to see koji-upstream care more before I'll bother continuing.

And if I get asked to rebase this a 5th time, I'm withdrawing this PR too. (currently up to 3 rebases)

We were just discussing merging it. Expect a response early next week.

Here is the branch rebased to current HEAD, with stray commits removed, and one fix applied.

https://github.com/mikem23/koji-playground/tree/signed-repos-54d-rebase

By stray commits, I mean changes unrelated to signed repos that got pulled in over time and did not cleanly disappear in the various rebases.

I did the initial rebase as normal. There were a few conflicts with PR#114, but nothing too bad. I then did a rebase -i, stripping out all the extraneous commits. The diff between the second rebase and the first was a single blank line.

A question about the new yum imports. I already get flack from Fedora for importing yum. I'm /not/ asking you to port this to dnf or anything, but I wonder if you have an idea about where that might need to go in the coming year.

I appreciate you being clear about your expectations. :)
Let me get in contact with some internal folks before I answer that question about the direction.

A few more updates on the rebased branch:
https://github.com/mikem23/koji-playground/commits/signed-repos-54d-rebase

Questions:

  • getRepo has a new optional arg 'signed' that is never used in the code. Can we drop it? It seems like querying for signed repos is going to need to work differently than the way getRepo does. Or is this opt something that Pungi relies on? Maybe we need a more robust repo query call?
  • repoDone now runs the arches through canonArch. This isn't technically wrong, but afaict, all the code paths that lead here will have already done that (or am I missing something). I wonder if we should instead assert arch == canonArch(arch) (sketched after this list).
  • why'd you move pkglist in the regular repos?
  • cli comment says src is handled specifically, but I don't see where. What am I missing?
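
Regarding the second bullet, the suggested assertion would amount to something like this at the top of repoDone (a sketch of the proposal, not committed code):

for arch in data:
    if arch != koji.canonArch(arch):
        raise koji.GenericError, "non-canonical arch in repo data: %s" % arch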

I would really like to see this get into Koji so that Bodhi can offload repo generation to Koji instead of using its masher. Would my assistance be helpful?

  1. The "signed" arg is for consumers like Bodhi to be specific about details for a signed repository. (An example call is sketched after this list.)
    https://taiga.fedorainfracloud.org/project/acarter-fedora-docker-atomic-tooling/us/461?no-milestone=1

  2. Any harm in being defensive? I think I did that to eliminate risk of inconsistent arguments... or maybe because I confused myself trying to get koji to pick the right directories. :)

  3. I don't think I did that intentionally...

  4. Comment is wrong and out of date, sorry about that.
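
Regarding point 1, a consumer like Bodhi would then presumably query along these lines ('f25-updates' is a made-up tag name; the call follows the getRepo signature in this diff):

repo = session.getRepo('f25-updates', state=koji.REPO_READY, signed=True)
if repo:
    # layout per the new pathinfo.signedrepo helper
    path = '%s/repos/signed/%s/%s' % (topdir, 'f25-updates', repo['id'])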

re: Yum vs. DNF: I don't have clear direction internally yet about DNF in a future RHEL, or when. We should have unit tests, or at least Koji's use cases of yum written out, so that if/when the switch happens, we can fling the same criteria at DNF and have some confidence that it works like we expect.
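
A minimal sketch of writing that down: a smoke test enumerating the yum/rpmUtils entry points this PR actually calls, so a future DNF port can be checked against the same list (hypothetical test, not part of the PR):

import yum
import yum.packages
import rpmUtils.arch

def test_yum_surface_for_signed_repos():
    # entry points used by createSignedRepoTask.do_multilib
    yb = yum.YumBase()
    for name in ('doConfigSetup', 'doRepoSetup', 'doTsSetup', 'doRpmDBSetup',
                 'doSackSetup', 'doSackFilelistPopulate', 'resolveDeps'):
        assert hasattr(yb, name), name
    assert hasattr(yum.packages, 'YumLocalPackage')
    for name in ('isMultiLibArch', 'archDifference', 'canonArch'):
        assert hasattr(rpmUtils.arch, name), name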

@jgreguske At the very least, once RHEL 7.3 drops, DNF will be updated in EPEL to match the version in Fedora, as libsolv will be updated enough that DNF 1.1 or DNF 2.0 will function on EL7.

@jgreguske can you please rebase the patch set

I've already rebased it, maybe I should close this and reopen a new one based on that

@mikem Does your branch also generate a detached signature for repomd.xml automatically?

@ngompa it does not afaict. At least, I have not added it. My changes on the branch are simply cleanup so far.

@mikem Could you publish your new branch as a PR so it can be reviewed against current master?

Yes, I'll try to get a new PR this week

@mikem A week has passed... New PR?

Please ignore this comment. I just want to subscribe to follow this issue but see no option to do so directly.

I also want to follow the progress of this issue - @jflorian for the record I opened an issue against pagure to support subscribing to comments from pull requests: https://pagure.io/pagure/issue/1866

@mikem Have you had a chance to rebase this and prep it for merging?

Pull-Request has been closed by mikem

8 years ago