From f5c6d44000379d6cfaac2348dec24e32047d73af Mon Sep 17 00:00:00 2001 From: Daniel Mach Date: Feb 10 2015 11:47:16 +0000 Subject: Initial changes for Pungi 4.0. --- diff --git a/.hgignore b/.hgignore deleted file mode 100644 index a64bf5c..0000000 --- a/.hgignore +++ /dev/null @@ -1,11 +0,0 @@ -syntax: glob -*.pyc -*~ -pungi.pidaproject -.project -.pydevproject -.settings/org.eclipse.mylyn.tasks.ui.prefs -MANIFEST -build/* -dist/* -noarch/* diff --git a/.hgtags b/.hgtags deleted file mode 100644 index 315ccbc..0000000 --- a/.hgtags +++ /dev/null @@ -1,70 +0,0 @@ -e80b96291cfe23c4c21b2e668d8d80a8998c7cfc pungi-- -f755487fdd539c3a68296c0dc7b6c6dc49dccb98 pungi-0.1.0-1%{?dist} -d9bda840074f8f5e7b8844007e9951cd55ad9c1d pungi-- -baa55b9774642535467104c3f6b268671cc35e08 pungi-0.1.0-1 -902402e675943d6c3186b924a9cff89d539b06f9 pungi-0.1.0-1 -14a5e625d91034b7dcb1f2b26486827929e87e24 pungi-0.1.0-1 -b13071d9363851d2766e9efaf80e9e13feec7a0c pungi-0.1.0-1 -00326e01cc7dd77f527d1a70e97fa907f35ce669 pungi-0.1.0-1 -591cf30beec90deb8b01aaef07e042d8878f4f09 pungi-0.1.1-1 -9f954716abd9c8db453b9f1b56f64e0defd8fa1d FC-6 -f0cbd4fbc9e7915fa94588237827e0a1379ec823 pungi-- -c5e81c8e1adc642b15e5aac713ae2e58a386c9b9 pungi-0.2.0-1 -f90b645121cb2f794ceda3c4be050c53d36a7bec pungi-0.2.0-1 -ebfe0e963db6d7b652f63f99aaec121e7ff19074 pungi-0.2.1-1 -769a8e08d77a2234295449282b16ca38ff4d846e pungi-0.2.2-1 -ba049c3454d5dae9326d8050bb0d7d8116982ca4 f7-test1 -780520383876b76dd06fa013e1a41ddd6bf0901e pungi-0.2.3-1 -158bd9a170892b43645caed12bddc3602ef3be4d pungi-0.2.3-1 -6659955ccfdf29ecd6027bd3770f80d815720af0 pungi-0.2.3-1 -9f7b5877c32c533d00ea6baa67057ce424a40a61 pungi-0.2.3-1 -7ea08753383766ce36bb49fef6d4fcf44158ad26 pungi-0.2.3-1 -65596b024b8380bd72c6faec00d37820ada1444d pungi-0.2.4-1 -5e3332cfa2bb723f438507313836c299fcc99cff pungi-0.2.5-1 -61146ab008d70cb4ce294d14a8465c05613e91e5 pungi-0.2.6-1 -6de1d8a07c7b75fc069c72eaa9b3cb4ecaa5ad5a pungi-0.2.7-1 -c150a9d7a125e6c25384fbbf8080d7532191b587 f7-test2 -9c5cdf9e045ab0c804d85a50b24107b108aa2da5 pungi-0.2.8-1 -f1ee949b238b004ee53c6b30915e69352274f583 pungi-0.3.0-1 -8cd10e139537882e6620ff7a834550372d661765 pungi-0.3.2-1.fc7 -cadce5054b15ae68cab1ab4535051413502f6f06 pungi-0.3.3-1.fc7 -ab966576cf34faf07ef340f2bf35a845e50b2e60 pungi-0.3.4-1.fc7 -be92c798eb08d91cd95769473a89201fea2deee7 pungi-0.3.4-1.fc7 -14cc0c736f005298e55857b6f8d4e96811d391f0 pungi-0.3.5-1.fc7 -d71cd934642d7e57875bec2cf1fae94143a32e3d pungi-0.3.6-1.fc7 -c307e8c643a7b3bc7d79aed21e1992c3dadd930b pungi-0.3.7-1.fc7 -49ffb6153da14ce66ef8f95e356f7b268caba907 f7 -a18b9e205575a209423b22ebb55987f46c4f0994 pungi-0.3.8-1.fc8 -9b1619bddf881c65bcba1ae6c6a0f516915f2973 pungi-0.3.9-1.fc8 -2ece0fc1e1f5a519f10b514e35a44f575b6926b7 pungi-0.4.0-1.fc8 -57308147c0f4a6d4fc6ba8d35f3225b83ecd245a pungi-0.4.1-1.fc8 -b6b730ec37d887efaa175718780486f2f2217626 pungi-0.5.0-1.fc8 -904b3b5d799fcfc01d57075f8a9cfaf57c4b9e96 pungi-0.5.0-1.fc8 -0d1678fab953bf8049feba79435d536720875c35 pungi-1.0.0-1.fc8 -0f9756371e0bf46172f414f6568522f7d69531a7 pungi-1.0.0-2.fc8 -6a80d436e84bf782164b3613c52e8155793d33ad pungi-1.0.1-1.fc8.1 -812ca4b56f453adb7cf9632ebaf7fd05b9e317b0 pungi-1.0.2-1.fc8 -21fdf978d6b3b0e6b1f9470994261b5c2dd03dc4 pungi-1.0.2-1.fc8 -daafaa357e4b59316e7f89f94f7d85de1dd214e3 pungi-1.1.0-1.fc8 -a4f98216a6de2caa3aa3d25a05ca2df69f84ad27 pungi-1.1.1-1.fc8 -fe6ef87b3d0224c60f4a20510e2c9cf7eded5bbb pungi-1.1.2-1.fc8 -bbbfaaf1cbfa2329d7fc5932ad9eabb8ea92fa3d pungi-1.1.4-1.fc8 -25d1219ba97e777cd34d596475eb14b505e2c431 pungi-1.1.5-1.fc8 
-4edd3626781cbb562d467c30eb905a3670a29920 pungi-1.1.6-1.fc8 -ffa18895b6d9c3069a4eceb93ab3cbb2775f6958 pungi-1.1.7-1.fc8 -74c78bd267b31410f45240f68b47f2197d629a01 pungi-1.1.8-1.fc8 -214e56769a8a36964d297366f9ee6d1e9e13734e pungi-1.1.9-1.fc8 -251b9f7c28a64ad79d24998e8f1329ef593918e6 pungi-1.1.9-1.fc8 -fd109d29c502c7117ec171aad24bae0149d14f82 pungi-1.1.10-1.fc8 -0a52fdb4ca6463835128c0c6b9a8705ac1973c0b pungi-1.2.0-1.fc8 -738b45957b10d06bdb6be6d63266b559d62ab647 pungi-1.2.1-1.fc8 -0a6ed4246de368c4f80f2de98d0b1e40fe36ee6f pungi-1.2.2-1.fc8 -daab9b65ae2df4cc8e74bb01f354b90a5d8e351c pungi-1.2.3-1.fc8 -eda1c2fb02e4427a860146aa8aebcb1bb12c5eda pungi-1.2.4-1.fc8 -df20ca17d16dda8d85bec12e0f94cf7525779a2b pungi-1.2.6-1.fc8 -edceabcc3a0dc2b28679dbef2c734e7d7c55c9a4 pungi-1.2.7-1.fc8 -5c8f4d20fe3022ca21b3d064a484571efe02c821 pungi-1.2.8-1.fc8 -77e9034ef654d9ce17575fb6c6216a561eaba6ec pungi-1.2.9-1.fc9 -b96d2030c9d572852ba4d45c49647b665e020bfa pungi-1.2.10-1.fc9 -337fa20a0e1e04dc03842b9a354dc0f086b9fa4d pungi-1.2.11-1.fc9 -3cb7b1a58df7e3cb8f6e6b90f5dbd9922a4fcaa8 pungi-1.2.12-1.fc9 diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..58869fc --- /dev/null +++ b/AUTHORS @@ -0,0 +1,15 @@ +Authors: + +Jesse Keating +Dennis Gilmore +Daniel Mach + +Contributors: + +Will Woods +Essien Ita Essien +James Bowes +Tom Callaway +Joel Andres Granados + +Mark McLoughlin diff --git a/Authors b/Authors deleted file mode 100644 index 58869fc..0000000 --- a/Authors +++ /dev/null @@ -1,15 +0,0 @@ -Authors: - -Jesse Keating -Dennis Gilmore -Daniel Mach - -Contributors: - -Will Woods -Essien Ita Essien -James Bowes -Tom Callaway -Joel Andres Granados - -Mark McLoughlin diff --git a/COPYING b/COPYING index 557fcb1..5a92318 100644 --- a/COPYING +++ b/COPYING @@ -1,5 +1,5 @@ - Pungi - a Fedora release compose tool - Copyright (C) 2006 Jesse Keating + Pungi - Distribution compose tool + Copyright (C) 2006-2015 Red Hat, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -13,4 +13,3 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - diff --git a/Makefile b/Makefile index 2f4e604..0167d53 100644 --- a/Makefile +++ b/Makefile @@ -50,9 +50,12 @@ install: @python setup.py install clean: + @python setup.py clean @rm -vf *.rpm @rm -vrf noarch @rm -vf *.tar.gz @rm -vrf dist @rm -vf MANIFEST @rm -vf Changelog + find . -\( -name "*.pyc" -o -name '*.pyo' -o -name "*~" -o -name "__pycache__" -\) -delete + find . -depth -type d -a -name '*.egg-info' -exec rm -rf {} \; diff --git a/ToDo b/ToDo deleted file mode 100644 index 7adbaed..0000000 --- a/ToDo +++ /dev/null @@ -1,62 +0,0 @@ -Working with Kickstart - -* Remove config options for: - osdir - sourcedir - debugdir - isodir - iso_basename - cachedir - arch - relnotefilere - relnotedirre - relnotepkgs - -DONE ^^ - -* Move to CLI only: - name - version - destdir - flavor (optional) - bugurl (optional) - discs (optional) - nosource (optional/debug) - -DONE ^^ - -* Make the following transitions - product_name -> name *DONE* - product path == 'Packages/" *DONE* - cachedir == /var/cache/pungi *DONE* - -DONE ^^ - -* Get comps data from repos - Use ayum.comps. to get comps information *DONE* - Snag each available comps file, cat them together, use xslt to make clean *DONE* - Use this mutant comps for creating repodata in the destdirs. 
*DONE* - -DONE ^^ - -* From pykickstart - manifest -> %packages *DONE* - yum-repos -> repo *DONE* - isosize -> part iso size (optional) *DONE* - -DONE ^^ - -* Release notes stuff - Drop all stuff but GPL, README, GPG keys, potentially README-BURNING-ISOS ? - -* From livecd-creator - Steal root check *DONE* - Use same cli syntax if possible (conf -> config, etc..) *DONE* - -DONE ^^ - -* Figure out how to allow use of $releasever and $basearch *DONE* - -DONE ^^ - -* Profit! diff --git a/bin/pungi-gather b/bin/pungi-gather new file mode 100755 index 0000000..770b9e3 --- /dev/null +++ b/bin/pungi-gather @@ -0,0 +1,313 @@ +#!/usr/bin/python -tt +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +import os +import pungi.gather +import pungi.config +import pungi.ks +import subprocess + +def main(): + + config = pungi.config.Config() + + (opts, args) = get_arguments(config) + + # You must be this high to ride if you're going to do root tasks + if os.geteuid () != 0 and (opts.do_all or opts.do_buildinstall): + print >> sys.stderr, "You must run pungi as root" + return 1 + + if opts.do_all or opts.do_buildinstall: + try: + selinux = subprocess.Popen('/usr/sbin/getenforce', + stdout=subprocess.PIPE, + stderr=open('/dev/null', 'w')).communicate()[0].strip('\n') + if selinux == 'Enforcing': + print >> sys.stdout, "WARNING: SELinux is enforcing. This may lead to a compose with selinux disabled." + print >> sys.stdout, "Consider running with setenforce 0." + except: + pass + + # Set up the kickstart parser and pass in the kickstart file we were handed + ksparser = pungi.ks.get_ksparser(ks_path=opts.config) + + if opts.sourceisos: + config.set('pungi', 'arch', 'source') + + for part in ksparser.handler.partition.partitions: + if part.mountpoint == 'iso': + config.set('pungi', 'cdsize', str(part.size)) + + config.set('pungi', 'force', str(opts.force)) + + if config.get('pungi', 'workdirbase') == '/work': + config.set('pungi', 'workdirbase', "%s/work" % config.get('pungi', 'destdir')) + # Set up our directories + if not os.path.exists(config.get('pungi', 'destdir')): + try: + os.makedirs(config.get('pungi', 'destdir')) + except OSError, e: + print >> sys.stderr, "Error: Cannot create destination dir %s" % config.get('pungi', 'destdir') + sys.exit(1) + else: + print >> sys.stdout, "Warning: Reusing existing destination directory." + + if not os.path.exists(config.get('pungi', 'workdirbase')): + try: + os.makedirs(config.get('pungi', 'workdirbase')) + except OSError, e: + print >> sys.stderr, "Error: Cannot create working base dir %s" % config.get('pungi', 'workdirbase') + sys.exit(1) + else: + print >> sys.stdout, "Warning: Reusing existing working base directory." 
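The destdir and workdirbase blocks above (and the cachedir block that follows) repeat the same create-or-reuse pattern. A minimal standalone sketch of that pattern, for orientation only (ensure_dir is a hypothetical helper, not part of pungi):

import os
import sys

def ensure_dir(path):
    # Create the directory if it is missing; tolerate (but warn about) an
    # existing one, mirroring the "Reusing existing ..." messages above.
    if os.path.exists(path):
        sys.stdout.write("Warning: Reusing existing directory %s\n" % path)
        return
    try:
        os.makedirs(path)
    except OSError:
        sys.stderr.write("Error: Cannot create directory %s\n" % path)
        sys.exit(1)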
+ + cachedir = config.get('pungi', 'cachedir') + + if not os.path.exists(cachedir): + try: + os.makedirs(cachedir) + except OSError, e: + print >> sys.stderr, "Error: Cannot create cache dir %s" % cachedir + sys.exit(1) + + # Set debuginfo flag + if opts.nodebuginfo: + config.set('pungi', 'debuginfo', "False") + if opts.greedy: + config.set('pungi', 'greedy', opts.greedy) + else: + # XXX: compatibility + if opts.nogreedy: + config.set('pungi', 'greedy', "none") + else: + config.set('pungi', 'greedy', "all") + config.set('pungi', 'resolve_deps', str(bool(opts.resolve_deps))) + if opts.isfinal: + config.set('pungi', 'isfinal', "True") + if opts.nohash: + config.set('pungi', 'nohash', "True") + if opts.full_archlist: + config.set('pungi', 'full_archlist', "True") + if opts.arch: + config.set('pungi', 'arch', opts.arch) + if opts.multilib: + config.set('pungi', 'multilib', " ".join(opts.multilib)) + if opts.lookaside_repos: + config.set('pungi', 'lookaside_repos', " ".join(opts.lookaside_repos)) + if opts.no_dvd: + config.set('pungi', 'no_dvd', "True") + if opts.nomacboot: + config.set('pungi', 'nomacboot', "True") + config.set("pungi", "fulltree", str(bool(opts.fulltree))) + config.set("pungi", "selfhosting", str(bool(opts.selfhosting))) + config.set("pungi", "nosource", str(bool(opts.nosource))) + config.set("pungi", "nodebuginfo", str(bool(opts.nodebuginfo))) + + if opts.lorax_conf: + config.set("lorax", "conf_file", opts.lorax_conf) + if opts.installpkgs: + config.set("lorax", "installpkgs", " ".join(opts.installpkgs)) + + # Actually do work. + mypungi = pungi.gather.Pungi(config, ksparser) + + with mypungi.yumlock: + if not opts.sourceisos: + if opts.do_all or opts.do_gather or opts.do_buildinstall: + mypungi._inityum() # initialize the yum object for things that need it + if opts.do_all or opts.do_gather: + mypungi.gather() + if opts.nodownload: + for line in mypungi.list_packages(): + flags_str = ",".join(line["flags"]) + if flags_str: + flags_str = "(%s)" % flags_str + sys.stdout.write("RPM%s: %s\n" % (flags_str, line["path"])) + sys.stdout.flush() + else: + mypungi.downloadPackages() + mypungi.makeCompsFile() + if not opts.nodebuginfo: + mypungi.getDebuginfoList() + if opts.nodownload: + for line in mypungi.list_debuginfo(): + flags_str = ",".join(line["flags"]) + if flags_str: + flags_str = "(%s)" % flags_str + sys.stdout.write("DEBUGINFO%s: %s\n" % (flags_str, line["path"])) + sys.stdout.flush() + else: + mypungi.downloadDebuginfo() + if not opts.nosource: + if opts.nodownload: + for line in mypungi.list_srpms(): + flags_str = ",".join(line["flags"]) + if flags_str: + flags_str = "(%s)" % flags_str + sys.stdout.write("SRPM%s: %s\n" % (flags_str, line["path"])) + sys.stdout.flush() + else: + mypungi.downloadSRPMs() + + print "RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2) + if not opts.nodebuginfo: + print "DEBUGINFO size: %s MiB" % (mypungi.size_debuginfo() / 1024 ** 2) + if not opts.nosource: + print "SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2) + + # Furthermore (but without the yumlock...) + if not opts.sourceisos: + if opts.do_all or opts.do_createrepo: + mypungi.doCreaterepo() + + if opts.do_all or opts.do_buildinstall: + if not opts.norelnotes: + mypungi.doGetRelnotes() + mypungi.doBuildinstall() + + if opts.do_all or opts.do_createiso: + mypungi.doCreateIsos() + + # Do things slightly different for src. 
+ if opts.sourceisos: + # we already have all the content gathered + mypungi.topdir = os.path.join(config.get('pungi', 'destdir'), + config.get('pungi', 'version'), + config.get('pungi', 'flavor'), + 'source', 'SRPMS') + mypungi.doCreaterepo(comps=False) + if opts.do_all or opts.do_createiso: + mypungi.doCreateIsos() + + print "All done!" + +if __name__ == '__main__': + from optparse import OptionParser + import sys + import time + + today = time.strftime('%Y%m%d', time.localtime()) + + def get_arguments(config): + parser = OptionParser("%prog [--help] [options]", version="%prog 3.13") + + def set_config(option, opt_str, value, parser, config): + config.set('pungi', option.dest, value) + # When setting name, also set the iso_basename. + if option.dest == 'name': + config.set('pungi', 'iso_basename', value) + + # Pulled in from config file to be cli options as part of pykickstart conversion + parser.add_option("--name", dest="name", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='the name for your distribution (defaults to "Fedora")') + parser.add_option("--ver", dest="version", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='the version of your distribution (defaults to datestamp)') + parser.add_option("--flavor", dest="flavor", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='the flavor of your distribution spin (optional)') + parser.add_option("--destdir", dest="destdir", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='destination directory (defaults to current directory)') + parser.add_option("--cachedir", dest="cachedir", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='package cache directory (defaults to /var/cache/pungi)') + parser.add_option("--bugurl", dest="bugurl", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='the url for your bug system (defaults to http://bugzilla.redhat.com)') + parser.add_option("--selfhosting", action="store_true", dest="selfhosting", + help='build a self-hosting tree by following build dependencies (optional)') + parser.add_option("--fulltree", action="store_true", dest="fulltree", + help='build a tree that includes all packages built from corresponding source rpms (optional)') + parser.add_option("--nosource", action="store_true", dest="nosource", + help='disable gathering of source packages (optional)') + parser.add_option("--nodebuginfo", action="store_true", dest="nodebuginfo", + help='disable gathering of debuginfo packages (optional)') + parser.add_option("--nodownload", action="store_true", dest="nodownload", + help='disable downloading of packages. 
instead, print the package URLs (optional)') + parser.add_option("--norelnotes", action="store_true", dest="norelnotes", + help='disable gathering of release notes (optional); DEPRECATED') + parser.add_option("--nogreedy", action="store_true", dest="nogreedy", + help='disable pulling of all providers of package dependencies (optional)') + parser.add_option("--nodeps", action="store_false", dest="resolve_deps", default=True, + help='disable resolving dependencies') + parser.add_option("--sourceisos", default=False, action="store_true", dest="sourceisos", + help='Create the source isos (other arch runs must be done)') + parser.add_option("--force", default=False, action="store_true", + help='Force reuse of an existing destination directory (will overwrite files)') + parser.add_option("--isfinal", default=False, action="store_true", + help='Specify this is a GA tree, which causes betanag to be turned off during install') + parser.add_option("--nohash", default=False, action="store_true", + help='disable hashing the Packages trees') + parser.add_option("--full-archlist", action="store_true", + help='Use the full arch list for x86_64 (include i686, i386, etc.)') + parser.add_option("--arch", + help='Override default (uname based) arch') + parser.add_option("--greedy", metavar="METHOD", + help='Greedy method; none, all, build') + parser.add_option("--multilib", action="append", metavar="METHOD", + help='Multilib method; can be specified multiple times; recommended: devel, runtime') + parser.add_option("--lookaside-repo", action="append", dest="lookaside_repos", metavar="NAME", + help='Specify lookaside repo name(s) (packages will be used for depsolving but not be included in the output)') + parser.add_option("--workdirbase", dest="workdirbase", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='base working directory (defaults to destdir + /work)') + parser.add_option("--no-dvd", default=False, action="store_true", dest="no_dvd", + help='Do not make an install DVD/CD, only the netinstall image and the tree') + parser.add_option("--lorax-conf", type="string", + help='Path to lorax.conf file (optional)') + parser.add_option("-i", "--installpkgs", default=[], + action="append", metavar="STRING", + help="Package glob for lorax to install before runtime-install.tmpl runs. 
(may be listed multiple times)") + + parser.add_option("-c", "--config", dest="config", + help='Path to kickstart config file') + parser.add_option("--all-stages", action="store_true", default=True, dest="do_all", + help="Enable ALL stages") + parser.add_option("-G", action="store_true", default=False, dest="do_gather", + help="Flag to enable processing the Gather stage") + parser.add_option("-C", action="store_true", default=False, dest="do_createrepo", + help="Flag to enable processing the Createrepo stage") + parser.add_option("-B", action="store_true", default=False, dest="do_buildinstall", + help="Flag to enable processing the BuildInstall stage") + parser.add_option("-I", action="store_true", default=False, dest="do_createiso", + help="Flag to enable processing the CreateISO stage") + parser.add_option("--relnotepkgs", dest="relnotepkgs", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='Rpms which contain the release notes') + parser.add_option("--relnotefilere", dest="relnotefilere", type="string", + action="callback", callback=set_config, callback_args=(config, ), + help='Which files are the release notes -- GPL EULA') + parser.add_option("--nomacboot", action="store_true", dest="nomacboot", help='disable setting up macboot (no hfs support)') + + + (opts, args) = parser.parse_args() + + if not opts.config: + parser.error("Please specify a config file") + + if not config.get('pungi', 'flavor').isalnum() and not config.get('pungi', 'flavor') == '': + parser.error("Flavor must be alphanumeric") + + if opts.do_gather or opts.do_createrepo or opts.do_buildinstall or opts.do_createiso: + opts.do_all = False + + if opts.arch and (opts.do_all or opts.do_buildinstall): + parser.error("Cannot override arch while the BuildInstall stage is enabled") + + return (opts, args) + + main() diff --git a/pungi.spec b/pungi.spec index 1b62f19..84bd76f 100644 --- a/pungi.spec +++ b/pungi.spec @@ -1,8 +1,8 @@ %{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")} Name: pungi -Version: 3.13 -Release: 1%{?dist} +Version: 4.0 +Release: 0.1%{?dist} Summary: Distribution compose tool Group: Development/Tools @@ -46,7 +46,7 @@ rm -rf $RPM_BUILD_ROOT %defattr(-,root,root,-) %doc Authors Changelog COPYING GPL ToDo doc/README # For noarch packages: sitelib -%{python_sitelib}/pypungi +%{python_sitelib}/pungi %if 0%{?fedora} >= 9 || 0%{?rhel} >= 6 %{python_sitelib}/%{name}-%{version}-py?.?.egg-info %endif @@ -289,7 +289,7 @@ rm -rf $RPM_BUILD_ROOT - Add support for yum repo costs - Adjust manifest for Fedora 9 (kernels, languages, flash) -* Mon Apr 08 2008 Jesse Keating - 1.2.14-1 +* Mon Apr 07 2008 Jesse Keating - 1.2.14-1 - Create repodata for source. - Fix SRPM splittree making - Bump anaconda require up for fixed splittree diff --git a/pungi/__init__.py b/pungi/__init__.py new file mode 100644 index 0000000..b777be9 --- /dev/null +++ b/pungi/__init__.py @@ -0,0 +1 @@ +__version__ = "4.0" diff --git a/pungi/arch.py b/pungi/arch.py new file mode 100644 index 0000000..332b228 --- /dev/null +++ b/pungi/arch.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- + + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + +import rpmUtils.arch + + +TREE_ARCH_YUM_ARCH_MAP = { + "i386": "athlon", + "ppc64": "ppc64p7", + "sparc": "sparc64v", + "arm": "armv7l", + "armhfp": "armv7hnl", +} + + +def tree_arch_to_yum_arch(tree_arch): + # this is basically an opposite to rpmUtils.arch.getBaseArch() + yum_arch = TREE_ARCH_YUM_ARCH_MAP.get(tree_arch, tree_arch) + return yum_arch + + +def get_multilib_arch(yum_arch): + arch_info = rpmUtils.arch.getMultiArchInfo(yum_arch) + if arch_info is None: + return None + return arch_info[0] + + +def get_valid_multilib_arches(tree_arch): + yum_arch = tree_arch_to_yum_arch(tree_arch) + multilib_arch = get_multilib_arch(yum_arch) + if not multilib_arch: + return [] + return [ i for i in rpmUtils.arch.getArchList(multilib_arch) if i not in ("noarch", "src") ] + + +def get_valid_arches(tree_arch, multilib=True, add_noarch=True, add_src=False): + result = [] + + yum_arch = tree_arch_to_yum_arch(tree_arch) + for arch in rpmUtils.arch.getArchList(yum_arch): + if arch not in result: + result.append(arch) + + if not multilib: + for i in get_valid_multilib_arches(tree_arch): + while i in result: + result.remove(i) + + if add_noarch and "noarch" not in result: + result.append("noarch") + + if add_src and "src" not in result: + result.append("src") + + return result + + +def get_compatible_arches(arch, multilib=False): + tree_arch = rpmUtils.arch.getBaseArch(arch) + compatible_arches = get_valid_arches(tree_arch, multilib=multilib) + return compatible_arches + + +def is_valid_arch(arch): + if arch in ("noarch", "src", "nosrc"): + return True + if arch in rpmUtils.arch.arches: + return True + return False + + +def split_name_arch(name_arch): + if "." in name_arch: + name, arch = name_arch.rsplit(".", 1) + if not is_valid_arch(arch): + name, arch = name_arch, None + else: + name, arch = name_arch, None + return name, arch diff --git a/pungi/config.py b/pungi/config.py new file mode 100644 index 0000000..617ae18 --- /dev/null +++ b/pungi/config.py @@ -0,0 +1,56 @@ +#!/usr/bin/python -tt +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
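For orientation, a short usage sketch of the arch helpers defined in pungi/arch.py above. Illustrative only; the exact lists come from the rpmUtils arch tables on the host:

# Illustrative usage of pungi.arch; results depend on rpmUtils arch tables.
from pungi.arch import get_valid_arches, get_valid_multilib_arches, split_name_arch

print split_name_arch("glibc.i686")        # -> ("glibc", "i686")
print split_name_arch("glibc.foo")         # -> ("glibc.foo", None), "foo" is not a valid arch
print get_valid_multilib_arches("x86_64")  # -> ["athlon", "i686", "i586", ...]
print get_valid_arches("x86_64")           # -> native arches + multilib arches + "noarch"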
+ +import os +import time +import yum + +from ConfigParser import SafeConfigParser + +class Config(SafeConfigParser): + def __init__(self): + SafeConfigParser.__init__(self) + + self.add_section('pungi') + self.add_section('lorax') + + self.set('pungi', 'osdir', 'os') + self.set('pungi', 'sourcedir', 'source') + self.set('pungi', 'debugdir', 'debug') + self.set('pungi', 'isodir', 'iso') + self.set('pungi', 'relnotefilere', 'GPL README-BURNING-ISOS-en_US.txt ^RPM-GPG') + self.set('pungi', 'relnotedirre', '') + self.set('pungi', 'relnotepkgs', 'fedora-release fedora-release-notes') + self.set('pungi', 'product_path', 'Packages') + self.set('pungi', 'cachedir', '/var/cache/pungi') + self.set('pungi', 'compress_type', 'xz') + self.set('pungi', 'arch', yum.rpmUtils.arch.getBaseArch()) + self.set('pungi', 'name', 'Fedora') + self.set('pungi', 'iso_basename', 'Fedora') + self.set('pungi', 'version', time.strftime('%Y%m%d', time.localtime())) + self.set('pungi', 'flavor', '') + self.set('pungi', 'destdir', os.getcwd()) + self.set('pungi', 'workdirbase', "/work") + self.set('pungi', 'bugurl', 'https://bugzilla.redhat.com') + self.set('pungi', 'cdsize', '695.0') + self.set('pungi', 'debuginfo', "True") + self.set('pungi', 'alldeps', "True") + self.set('pungi', 'isfinal', "False") + self.set('pungi', 'nohash', "False") + self.set('pungi', 'full_archlist', "False") + self.set('pungi', 'multilib', '') + self.set('pungi', 'lookaside_repos', '') + self.set('pungi', 'resolve_deps', "True") + self.set('pungi', 'no_dvd', "False") + self.set('pungi', 'nomacboot', "False") diff --git a/pungi/gather.py b/pungi/gather.py new file mode 100644 index 0000000..b09b1c6 --- /dev/null +++ b/pungi/gather.py @@ -0,0 +1,1698 @@ +#!/usr/bin/python -tt + + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + + +import yum +import os +import re +import shutil +import sys +import gzip +import pungi.util +import pprint +import lockfile +import logging +import urlgrabber.progress +import subprocess +import createrepo +import ConfigParser +import pylorax +from fnmatch import fnmatch + +import arch as arch_module +import multilib + + +class ReentrantYumLock(object): + """ A lock that can be acquired multiple times by the same process. """ + + def __init__(self, lock, log): + self.lock = lock + self.log = log + self.count = 0 + + def __enter__(self): + if not self.count: + self.log.info("Waiting on %r" % self.lock.lock_file) + self.lock.acquire() + self.log.info("Got %r" % self.lock.lock_file) + self.count = self.count + 1 + self.log.info("Lock count upped to %i" % self.count) + + def __exit__(self, type, value, tb): + self.count = self.count - 1 + self.log.info("Lock count downed to %i" % self.count) + self.log.info("%r %r %r" % (type, value, tb)) + if not self.count: + self.lock.release() + self.log.info("Released %r" % self.lock.lock_file) + + +def yumlocked(method): + """ A locking decorator. 
""" + def wrapper(self, *args, **kwargs): + with self.yumlock: + return method(self, *args, **kwargs) + # TODO - replace argspec, signature, etc.. + return wrapper + + +def is_debug(po): + if "debuginfo" in po.name: + return True + return False + + +def is_source(po): + if po.arch in ("src", "nosrc"): + return True + return False + + +def is_noarch(po): + if po.arch == "noarch": + return True + return False + + +def is_package(po): + if is_debug(po): + return False + if is_source(po): + return False + return True + + +class MyConfigParser(ConfigParser.ConfigParser): + """A subclass of ConfigParser which does not lowercase options""" + + def optionxform(self, optionstr): + return optionstr + + +class PungiBase(object): + """The base Pungi class. Set up config items and logging here""" + + def __init__(self, config): + self.config = config + + # ARCH setup + self.tree_arch = self.config.get('pungi', 'arch') + self.yum_arch = arch_module.tree_arch_to_yum_arch(self.tree_arch) + full_archlist = self.config.getboolean('pungi', 'full_archlist') + self.valid_arches = arch_module.get_valid_arches(self.tree_arch, multilib=full_archlist) + self.valid_arches.append("src") # throw source in there, filter it later + self.valid_native_arches = arch_module.get_valid_arches(self.tree_arch, multilib=False) + self.valid_multilib_arches = arch_module.get_valid_multilib_arches(self.tree_arch) + + # arch: compatible arches + self.compatible_arches = {} + for i in self.valid_arches: + self.compatible_arches[i] = arch_module.get_compatible_arches(i) + + self.doLoggerSetup() + self.workdir = os.path.join(self.config.get('pungi', 'workdirbase'), + self.config.get('pungi', 'flavor'), + self.tree_arch) + + + + def doLoggerSetup(self): + """Setup our logger""" + + logdir = os.path.join(self.config.get('pungi', 'destdir'), 'logs') + + pungi.util._ensuredir(logdir, None, force=True) # Always allow logs to be written out + + if self.config.get('pungi', 'flavor'): + logfile = os.path.join(logdir, '%s.%s.log' % (self.config.get('pungi', 'flavor'), + self.tree_arch)) + else: + logfile = os.path.join(logdir, '%s.log' % (self.tree_arch)) + + # Create the root logger, that will log to our file + logging.basicConfig(level=logging.DEBUG, + format='%(name)s.%(levelname)s: %(message)s', + filename=logfile) + + +class CallBack(urlgrabber.progress.TextMeter): + """A call back function used with yum.""" + + def progressbar(self, current, total, name=None): + return + + +class PungiYum(yum.YumBase): + """Subclass of Yum""" + + def __init__(self, config): + self.pungiconfig = config + yum.YumBase.__init__(self) + + def doLoggingSetup(self, debuglevel, errorlevel, syslog_ident=None, syslog_facility=None): + """Setup the logging facility.""" + + logdir = os.path.join(self.pungiconfig.get('pungi', 'destdir'), 'logs') + if not os.path.exists(logdir): + os.makedirs(logdir) + if self.pungiconfig.get('pungi', 'flavor'): + logfile = os.path.join(logdir, '%s.%s.log' % (self.pungiconfig.get('pungi', 'flavor'), + self.pungiconfig.get('pungi', 'arch'))) + else: + logfile = os.path.join(logdir, '%s.log' % (self.pungiconfig.get('pungi', 'arch'))) + + yum.logging.basicConfig(level=yum.logging.DEBUG, filename=logfile) + + def doFileLogSetup(self, uid, logfile): + # This function overrides a yum function, allowing pungi to control + # the logging. 
+ pass + + def _compare_providers(self, *args, **kwargs): + # HACK: always prefer 64bit over 32bit packages + result = yum.YumBase._compare_providers(self, *args, **kwargs) + if len(result) >= 2: + pkg1 = result[0][0] + pkg2 = result[1][0] + if pkg1.name == pkg2.name: + best_arch = self.arch.get_best_arch_from_list([pkg1.arch, pkg2.arch], self.arch.canonarch) + if best_arch != "noarch" and best_arch != pkg1.arch: + result[0:1] = result[0:1:-1] + return result + +class Pungi(PungiBase): + def __init__(self, config, ksparser): + PungiBase.__init__(self, config) + + # Set our own logging name space + self.logger = logging.getLogger('Pungi') + + # Create a lock object for later use. + filename = self.config.get('pungi', 'cachedir') + "/yumlock" + lock = lockfile.LockFile(filename) + self.yumlock = ReentrantYumLock(lock, self.logger) + + # Create the stdout/err streams and only send INFO+ stuff there + formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s') + console = logging.StreamHandler() + console.setFormatter(formatter) + console.setLevel(logging.INFO) + self.logger.addHandler(console) + + self.destdir = self.config.get('pungi', 'destdir') + self.archdir = os.path.join(self.destdir, + self.config.get('pungi', 'version'), + self.config.get('pungi', 'flavor'), + self.tree_arch) + + self.topdir = os.path.join(self.archdir, 'os') + self.isodir = os.path.join(self.archdir, self.config.get('pungi','isodir')) + + pungi.util._ensuredir(self.workdir, self.logger, force=True) + + self.common_files = [] + self.infofile = os.path.join(self.config.get('pungi', 'destdir'), + self.config.get('pungi', 'version'), + '.composeinfo') + + + self.ksparser = ksparser + + self.resolved_deps = {} # list the deps we've already resolved, short circuit. + self.excluded_pkgs = {} # list the packages we've already excluded. 
+ self.seen_pkgs = {} # list the packages we've already seen so we can check all deps only once + self.multilib_methods = self.config.get('pungi', 'multilib').split(" ") + + # greedy methods: + # * none: only best match package + # * all: all packages matching a provide + # * build: best match package + all other packages from the same SRPM having the same provide + self.greedy_method = self.config.get('pungi', 'greedy') + + self.lookaside_repos = self.config.get('pungi', 'lookaside_repos').split(" ") + self.sourcerpm_arch_map = {} # {sourcerpm: set[arches]} - used for gathering debuginfo + + # package object lists + self.po_list = set() + self.srpm_po_list = set() + self.debuginfo_po_list = set() + + # get_srpm_po() cache + self.sourcerpm_srpmpo_map = {} + + # flags + self.input_packages = set() # packages specified in %packages kickstart section including those defined via comps groups + self.comps_packages = set() # packages specified in %packages kickstart section *indirectly* via comps groups + self.prepopulate_packages = set() # packages specified in %prepopulate kickstart section + self.fulltree_packages = set() + self.langpack_packages = set() + self.multilib_packages = set() + + # already processed packages + self.completed_add_srpms = set() # srpms + self.completed_debuginfo = set() # rpms + self.completed_depsolve = set() # rpms + self.completed_langpacks = set() # rpms + self.completed_multilib = set() # rpms + self.completed_fulltree = set() # srpms + self.completed_selfhosting = set() # srpms + self.completed_greedy_build = set() # po.sourcerpm + + self.is_fulltree = self.config.getboolean("pungi", "fulltree") + self.is_selfhosting = self.config.getboolean("pungi", "selfhosting") + self.is_sources = not self.config.getboolean("pungi", "nosource") + self.is_debuginfo = not self.config.getboolean("pungi", "nodebuginfo") + self.is_resolve_deps = self.config.getboolean("pungi", "resolve_deps") + + self.fulltree_excludes = set(self.ksparser.handler.fulltree_excludes) + + def _add_yum_repo(self, name, url, mirrorlist=False, groups=True, + cost=1000, includepkgs=None, excludepkgs=None, + proxy=None): + """This function adds a repo to the yum object. 
+ name: Name of the repo + url: Full url to the repo + mirrorlist: Bool for whether or not url is a mirrorlist + groups: Bool for whether or not to use groupdata from this repo + cost: an optional int representing the cost of a repo + includepkgs: An optional list of includes to use + excludepkgs: An optional list of excludes to use + proxy: An optional proxy to use + """ + includepkgs = includepkgs or [] + excludepkgs = excludepkgs or [] + + self.logger.info('Adding repo %s' % name) + thisrepo = yum.yumRepo.YumRepository(name) + thisrepo.name = name + # add excludes and such here when pykickstart gets them + if mirrorlist: + thisrepo.mirrorlist = yum.parser.varReplace(url, + self.ayum.conf.yumvar) + self.mirrorlists.append(thisrepo.mirrorlist) + self.logger.info('Mirrorlist for repo %s is %s' % + (thisrepo.name, thisrepo.mirrorlist)) + else: + thisrepo.baseurl = yum.parser.varReplace(url, + self.ayum.conf.yumvar) + self.repos.extend(thisrepo.baseurl) + self.logger.info('URL for repo %s is %s' % + (thisrepo.name, thisrepo.baseurl)) + thisrepo.basecachedir = self.ayum.conf.cachedir + thisrepo.enablegroups = groups + # This is until yum uses this failover by default + thisrepo.failovermethod = 'priority' + thisrepo.exclude = excludepkgs + thisrepo.includepkgs = includepkgs + thisrepo.cost = cost + # Yum doesn't like proxy being None + if proxy: + thisrepo.proxy = proxy + self.ayum.repos.add(thisrepo) + self.ayum.repos.enableRepo(thisrepo.id) + self.ayum._getRepos(thisrepo=thisrepo.id, doSetup=True) + # Set the repo callback. + self.ayum.repos.setProgressBar(CallBack()) + self.ayum.repos.callback = CallBack() + thisrepo.metadata_expire = 0 + thisrepo.mirrorlist_expire = 0 + if os.path.exists(os.path.join(thisrepo.cachedir, 'repomd.xml')): + os.remove(os.path.join(thisrepo.cachedir, 'repomd.xml')) + + @yumlocked + def _inityum(self): + """Initialize the yum object. Only needed for certain actions.""" + + # Create a yum object to use + self.repos = [] + self.mirrorlists = [] + self.ayum = PungiYum(self.config) + self.ayum.doLoggingSetup(6, 6) + yumconf = yum.config.YumConf() + yumconf.debuglevel = 6 + yumconf.errorlevel = 6 + yumconf.cachedir = self.config.get('pungi', 'cachedir') + yumconf.persistdir = "/var/lib/yum" # keep at default, gets appended to installroot + yumconf.installroot = os.path.join(self.workdir, 'yumroot') + yumconf.uid = os.geteuid() + yumconf.cache = 0 + yumconf.failovermethod = 'priority' + yumconf.deltarpm = 0 + yumvars = yum.config._getEnvVar() + yumvars['releasever'] = self.config.get('pungi', 'version') + yumvars['basearch'] = yum.rpmUtils.arch.getBaseArch(myarch=self.tree_arch) + yumconf.yumvar = yumvars + self.ayum._conf = yumconf + # I have no idea why this fixes a traceback, but James says it does. + del self.ayum.prerepoconf + self.ayum.repos.setCacheDir(self.ayum.conf.cachedir) + + self.ayum.arch.setup_arch(self.yum_arch) + + # deal with our repos + try: + self.ksparser.handler.repo.methodToRepo() + except: + pass + + for repo in self.ksparser.handler.repo.repoList: + if repo.mirrorlist: + # The not bool() thing is because pykickstart is yes/no on + # whether to ignore groups, but yum is a yes/no on whether to + # include groups. Awkward. 
+ self._add_yum_repo(repo.name, repo.mirrorlist, + mirrorlist=True, + groups=not bool(repo.ignoregroups), + cost=repo.cost, + includepkgs=repo.includepkgs, + excludepkgs=repo.excludepkgs, + proxy=repo.proxy) + else: + self._add_yum_repo(repo.name, repo.baseurl, + mirrorlist=False, + groups=not bool(repo.ignoregroups), + cost=repo.cost, + includepkgs=repo.includepkgs, + excludepkgs=repo.excludepkgs, + proxy=repo.proxy) + + self.logger.info('Getting sacks for arches %s' % self.valid_arches) + self.ayum._getSacks(archlist=self.valid_arches) + + def _filtersrcdebug(self, po): + """Filter out package objects that are of 'src' arch.""" + + if po.arch == 'src' or 'debuginfo' in po.name: + return False + + return True + + def add_package(self, po, msg=None): + if not is_package(po): + raise ValueError("Not a binary package: %s" % po) + if msg: + self.logger.info(msg) + if po not in self.po_list: + self.po_list.add(po) + self.ayum.install(po) + self.sourcerpm_arch_map.setdefault(po.sourcerpm, set()).add(po.arch) + + def add_debuginfo(self, po, msg=None): + if not is_debug(po): + raise ValueError("Not a debuginfo package: %s" % po) + if msg: + self.logger.info(msg) + if po not in self.debuginfo_po_list: + self.debuginfo_po_list.add(po) + + def add_source(self, po, msg=None): + if not is_source(po): + raise ValueError("Not a source package: %s" % po) + if msg: + self.logger.info(msg) + if po not in self.srpm_po_list: + self.srpm_po_list.add(po) + + def verifyCachePkg(self, po, path): # Stolen from yum + """check the package checksum vs the cache + return True if pkg is good, False if not""" + + (csum_type, csum) = po.returnIdSum() + + try: + filesum = yum.misc.checksum(csum_type, path) + except yum.Errors.MiscError: + return False + + if filesum != csum: + return False + + return True + + def excludePackages(self, pkg_sack): + """exclude packages according to config file""" + if not pkg_sack: + return pkg_sack + + excludes = [] # list of (name, arch, pattern, multilib) + for i in self.ksparser.handler.packages.excludedList: + pattern = i + multilib = False + if i.endswith(".+"): + multilib = True + i = i[:-2] + name, arch = arch_module.split_name_arch(i) + excludes.append((name, arch, pattern, multilib)) + + for name in self.ksparser.handler.multilib_blacklist: + excludes.append((name, None, "multilib-blacklist: %s" % name, True)) + + for pkg in pkg_sack[:]: + for name, arch, exclude_pattern, multilib in excludes: + if fnmatch(pkg.name, name): + if not arch or fnmatch(pkg.arch, arch): + if multilib and pkg.arch not in self.valid_multilib_arches: + continue + if pkg.nvra not in self.excluded_pkgs: + self.logger.info("Excluding %s.%s (pattern: %s)" % (pkg.name, pkg.arch, exclude_pattern)) + self.excluded_pkgs[pkg.nvra] = pkg + pkg_sack.remove(pkg) + break + + return pkg_sack + + def get_package_deps(self, po): + """Add the dependencies for a given package to the + transaction info""" + added = set() + if po in self.completed_depsolve: + return added + self.completed_depsolve.add(po) + + self.logger.info('Checking deps of %s.%s' % (po.name, po.arch)) + + reqs = po.requires + provs = po.provides + + for req in reqs: + if req in self.resolved_deps: + continue + r, f, v = req + if r.startswith('rpmlib(') or r.startswith('config('): + continue + if req in provs: + continue + + try: + deps = self.ayum.whatProvides(r, f, v).returnPackages() + deps = self.excludePackages(deps) + if not deps: + self.logger.warn("Unresolvable dependency %s in %s.%s" % (r, po.name, po.arch)) + continue + + if self.greedy_method == "all": 
+ deps = yum.packageSack.ListPackageSack(deps).returnNewestByNameArch() + else: + found = False + for dep in deps: + if dep in self.po_list: + # HACK: there can be builds in the input list on which we want to apply the "build" greedy rules + if self.greedy_method == "build" and dep.sourcerpm not in self.completed_greedy_build: + break + found = True + break + if found: + deps = [] + else: + all_deps = deps + deps = [self.ayum._bestPackageFromList(all_deps)] + if self.greedy_method == "build": + # handle "build" greedy method + if deps: + build_po = deps[0] + if is_package(build_po): + if build_po.arch != "noarch" and build_po.arch not in self.valid_multilib_arches: + all_deps = [ i for i in all_deps if i.arch not in self.valid_multilib_arches ] + for dep in all_deps: + if dep != build_po and dep.sourcerpm == build_po.sourcerpm: + deps.append(dep) + self.completed_greedy_build.add(dep.sourcerpm) + + for dep in deps: + if dep not in added: + msg = 'Added %s.%s (repo: %s) for %s.%s' % (dep.name, dep.arch, dep.repoid, po.name, po.arch) + self.add_package(dep, msg) + added.add(dep) + + except (yum.Errors.InstallError, yum.Errors.YumBaseError), ex: + self.logger.warn("Unresolvable dependency %s in %s.%s (repo: %s)" % (r, po.name, po.arch, po.repoid)) + continue + self.resolved_deps[req] = None + + for add in added: + self.get_package_deps(add) + return added + + def add_langpacks(self, po_list=None): + po_list = po_list or self.po_list + added = set() + + for po in sorted(po_list): + if po in self.completed_langpacks: + continue + + # get all langpacks matching the package name + langpacks = [ i for i in self.langpacks if i["name"] == po.name ] + if not langpacks: + continue + + self.completed_langpacks.add(po) + + for langpack in langpacks: + pattern = langpack["install"] % "*" # replace '%s' with '*' + exactmatched, matched, unmatched = yum.packages.parsePackages(self.all_pkgs, [pattern], casematch=1, pkgdict=self.pkg_refs.copy()) + matches = filter(self._filtersrcdebug, exactmatched + matched) + matches = [ i for i in matches if not i.name.endswith("-devel") and not i.name.endswith("-static") and i.name != "man-pages-overrides" ] + matches = [ i for i in matches if fnmatch(i.name, pattern) ] + + packages_by_name = {} + for i in matches: + packages_by_name.setdefault(i.name, []).append(i) + + for i, pkg_sack in packages_by_name.iteritems(): + pkg_sack = self.excludePackages(pkg_sack) + match = self.ayum._bestPackageFromList(pkg_sack) + msg = 'Added langpack %s.%s (repo: %s) for package %s (pattern: %s)' % (match.name, match.arch, match.repoid, po.name, pattern) + self.add_package(match, msg) + self.completed_langpacks.add(match) # assuming langpack doesn't have langpacks + added.add(match) + + return added + + def add_multilib(self, po_list=None): + po_list = po_list or self.po_list + added = set() + + if not self.multilib_methods: + return added + + for po in sorted(po_list): + if po in self.completed_multilib: + continue + + if po.arch in ("noarch", "src", "nosrc"): + continue + + if po.arch in self.valid_multilib_arches: + continue + + self.completed_multilib.add(po) + + matches = self.ayum.pkgSack.searchNevra(name=po.name, ver=po.version, rel=po.release) + matches = [i for i in matches if i.arch in self.valid_multilib_arches] + if not matches: + continue + matches = self.excludePackages(matches) + match = self.ayum._bestPackageFromList(matches) + if not match: + continue + + if po.name in self.ksparser.handler.multilib_whitelist: + msg = "Added multilib package %s.%s (repo: %s) for package 
%s.%s (method: %s)" % (match.name, match.arch, match.repoid, po.name, po.arch, "multilib-whitelist") + self.add_package(match, msg) + self.completed_multilib.add(match) + added.add(match) + continue + + method = multilib.po_is_multilib(po, self.multilib_methods) + if not method: + continue + msg = "Added multilib package %s.%s (repo: %s) for package %s.%s (method: %s)" % (match.name, match.arch, match.repoid, po.name, po.arch, method) + self.add_package(match, msg) + self.completed_multilib.add(match) + added.add(match) + return added + + def getPackagesFromGroup(self, group): + """Get a list of package names from a ksparser group object + + Returns a list of package names""" + + packages = [] + + # Check if we have the group + if not self.ayum.comps.has_group(group.name): + self.logger.error("Group %s not found in comps!" % group) + return packages + + # Get the group object to work with + groupobj = self.ayum.comps.return_group(group.name) + + # Add the mandatory packages + packages.extend(groupobj.mandatory_packages.keys()) + + # Add the default packages unless we don't want them + if group.include == 1: + packages.extend(groupobj.default_packages.keys()) + + # Add the optional packages if we want them + if group.include == 2: + packages.extend(groupobj.default_packages.keys()) + packages.extend(groupobj.optional_packages.keys()) + + # Deal with conditional packages + # Populate a dict with the name of the required package and value + # of the package objects it would bring in. To be used later if + # we match the conditional. + for condreq, cond in groupobj.conditional_packages.iteritems(): + matches = self.ayum.pkgSack.searchNevra(name=condreq) + if matches: + if self.greedy_method != "all": + # works for both "none" and "build" greedy methods + matches = [self.ayum._bestPackageFromList(matches)] + self.ayum.tsInfo.conditionals.setdefault(cond, []).extend(matches) + + return packages + + def _addDefaultGroups(self, excludeGroups=None): + """Cycle through the groups and return a list of the ones that are + default.""" + excludeGroups = excludeGroups or [] + + # This is mostly stolen from anaconda. + groups = map(lambda x: x.groupid, + filter(lambda x: x.default, self.ayum.comps.groups)) + + groups = [x for x in groups if x not in excludeGroups] + + self.logger.debug('Add default groups %s' % groups) + return groups + + def get_langpacks(self): + try: + self.langpacks = list(self.ayum.comps.langpacks) + except AttributeError: + # old yum + self.logger.warning("Could not get langpacks via yum.comps. 
You may need to update yum.") + self.langpacks = [] + except yum.Errors.GroupsError: + # no groups or no comps at all + self.logger.warning("Could not get langpacks due to missing comps in repodata or --ignoregroups=true option.") + self.langpacks = [] + + def getPackageObjects(self): + """Cycle through the list of packages and get package object matches.""" + + searchlist = [] # The list of package names/globs to search for + matchdict = {} # A dict of objects to names + excludeGroups = [] # A list of groups for removal defined in the ks file + + # precompute pkgs and pkg_refs to speed things up + self.all_pkgs = list(set(self.ayum.pkgSack.returnPackages())) + self.all_pkgs = self.excludePackages(self.all_pkgs) + + + lookaside_nvrs = set() + for po in self.all_pkgs: + if po.repoid in self.lookaside_repos: + lookaside_nvrs.add(po.nvra) + for po in self.all_pkgs[:]: + if po.repoid not in self.lookaside_repos and po.nvra in lookaside_nvrs: + self.logger.debug("Removed %s (repo: %s), because it's also in a lookaside repo" % (po, po.repoid)) + self.all_pkgs.remove(po) + + self.pkg_refs = yum.packages.buildPkgRefDict(self.all_pkgs, casematch=True) + + self.get_langpacks() + + # First remove the excludes + self.ayum.excludePackages() + + # Get the groups set for removal + for group in self.ksparser.handler.packages.excludedGroupList: + excludeGroups.append(str(group)[1:]) + + if "core" in [ i.groupid for i in self.ayum.comps.groups ]: + if "core" not in [ i.name for i in self.ksparser.handler.packages.groupList ]: + self.logger.warning("The @core group is no longer added by default; Please add @core to the kickstart if you want it in.") + + if "base" in [ i.groupid for i in self.ayum.comps.groups ]: + if "base" not in [ i.name for i in self.ksparser.handler.packages.groupList ]: + if self.ksparser.handler.packages.addBase: + self.logger.warning("The --nobase kickstart option is no longer supported; Please add @base to the kickstart if you want it in.") + + # Check to see if we want all the defaults + if self.ksparser.handler.packages.default: + for group in self._addDefaultGroups(excludeGroups): + self.ksparser.handler.packages.add(['@%s' % group]) + + # Get a list of packages from groups + comps_package_names = set() + for group in self.ksparser.handler.packages.groupList: + comps_package_names.update(self.getPackagesFromGroup(group)) + searchlist.extend(sorted(comps_package_names)) + + # Add packages + searchlist.extend(self.ksparser.handler.packages.packageList) + input_packages = searchlist[:] + + # Add prepopulate packages + prepopulate_packages = self.ksparser.handler.prepopulate + searchlist.extend(prepopulate_packages) + + # Make the search list unique + searchlist = yum.misc.unique(searchlist) + + for name in searchlist: + pattern = name + multilib = False + if name.endswith(".+"): + name = name[:-2] + multilib = True + + if self.greedy_method == "all" and name == "system-release": + # HACK: handles a special case, when system-release virtual provide is specified in the greedy mode + matches = self.ayum.whatProvides(name, None, None).returnPackages() + else: + exactmatched, matched, unmatched = yum.packages.parsePackages(self.all_pkgs, [name], casematch=1, pkgdict=self.pkg_refs.copy()) + matches = exactmatched + matched + + matches = filter(self._filtersrcdebug, matches) + + if multilib and self.greedy_method != "all": + matches = [ po for po in matches if po.arch in self.valid_multilib_arches ] + + if not matches: + self.logger.warn('Could not find a match for %s in any configured 
repo' % pattern) + continue + + packages_by_name = {} + for po in matches: + packages_by_name.setdefault(po.name, []).append(po) + + for name, packages in packages_by_name.iteritems(): + packages = self.excludePackages(packages or []) + if not packages: + continue + if self.greedy_method == "all": + packages = yum.packageSack.ListPackageSack(packages).returnNewestByNameArch() + else: + # works for both "none" and "build" greedy methods + packages = [self.ayum._bestPackageFromList(packages)] + + if name in input_packages: + self.input_packages.update(packages) + if name in comps_package_names: + self.comps_packages.update(packages) + + for po in packages: + msg = 'Found %s.%s' % (po.name, po.arch) + self.add_package(po, msg) + name_arch = "%s.%s" % (po.name, po.arch) + if name_arch in prepopulate_packages: + self.prepopulate_packages.add(po) + + if not self.po_list: + raise RuntimeError("No packages found") + + self.logger.info('Finished gathering package objects.') + + def gather(self): + + # get package objects according to the input list + self.getPackageObjects() + if self.is_sources: + self.createSourceHashes() + + pass_num = 0 + added = set() + while 1: + if pass_num > 0 and not added: + break + added = set() + pass_num += 1 + self.logger.info("Pass #%s" % pass_num) + + if self.is_resolve_deps: + # get conditional deps (defined in comps) + for txmbr in self.ayum.tsInfo: + if not txmbr.po in self.po_list: + if not is_package(txmbr.po): + # we don't want sources which can be pulled in, because 'src' arch is part of self.valid_arches + continue + self.add_package(txmbr.po) + + # resolve deps + if self.is_resolve_deps: + for po in sorted(self.po_list): + added.update(self.get_package_deps(po)) + + if self.is_sources: + added_srpms = self.add_srpms() + added.update(added_srpms) + + if self.is_selfhosting: + for srpm_po in sorted(added_srpms): + added.update(self.get_package_deps(srpm_po)) + + if self.is_fulltree: + new = self.add_fulltree() + self.fulltree_packages.update(new) + self.fulltree_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) + added.update(new) + if added: + continue + + # add langpacks + new = self.add_langpacks(self.po_list) + self.langpack_packages.update(new) + if self.is_sources: + self.langpack_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) + added.update(new) + if added: + continue + + # add multilib packages + new = self.add_multilib(self.po_list) + self.multilib_packages.update(new) + self.multilib_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) + added.update(new) + if added: + continue + + def get_srpm_po(self, po): + """Given a package object, get a package object for the corresponding source rpm.""" + + # return srpm_po from cache if available + srpm_po = self.sourcerpm_srpmpo_map.get(po.sourcerpm, None) + if srpm_po is not None: + return srpm_po + + # arch can be "src" or "nosrc" + nvr, arch, _ = po.sourcerpm.rsplit(".", 2) + name, ver, rel = nvr.rsplit('-', 2) + + # ... but even "nosrc" packages are stored as "src" in repodata + srpm_po_list = self.ayum.pkgSack.searchNevra(name=name, ver=ver, rel=rel, arch="src") + if not srpm_po_list: + raise RuntimeError("Cannot find a source rpm for %s" % po.sourcerpm) + srpm_po = srpm_po_list[0] + self.sourcerpm_srpmpo_map[po.sourcerpm] = srpm_po + return srpm_po + + def createSourceHashes(self): + """Create two dicts - one that maps binary POs to source POs, and + one that maps a single source PO to all binary POs it produces. 
+ Requires yum still configured.""" + self.src_by_bin = {} + self.bin_by_src = {} + self.logger.info("Generating source <-> binary package mappings") + #(dummy1, everything, dummy2) = yum.packages.parsePackages(self.all_pkgs, ['*'], pkgdict=self.pkg_refs.copy()) + failed = [] + for po in self.all_pkgs: + if is_source(po): + continue + try: + srpmpo = self.get_srpm_po(po) + except RuntimeError: + failed.append(po.sourcerpm) + continue + + self.src_by_bin[po] = srpmpo + if self.bin_by_src.has_key(srpmpo): + self.bin_by_src[srpmpo].append(po) + else: + self.bin_by_src[srpmpo] = [po] + + if failed: + self.logger.info("The following srpms could not be found: %s" % ( + pprint.pformat(list(sorted(failed))))) + self.logger.info("Couldn't find %i of %i srpms." % ( + len(failed), len(self.src_by_bin))) + raise RuntimeError("Could not find all srpms.") + + def add_srpms(self, po_list=None): + """Cycle through the list of package objects and + find the sourcerpm for them. Requires yum still + configured and a list of package objects""" + + srpms = set() + po_list = po_list or self.po_list + for po in sorted(po_list): + srpm_po = self.sourcerpm_srpmpo_map[po.sourcerpm] + if srpm_po in self.completed_add_srpms: + continue + msg = "Added source package %s.%s (repo: %s)" % (srpm_po.name, srpm_po.arch, srpm_po.repoid) + self.add_source(srpm_po, msg) + + # flags + if po in self.input_packages: + self.input_packages.add(srpm_po) + if po in self.fulltree_packages: + self.fulltree_packages.add(srpm_po) + if po in self.langpack_packages: + self.langpack_packages.add(srpm_po) + if po in self.multilib_packages: + self.multilib_packages.add(srpm_po) + + self.completed_add_srpms.add(srpm_po) + srpms.add(srpm_po) + return srpms + + def add_fulltree(self, srpm_po_list=None): + """Cycle through all package objects, and add any + that correspond to a source rpm that we are including. + Requires yum still configured and a list of package + objects.""" + + self.logger.info("Completing package set") + + srpm_po_list = srpm_po_list or self.srpm_po_list + srpms = [] + for srpm_po in srpm_po_list: + if srpm_po in self.completed_fulltree: + continue + if srpm_po.name not in self.fulltree_excludes: + srpms.append(srpm_po) + self.completed_fulltree.add(srpm_po) + + added = set() + for srpm_po in srpms: + include_native = False + include_multilib = False + has_native = False + has_multilib = False + + for po in self.excludePackages(self.bin_by_src[srpm_po]): + if not is_package(po): + continue + if po.arch == "noarch": + continue + if po not in self.po_list: + # process only already included packages + if po.arch in self.valid_multilib_arches: + has_multilib = True + elif po.arch in self.valid_native_arches: + has_native = True + continue + if po.arch in self.valid_multilib_arches and self.greedy_method == "all": + include_multilib = True + elif po.arch in self.valid_native_arches: + include_native = True + + # XXX: this is very fragile! + # Do not make any changes unless you really know what you're doing! + if not include_native: + # if there's no native package already pulled in... 
+ if has_native and not include_multilib: + # include all native packages, but only if we're not pulling multilib already + # SCENARIO: a noarch package was already pulled in and there are x86_64 and i686 packages -> we want x86_64 in to complete the package set + include_native = True + elif has_multilib: + # SCENARIO: a noarch package was already pulled in and there are no x86_64 packages; we want i686 in to complete the package set + include_multilib = True + + for po in self.excludePackages(self.bin_by_src[srpm_po]): + if not is_package(po): + continue + if po in self.po_list: + continue + if po.arch != "noarch": + if po.arch in self.valid_multilib_arches: + if not include_multilib: + continue + if po.arch in self.valid_native_arches: + if not include_native: + continue + msg = "Added %s.%s (repo: %s) to complete package set" % (po.name, po.arch, po.repoid) + self.add_package(po, msg) + return added + + def getDebuginfoList(self): + """Cycle through the list of package objects and find + debuginfo rpms for them. Requires yum still + configured and a list of package objects""" + + added = set() + for po in self.all_pkgs: + if not is_debug(po): + continue + + if po.sourcerpm not in self.sourcerpm_arch_map: + # TODO: print a warning / throw an error + continue + if not (set(self.compatible_arches[po.arch]) & set(self.sourcerpm_arch_map[po.sourcerpm]) - set(["noarch"])): + # skip all incompatible arches + # this pulls i386 debuginfo for a i686 package for example + continue + msg = 'Added debuginfo %s.%s (repo: %s)' % (po.name, po.arch, po.repoid) + self.add_debuginfo(po, msg) + + # flags + srpm_po = self.sourcerpm_srpmpo_map[po.sourcerpm] + if srpm_po in self.input_packages: + self.input_packages.add(po) + if srpm_po in self.fulltree_packages: + self.fulltree_packages.add(po) + if srpm_po in self.langpack_packages: + self.langpack_packages.add(po) + if srpm_po in self.multilib_packages: + self.multilib_packages.add(po) + + added.add(po) + return added + + def _downloadPackageList(self, polist, relpkgdir): + """Cycle through the list of package objects and + download them from their respective repos.""" + + downloads = [] + for pkg in polist: + downloads.append('%s.%s' % (pkg.name, pkg.arch)) + downloads.sort() + self.logger.info("Download list: %s" % downloads) + + pkgdir = os.path.join(self.config.get('pungi', 'destdir'), + self.config.get('pungi', 'version'), + self.config.get('pungi', 'flavor'), + relpkgdir) + + # Ensure the pkgdir exists, force if requested, and make sure we clean it out + if relpkgdir.endswith('SRPMS'): + # Since we share source dirs with other arches don't clean, but do allow us to use it + pungi.util._ensuredir(pkgdir, self.logger, force=True, clean=False) + else: + pungi.util._ensuredir(pkgdir, self.logger, force=self.config.getboolean('pungi', 'force'), clean=True) + + probs = self.ayum.downloadPkgs(polist) + + if len(probs.keys()) > 0: + self.logger.error("Errors were encountered while downloading packages.") + for key in probs.keys(): + errors = yum.misc.unique(probs[key]) + for error in errors: + self.logger.error("%s: %s" % (key, error)) + sys.exit(1) + + for po in polist: + basename = os.path.basename(po.relativepath) + + local = po.localPkg() + if self.config.getboolean('pungi', 'nohash'): + target = os.path.join(pkgdir, basename) + else: + target = os.path.join(pkgdir, po.name[0].lower(), basename) + # Make sure we have the hashed dir available to link into we only want dirs there to corrospond to packages + # that we are including so we can not just do 
A-Z 0-9 + pungi.util._ensuredir(os.path.join(pkgdir, po.name[0].lower()), self.logger, force=True, clean=False) + + # Link downloaded package in (or link package from file repo) + try: + pungi.util._link(local, target, self.logger, force=True) + continue + except: + self.logger.error("Unable to link %s from the yum cache." % po.name) + sys.exit(1) + + self.logger.info('Finished downloading packages.') + + @yumlocked + def downloadPackages(self): + """Download the package objects obtained in getPackageObjects().""" + + self._downloadPackageList(self.po_list, + os.path.join(self.tree_arch, + self.config.get('pungi', 'osdir'), + self.config.get('pungi', 'product_path'))) + + def makeCompsFile(self): + """Gather any comps files we can from repos and merge them into one.""" + + ourcompspath = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version'))) + + # Filter out things we don't include + ourgroups = [] + for item in self.ksparser.handler.packages.groupList: + g = self.ayum.comps.return_group(item.name) + if g: + ourgroups.append(g.groupid) + allgroups = [g.groupid for g in self.ayum.comps.get_groups()] + for group in allgroups: + if group not in ourgroups and not self.ayum.comps.return_group(group).langonly: + self.logger.info('Removing extra group %s from comps file' % (group,)) + del self.ayum.comps._groups[group] + + groups = [g.groupid for g in self.ayum.comps.get_groups()] + envs = self.ayum.comps.get_environments() + for env in envs: + for group in env.groups: + if group not in groups: + self.logger.info('Removing incomplete environment %s from comps file' % (env,)) + del self.ayum.comps._environments[env.environmentid] + break + + ourcomps = open(ourcompspath, 'w') + ourcomps.write(self.ayum.comps.xml()) + ourcomps.close() + + # Disable this until https://bugzilla.redhat.com/show_bug.cgi?id=442097 is fixed. 
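The hashed package layout used by _downloadPackageList() above puts every rpm into a single-letter subdirectory unless 'nohash' is set; a small sketch of the path construction, with hypothetical values standing in for the config and package object:

    import os

    pkgdir = "/compose/21/Workstation/x86_64/os/Packages"     # hypothetical
    name, basename = "bash", "bash-4.3.42-1.fc21.x86_64.rpm"  # hypothetical po
    target = os.path.join(pkgdir, name[0].lower(), basename)
    print(target)   # .../Packages/b/bash-4.3.42-1.fc21.x86_64.rpm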
+ # Run the xslt filter over our comps file + #compsfilter = ['/usr/bin/xsltproc', '--novalid'] + #compsfilter.append('-o') + #compsfilter.append(ourcompspath) + #compsfilter.append('/usr/share/pungi/comps-cleanup.xsl') + #compsfilter.append(ourcompspath) + + #pungi.util._doRunCommand(compsfilter, self.logger) + + @yumlocked + def downloadSRPMs(self): + """Cycle through the list of srpms and + find the package objects for them, Then download them.""" + + # do the downloads + self._downloadPackageList(self.srpm_po_list, os.path.join('source', 'SRPMS')) + + @yumlocked + def downloadDebuginfo(self): + """Cycle through the list of debuginfo rpms and + download them.""" + + # do the downloads + self._downloadPackageList(self.debuginfo_po_list, os.path.join(self.tree_arch, 'debug')) + + def _list_packages(self, po_list): + """Cycle through the list of packages and return their paths.""" + result = [] + for po in po_list: + if po.repoid in self.lookaside_repos: + continue + + flags = [] + + # input + if po in self.input_packages: + flags.append("input") + + # comps + if po in self.comps_packages: + flags.append("comps") + + # prepopulate + if po in self.prepopulate_packages: + flags.append("prepopulate") + + # langpack + if po in self.langpack_packages: + flags.append("langpack") + + # multilib + if po in self.multilib_packages: + flags.append("multilib") + + # fulltree + if po in self.fulltree_packages: + flags.append("fulltree") + + # fulltree-exclude + if is_source(po): + srpm_name = po.name + else: + srpm_name = po.sourcerpm.rsplit("-", 2)[0] + if srpm_name in self.fulltree_excludes: + flags.append("fulltree-exclude") + + result.append({ + "path": os.path.join(po.basepath or "", po.relativepath), + "flags": sorted(flags), + }) + result.sort(lambda x, y: cmp(x["path"], y["path"])) + return result + + def list_packages(self): + """Cycle through the list of RPMs and return their paths.""" + return self._list_packages(self.po_list) + + def list_srpms(self): + """Cycle through the list of SRPMs and return their paths.""" + return self._list_packages(self.srpm_po_list) + + def list_debuginfo(self): + """Cycle through the list of DEBUGINFO RPMs and return their paths.""" + return self._list_packages(self.debuginfo_po_list) + + def _size_packages(self, po_list): + return sum([ po.size for po in po_list if po.repoid not in self.lookaside_repos ]) + + def size_packages(self): + return self._size_packages(self.po_list) + + def size_srpms(self): + return self._size_packages(self.srpm_po_list) + + def size_debuginfo(self): + return self._size_packages(self.debuginfo_po_list) + + def writeinfo(self, line): + """Append a line to the infofile in self.infofile""" + + + f=open(self.infofile, "a+") + f.write(line.strip() + "\n") + f.close() + + def mkrelative(self, subfile): + """Return the relative path for 'subfile' underneath the version dir.""" + + basedir = os.path.join(self.destdir, self.config.get('pungi', 'version')) + if subfile.startswith(basedir): + return subfile.replace(basedir + os.path.sep, '') + + def _makeMetadata(self, path, cachedir, comps=False, repoview=False, repoviewtitle=False, + baseurl=False, output=False, basedir=False, update=True, + compress_type=None): + """Create repodata and repoview.""" + + conf = createrepo.MetaDataConfig() + conf.cachedir = os.path.join(cachedir, 'createrepocache') + conf.update = update + conf.unique_md_filenames = True + if output: + conf.outputdir = output + else: + conf.outputdir = path + conf.directory = path + conf.database = True + if comps: + 
conf.groupfile = comps + if basedir: + conf.basedir = basedir + if baseurl: + conf.baseurl = baseurl + if compress_type: + conf.compress_type = compress_type + repomatic = createrepo.MetaDataGenerator(conf) + self.logger.info('Making repodata') + repomatic.doPkgMetadata() + repomatic.doRepoMetadata() + repomatic.doFinalMove() + + if repoview: + # setup the repoview call + repoview = ['/usr/bin/repoview'] + repoview.append('--quiet') + + repoview.append('--state-dir') + repoview.append(os.path.join(cachedir, 'repoviewcache')) + + if repoviewtitle: + repoview.append('--title') + repoview.append(repoviewtitle) + + repoview.append(path) + + # run the command + pungi.util._doRunCommand(repoview, self.logger) + + def doCreaterepo(self, comps=True): + """Run createrepo to generate repodata in the tree.""" + + + compsfile = None + if comps: + compsfile = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version'))) + + # setup the cache dirs + for target in ['createrepocache', 'repoviewcache']: + pungi.util._ensuredir(os.path.join(self.config.get('pungi', 'cachedir'), + target), + self.logger, + force=True) + + repoviewtitle = '%s %s - %s' % (self.config.get('pungi', 'name'), + self.config.get('pungi', 'version'), + self.tree_arch) + + cachedir = self.config.get('pungi', 'cachedir') + compress_type = self.config.get('pungi', 'compress_type') + + # setup the createrepo call + self._makeMetadata(self.topdir, cachedir, compsfile, + repoview=True, repoviewtitle=repoviewtitle, + compress_type=compress_type) + + # create repodata for debuginfo + if self.config.getboolean('pungi', 'debuginfo'): + path = os.path.join(self.archdir, 'debug') + if not os.path.isdir(path): + self.logger.debug("No debuginfo for %s" % self.tree_arch) + return + self._makeMetadata(path, cachedir, repoview=False, + compress_type=compress_type) + + def _shortenVolID(self): + """shorten the volume id to make sure its under 32 characters""" + + substitutions = {'Workstation': 'WS', + 'Server': 'S', + 'Cloud': 'C', + 'Alpha': 'A', + 'Beta': 'B', + 'TC': 'T'} + name = self.config.get('pungi', 'name') + version = self.config.get('pungi', 'version') + arch = self.tree_arch + + for k, v in substitutions.iteritems(): + if k in name: + name = name.replace(k, v) + if k in version: + version = version.replace(k, v) + volid = "%s-%s-%s" % (name, version, arch) + if len(volid) > 32: + raise RuntimeError("Volume ID %s is longer than 32 characters" % volid) + else: + return volid + + def doBuildinstall(self): + """Run lorax on the tree.""" + + # the old ayum object has transaction data that confuse lorax, reinit. 
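The volume-ID shortening in _shortenVolID() above can be exercised on its own; a minimal sketch with hypothetical name and version values:

    substitutions = {'Workstation': 'WS', 'Server': 'S', 'Cloud': 'C',
                     'Alpha': 'A', 'Beta': 'B', 'TC': 'T'}
    name, version, arch = 'Fedora-Workstation', '22_Beta_TC3', 'x86_64'
    for k, v in substitutions.items():
        if k in name:
            name = name.replace(k, v)
        if k in version:
            version = version.replace(k, v)
    volid = "%s-%s-%s" % (name, version, arch)
    print(volid)                 # Fedora-WS-22_B_T3-x86_64 (24 chars, fits)
    assert len(volid) <= 32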
+ self._inityum() + + # Add the repo in the destdir to our yum object + self._add_yum_repo('ourtree', + 'file://%s' % self.topdir, + cost=10) + + product = self.config.get('pungi', 'name') + version = self.config.get('pungi', 'version') + release = '%s %s' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version')) + + variant = self.config.get('pungi', 'flavor') + bugurl = self.config.get('pungi', 'bugurl') + isfinal = self.config.get('pungi', 'isfinal') + + volid = self._shortenVolID() + workdir = self.workdir + outputdir = self.topdir + + # on ppc64 we need to tell lorax to only use ppc64 packages so that the media will run on all 64 bit ppc boxes + if self.tree_arch == 'ppc64': + self.ayum.arch.setup_arch('ppc64') + self.ayum.compatarch = 'ppc64' + elif self.tree_arch == 'ppc64le': + self.ayum.arch.setup_arch('ppc64le') + self.ayum.compatarch = 'ppc64le' + + # Only supported mac hardware is x86 make sure we only enable mac support on arches that need it + if self.tree_arch in ['x86_64']: + if self.config.getboolean('pungi','nomacboot'): + domacboot = False + else: + domacboot = True + else: + domacboot = False + + # run the command + lorax = pylorax.Lorax() + try: + conf_file = self.config.get('lorax', 'conf_file') + lorax.configure(conf_file=conf_file) + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + lorax.configure() + + try: + installpkgs = self.config.get('lorax', 'installpkgs').split(" ") + except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): + installpkgs = None + + lorax.run(self.ayum, product=product, version=version, release=release, + variant=variant, bugurl=bugurl, isfinal=isfinal, domacboot=domacboot, + workdir=workdir, outputdir=outputdir, volid=volid, installpkgs=installpkgs) + + # write out the tree data for snake + self.writeinfo('tree: %s' % self.mkrelative(self.topdir)) + + # Write out checksums for verifytree + # First open the treeinfo file so that we can config parse it + treeinfofile = os.path.join(self.topdir, '.treeinfo') + + try: + treefile = open(treeinfofile, 'r') + except IOError: + self.logger.error("Could not read .treeinfo file: %s" % treefile) + sys.exit(1) + + # Create a ConfigParser object out of the contents so that we can + # write it back out later and not worry about formatting + treeinfo = MyConfigParser() + treeinfo.readfp(treefile) + treefile.close() + treeinfo.add_section('checksums') + + # Create a function to use with os.path.walk to sum the files + # basepath is used to make the sum output relative + sums = [] + def getsum(basepath, dir, files): + for file in files: + path = os.path.join(dir, file) + # don't bother summing directories. Won't work. 
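The getsum() helper defined here is handed to os.path.walk() just below; os.path.walk() exists only on Python 2, so an equivalent of the whole checksum walk with the portable os.walk() might look like this sketch:

    # A sketch of the same traversal with os.walk(), which, unlike
    # os.path.walk(), also exists on Python 3. Chunked reading mirrors
    # pungi.util._doCheckSum().
    import hashlib
    import os

    def checksum_tree(topdir, subdir):
        basepath = topdir + '/'
        sums = []
        for dirpath, dirnames, filenames in os.walk(os.path.join(topdir, subdir)):
            for filename in filenames:
                path = os.path.join(dirpath, filename)
                digest = hashlib.sha256()
                with open(path, 'rb') as f:
                    for chunk in iter(lambda: f.read(8192), b''):
                        digest.update(chunk)
                sums.append((path.replace(basepath, ''), digest.hexdigest()))
        return sums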
+ if os.path.isdir(path): + continue + sum = pungi.util._doCheckSum(path, 'sha256', self.logger) + outpath = path.replace(basepath, '') + sums.append((outpath, sum)) + + # Walk the os/images path to get sums of all the files + os.path.walk(os.path.join(self.topdir, 'images'), getsum, self.topdir + '/') + + # Capture PPC images + if self.tree_arch in ['ppc', 'ppc64', 'ppc64le']: + os.path.walk(os.path.join(self.topdir, 'ppc'), getsum, self.topdir + '/') + + # Get a checksum of repomd.xml since it has within it sums for other files + repomd = os.path.join(self.topdir, 'repodata', 'repomd.xml') + sum = pungi.util._doCheckSum(repomd, 'sha256', self.logger) + sums.append((os.path.join('repodata', 'repomd.xml'), sum)) + + # Now add the sums, and write the config out + try: + treefile = open(treeinfofile, 'w') + except IOError: + self.logger.error("Could not open .treeinfo for writing: %s" % treefile) + sys.exit(1) + + for path, sum in sums: + treeinfo.set('checksums', path, sum) + + treeinfo.write(treefile) + treefile.close() + + def doGetRelnotes(self): + """Get extra files from packages in the tree to put in the topdir of + the tree.""" + + + docsdir = os.path.join(self.workdir, 'docs') + relnoterpms = self.config.get('pungi', 'relnotepkgs').split() + + fileres = [] + for pattern in self.config.get('pungi', 'relnotefilere').split(): + fileres.append(re.compile(pattern)) + + dirres = [] + for pattern in self.config.get('pungi', 'relnotedirre').split(): + dirres.append(re.compile(pattern)) + + pungi.util._ensuredir(docsdir, self.logger, force=self.config.getboolean('pungi', 'force'), clean=True) + + # Expload the packages we list as relnote packages + pkgs = os.listdir(os.path.join(self.topdir, self.config.get('pungi', 'product_path'))) + + rpm2cpio = ['/usr/bin/rpm2cpio'] + cpio = ['cpio', '-imud'] + + for pkg in pkgs: + pkgname = pkg.rsplit('-', 2)[0] + for relnoterpm in relnoterpms: + if pkgname == relnoterpm: + extraargs = [os.path.join(self.topdir, self.config.get('pungi', 'product_path'), pkg)] + try: + p1 = subprocess.Popen(rpm2cpio + extraargs, cwd=docsdir, stdout=subprocess.PIPE) + (out, err) = subprocess.Popen(cpio, cwd=docsdir, stdin=p1.stdout, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True).communicate() + except: + self.logger.error("Got an error from rpm2cpio") + self.logger.error(err) + raise + + if out: + self.logger.debug(out) + + # Walk the tree for our files + for dirpath, dirname, filelist in os.walk(docsdir): + for filename in filelist: + for regex in fileres: + if regex.match(filename) and not os.path.exists(os.path.join(self.topdir, filename)): + self.logger.info("Linking release note file %s" % filename) + pungi.util._link(os.path.join(dirpath, filename), + os.path.join(self.topdir, filename), + self.logger, + force=self.config.getboolean('pungi', + 'force')) + self.common_files.append(filename) + + # Walk the tree for our dirs + for dirpath, dirname, filelist in os.walk(docsdir): + for directory in dirname: + for regex in dirres: + if regex.match(directory) and not os.path.exists(os.path.join(self.topdir, directory)): + self.logger.info("Copying release note dir %s" % directory) + shutil.copytree(os.path.join(dirpath, directory), os.path.join(self.topdir, directory)) + + def _doIsoChecksum(self, path, csumfile): + """Simple function to wrap creating checksums of iso files.""" + + try: + checkfile = open(csumfile, 'a') + except IOError: + self.logger.error("Could not open checksum file: %s" % csumfile) + + self.logger.info("Generating checksum 
of %s" % path) + checksum = pungi.util._doCheckSum(path, 'sha256', self.logger) + if checksum: + checkfile.write("%s *%s\n" % (checksum.replace('sha256:', ''), os.path.basename(path))) + else: + self.logger.error('Failed to generate checksum for %s' % checkfile) + sys.exit(1) + checkfile.close() + + def doCreateIsos(self): + """Create iso of the tree.""" + + if self.tree_arch.startswith('arm'): + self.logger.info("ARCH: arm, not doing doCreateIsos().") + return + + isolist = [] + ppcbootinfo = '/usr/share/lorax/config_files/ppc' + + pungi.util._ensuredir(self.isodir, self.logger, + force=self.config.getboolean('pungi', 'force'), + clean=True) # This is risky... + + # setup the base command + mkisofs = ['/usr/bin/mkisofs'] + mkisofs.extend(['-v', '-U', '-J', '-R', '-T', '-m', 'repoview', '-m', 'boot.iso']) # common mkisofs flags + + x86bootargs = ['-b', 'isolinux/isolinux.bin', '-c', 'isolinux/boot.cat', + '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table'] + + efibootargs = ['-eltorito-alt-boot', '-e', 'images/efiboot.img', + '-no-emul-boot'] + + macbootargs = ['-eltorito-alt-boot', '-e', 'images/macboot.img', + '-no-emul-boot'] + + ia64bootargs = ['-b', 'images/boot.img', '-no-emul-boot'] + + ppcbootargs = ['-part', '-hfs', '-r', '-l', '-sysid', 'PPC', '-no-desktop', '-allow-multidot', '-chrp-boot'] + + ppcbootargs.append('-map') + ppcbootargs.append(os.path.join(ppcbootinfo, 'mapping')) + + ppcbootargs.append('-hfs-bless') # must be last + + isohybrid = ['/usr/bin/isohybrid'] + + # Check the size of the tree + # This size checking method may be bunk, accepting patches... + if not self.tree_arch == 'source': + treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', self.topdir], stdout=subprocess.PIPE).communicate()[0]) + else: + srcdir = os.path.join(self.config.get('pungi', 'destdir'), self.config.get('pungi', 'version'), + self.config.get('pungi', 'flavor'), 'source', 'SRPMS') + + treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', srcdir], stdout=subprocess.PIPE).communicate()[0]) + # Size returned is 2KiB clusters or some such. This translates that to MiB. + treesize = treesize * 2048 / 1024 / 1024 + + if treesize > 700: # we're larger than a 700meg CD + isoname = '%s-DVD-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), self.tree_arch, + self.config.get('pungi', 'version')) + else: + isoname = '%s-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), self.tree_arch, + self.config.get('pungi', 'version')) + + isofile = os.path.join(self.isodir, isoname) + + # setup the extra mkisofs args + extraargs = [] + + if self.tree_arch == 'i386' or self.tree_arch == 'x86_64': + extraargs.extend(x86bootargs) + if self.tree_arch == 'x86_64': + extraargs.extend(efibootargs) + isohybrid.append('-u') + if os.path.exists(os.path.join(self.topdir, 'images', 'macboot.img')): + extraargs.extend(macbootargs) + isohybrid.append('-m') + elif self.tree_arch == 'ia64': + extraargs.extend(ia64bootargs) + elif self.tree_arch.startswith('ppc'): + extraargs.extend(ppcbootargs) + extraargs.append(os.path.join(self.topdir, "ppc/mac")) + elif self.tree_arch.startswith('aarch64'): + extraargs.extend(efibootargs) + + # NOTE: if this doesn't match what's in the bootloader config, the + # image won't be bootable! 
+ extraargs.append('-V') + extraargs.append(self._shortenVolID()) + + extraargs.extend(['-o', isofile]) + + isohybrid.append(isofile) + + if not self.tree_arch == 'source': + extraargs.append(self.topdir) + else: + extraargs.append(os.path.join(self.archdir, 'SRPMS')) + + if self.config.get('pungi', 'no_dvd') == "False": + # run the command + pungi.util._doRunCommand(mkisofs + extraargs, self.logger) + + # Run isohybrid on the iso as long as its not the source iso + if os.path.exists("/usr/bin/isohybrid") and not self.tree_arch == 'source': + pungi.util._doRunCommand(isohybrid, self.logger) + + # implant md5 for mediacheck on all but source arches + if not self.tree_arch == 'source': + pungi.util._doRunCommand(['/usr/bin/implantisomd5', isofile], self.logger) + + # shove the checksum into a file + csumfile = os.path.join(self.isodir, '%s-%s-%s-CHECKSUM' % ( + self.config.get('pungi', 'iso_basename'), + self.config.get('pungi', 'version'), + self.tree_arch)) + # Write a line about what checksums are used. + # sha256sum is magic... + file = open(csumfile, 'w') + file.write('# The image checksum(s) are generated with sha256sum.\n') + file.close() + if self.config.get('pungi', 'no_dvd') == "False": + self._doIsoChecksum(isofile, csumfile) + + # Write out a line describing the media + self.writeinfo('media: %s' % self.mkrelative(isofile)) + + # Now link the boot iso + if not self.tree_arch == 'source' and \ + os.path.exists(os.path.join(self.topdir, 'images', 'boot.iso')): + isoname = '%s-netinst-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), + self.tree_arch, self.config.get('pungi', 'version')) + isofile = os.path.join(self.isodir, isoname) + + # link the boot iso to the iso dir + pungi.util._link(os.path.join(self.topdir, 'images', 'boot.iso'), isofile, self.logger) + + # shove the checksum into a file + self._doIsoChecksum(isofile, csumfile) + + self.logger.info("CreateIsos is done.") diff --git a/pungi/ks.py b/pungi/ks.py new file mode 100644 index 0000000..53f7520 --- /dev/null +++ b/pungi/ks.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- + + +""" +Pungi adds several new sections to kickstarts. + + +FULLTREE EXCLUDES +----------------- +Fulltree excludes allow us to define SRPM names +we don't want to be part of fulltree processing. + +Syntax: +%fulltree-excludes + + +... +%end + + +MULTILIB BLACKLIST +------------------ +List of RPMs which are prevented from becoming multilib. + +Syntax: +%multilib-blacklist + + +... +%end + + +MULTILIB WHITELIST +------------------ +List of RPMs which will become multilib (but only if native package is pulled in). + +Syntax: +%multilib-whitelist + + +... +%end + + +PREPOPULATE +----------- +To make sure no package is left behind between 2 composes, +we can explicitly add . records to the %prepopulate section. +These will be added to the input list and marked with 'prepopulate' flag. + +Syntax: +%prepopulate +. +. +... 
+%end +""" + + +import pykickstart.parser +import pykickstart.sections + + +class FulltreeExcludesSection(pykickstart.sections.Section): + sectionOpen = "%fulltree-excludes" + + def handleLine(self, line): + if not self.handler: + return + + (h, s, t) = line.partition('#') + line = h.rstrip() + + self.handler.fulltree_excludes.add(line) + + +class MultilibBlacklistSection(pykickstart.sections.Section): + sectionOpen = "%multilib-blacklist" + + def handleLine(self, line): + if not self.handler: + return + + (h, s, t) = line.partition('#') + line = h.rstrip() + + self.handler.multilib_blacklist.add(line) + + +class MultilibWhitelistSection(pykickstart.sections.Section): + sectionOpen = "%multilib-whitelist" + + def handleLine(self, line): + if not self.handler: + return + + (h, s, t) = line.partition('#') + line = h.rstrip() + + self.handler.multilib_whitelist.add(line) + + +class PrepopulateSection(pykickstart.sections.Section): + sectionOpen = "%prepopulate" + + def handleLine(self, line): + if not self.handler: + return + + (h, s, t) = line.partition('#') + line = h.rstrip() + + self.handler.prepopulate.add(line) + + +class KickstartParser(pykickstart.parser.KickstartParser): + def setupSections(self): + pykickstart.parser.KickstartParser.setupSections(self) + self.registerSection(FulltreeExcludesSection(self.handler)) + self.registerSection(MultilibBlacklistSection(self.handler)) + self.registerSection(MultilibWhitelistSection(self.handler)) + self.registerSection(PrepopulateSection(self.handler)) + + +HandlerClass = pykickstart.version.returnClassForVersion() +class PungiHandler(HandlerClass): + def __init__(self, *args, **kwargs): + HandlerClass.__init__(self, *args, **kwargs) + self.fulltree_excludes = set() + self.multilib_blacklist = set() + self.multilib_whitelist = set() + self.prepopulate = set() + + +def get_ksparser(ks_path=None): + """ + Return a kickstart parser instance. + Read kickstart if ks_path provided. + """ + ksparser = KickstartParser(PungiHandler()) + if ks_path: + ksparser.readKickstart(ks_path) + return ksparser diff --git a/pungi/multilib.py b/pungi/multilib.py new file mode 100755 index 0000000..a3f5e90 --- /dev/null +++ b/pungi/multilib.py @@ -0,0 +1,396 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
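A short sketch of driving the parser that pungi/ks.py defines above; the kickstart path and its contents are hypothetical:

    # compose.ks (hypothetical):
    #   %fulltree-excludes
    #   glibc32        # trailing comments are stripped by handleLine()
    #   %end
    #   %prepopulate
    #   bash.x86_64
    #   %end
    import pungi.ks

    ksparser = pungi.ks.get_ksparser(ks_path="compose.ks")
    print(sorted(ksparser.handler.fulltree_excludes))  # should include 'glibc32'
    print(sorted(ksparser.handler.prepopulate))        # should include 'bash.x86_64'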
+ + +import re +import fnmatch +import pathmatch + +import pungi.gather + + + +LINE_PATTERN_RE = re.compile(r"^\s*(?P[^#]+)(:?\s+(?P#.*))?$") +RUNTIME_PATTERN_SPLIT_RE = re.compile(r"^\s*(?P[^\s]+)\s+(?P[^\s]+)(:?\s+(?P#.*))?$") +SONAME_PATTERN_RE = re.compile(r"^(.+\.so\.[a-zA-Z0-9_\.]+).*$") + + +def read_lines(lines): + result = [] + for i in lines: + i = i.strip() + + if not i: + continue + + # skip comments + if i.startswith("#"): + continue + + match = LINE_PATTERN_RE.match(i) + if match is None: + raise ValueError("Couldn't parse line: %s" % i) + gd = match.groupdict() + result.append(gd["line"]) + return result + + +def read_lines_from_file(path): + lines = open(path, "r").readlines() + lines = read_lines(lines) + return lines + + +def read_runtime_patterns(lines): + result = [] + for i in read_lines(lines): + match = RUNTIME_PATTERN_SPLIT_RE.match(i) + if match is None: + raise ValueError("Couldn't parse pattern: %s" % i) + gd = match.groupdict() + result.append((gd["path"], gd["pattern"])) + return result + + +def read_runtime_patterns_from_file(path): + lines = open(path, "r").readlines() + return read_runtime_patterns(lines) + + +def expand_runtime_patterns(patterns): + pm = pathmatch.PathMatch() + result = [] + for path, pattern in patterns: + for root in ("", "/opt/*/*/root"): + # include Software Collections: /opt///root/... + if "$LIBDIR" in path: + for lib_dir in ("/lib", "/lib64", "/usr/lib", "/usr/lib64"): + path_pattern = path.replace("$LIBDIR", lib_dir) + path_pattern = "%s/%s" % (root, path_pattern.lstrip("/")) + pm[path_pattern] = (path_pattern, pattern) + else: + path_pattern = "%s/%s" % (root, path.lstrip("/")) + pm[path_pattern] = (path_pattern, pattern) + return pm + + +class MultilibMethodBase(object): + """a base class for multilib methods""" + name = "base" + + def select(self, po): + raise NotImplementedError + + def skip(self, po): + if pungi.gather.is_noarch(po) or pungi.gather.is_source(po) or pungi.gather.is_debug(po): + return True + return False + + def is_kernel(self, po): + for p_name, p_flag, (p_e, p_v, p_r) in po.provides: + if p_name == "kernel": + return True + return False + + def is_kernel_devel(self, po): + for p_name, p_flag, (p_e, p_v, p_r) in po.provides: + if p_name == "kernel-devel": + return True + return False + + def is_kernel_or_kernel_devel(self, po): + for p_name, p_flag, (p_e, p_v, p_r) in po.provides: + if p_name in ("kernel", "kernel-devel"): + return True + return False + + +class NoneMultilibMethod(MultilibMethodBase): + """multilib disabled""" + name = "none" + + def select(self, po): + return False + + +class AllMultilibMethod(MultilibMethodBase): + """all packages are multilib""" + name = "all" + + def select(self, po): + if self.skip(po): + return False + return True + + +class RuntimeMultilibMethod(MultilibMethodBase): + """pre-defined paths to libs""" + name = "runtime" + + def __init__(self, **kwargs): + self.blacklist = read_lines_from_file("/usr/share/pungi/multilib/runtime-blacklist.conf") + self.whitelist = read_lines_from_file("/usr/share/pungi/multilib/runtime-whitelist.conf") + self.patterns = expand_runtime_patterns(read_runtime_patterns_from_file("/usr/share/pungi/multilib/runtime-patterns.conf")) + + def select(self, po): + if self.skip(po): + return False + if po.name in self.blacklist: + return False + if po.name in self.whitelist: + return True + if self.is_kernel(po): + return False + + # gather all *.so.* provides from the RPM header + provides = set() + for i in po.provides: + match = 
SONAME_PATTERN_RE.match(i[0]) + if match is not None: + provides.add(match.group(1)) + + for path in po.returnFileEntries() + po.returnFileEntries("ghost"): + dirname, filename = path.rsplit("/", 1) + dirname = dirname.rstrip("/") + + patterns = self.patterns[dirname] + if not patterns: + continue + for dir_pattern, file_pattern in patterns: + if file_pattern == "-": + return True + if fnmatch.fnmatch(filename, file_pattern): + if ".so.*" in file_pattern: + if filename in provides: + # return only if the lib is provided in RPM header + # (some libs may be private, hence not exposed in Provides) + return True + else: + return True + return False + + +class FileMultilibMethod(MultilibMethodBase): + """explicitely defined whitelist and blacklist""" + def __init__(self, **kwargs): + self.name = "file" + whitelist = kwargs.pop("whitelist", None) + blacklist = kwargs.pop("blacklist", None) + self.whitelist = self.read_file(whitelist) + self.blacklist = self.read_file(blacklist) + + @staticmethod + def read_file(path): + if not path: + return [] + result = [ i.strip() for i in open(path, "r") if not i.strip().startswith("#") ] + return result + + def select(self, po): + for pattern in self.blacklist: + if fnmatch.fnmatch(po.name, pattern): + return False + for pattern in self.whitelist: + if fnmatch.fnmatch(po.name, pattern): + return False + return False + + +class KernelMultilibMethod(MultilibMethodBase): + """kernel and kernel-devel""" + def __init__(self, **kwargs): + self.name = "kernel" + + def select(self, po): + if self.is_kernel_or_kernel_devel(po): + return True + return False + + +class YabootMultilibMethod(MultilibMethodBase): + """yaboot on ppc""" + def __init__(self, **kwargs): + self.name = "yaboot" + + def select(self, po): + if po.arch in ["ppc"]: + if po.name.startswith("yaboot"): + return True + return False + + +class DevelMultilibMethod(MultilibMethodBase): + """all -devel and -static packages""" + name = "devel" + + def __init__(self, **kwargs): + self.blacklist = read_lines_from_file("/usr/share/pungi/multilib/devel-blacklist.conf") + self.whitelist = read_lines_from_file("/usr/share/pungi/multilib/devel-whitelist.conf") + + def select(self, po): + if self.skip(po): + return False + if po.name in self.blacklist: + return False + if po.name in self.whitelist: + return True + if self.is_kernel_devel(po): + return False + # HACK: exclude ghc* + if po.name.startswith("ghc-"): + return False + if po.name.endswith("-devel"): + return True + if po.name.endswith("-static"): + return True + for p_name, p_flag, (p_e, p_v, p_r) in po.provides: + if p_name.endswith("-devel"): + return True + if p_name.endswith("-static"): + return True + return False + + +DEFAULT_METHODS = ["devel", "runtime"] +METHOD_MAP = {} +for cls in (AllMultilibMethod, DevelMultilibMethod, FileMultilibMethod, KernelMultilibMethod, NoneMultilibMethod, RuntimeMultilibMethod, YabootMultilibMethod): + method = cls() + METHOD_MAP[method.name] = method + + +def po_is_multilib(po, methods): + for method_name in methods: + if not method_name: + continue + method = METHOD_MAP[method_name] + if method.select(po): + return method_name + return None + + +def do_multilib(yum_arch, methods, repos, tmpdir, logfile): + import os + import yum + import rpm + import logging + + archlist = yum.rpmUtils.arch.getArchList(yum_arch) + + yumbase = yum.YumBase() + yumbase.preconf.init_plugins = False + yumbase.preconf.root = tmpdir + # order matters! 
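RuntimeMultilibMethod harvests *.so.* names from the Provides header with SONAME_PATTERN_RE; the regex can be exercised on its own, here against a hypothetical Provides string:

    import re

    SONAME_PATTERN_RE = re.compile(r"^(.+\.so\.[a-zA-Z0-9_\.]+).*$")
    match = SONAME_PATTERN_RE.match("libssl.so.10()(64bit)")
    print(match.group(1))   # libssl.so.10 -- the "()(64bit)" decoration is dropped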
+ # must run doConfigSetup() before touching yumbase.conf + yumbase.doConfigSetup(fn="/dev/null") + yumbase.conf.cache = False + yumbase.conf.cachedir = tmpdir + yumbase.conf.exactarch = True + yumbase.conf.gpgcheck = False + yumbase.conf.logfile = logfile + yumbase.conf.plugins = False + yumbase.conf.reposdir = [] + yumbase.verbose_logger.setLevel(logging.ERROR) + + yumbase.doRepoSetup() + yumbase.doTsSetup() + yumbase.doRpmDBSetup() + yumbase.ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)) + + for repo in yumbase.repos.findRepos("*"): + repo.disable() + + for i, baseurl in enumerate(repos): + repo_id = "multilib-%s" % i + if "://" not in baseurl: + baseurl = "file://" + os.path.abspath(baseurl) + yumbase.add_enable_repo(repo_id, baseurls=[baseurl]) + + yumbase.doSackSetup(archlist=archlist) + yumbase.doSackFilelistPopulate() + + method_kwargs = {} + + result = [] + for po in sorted(yumbase.pkgSack): + method = po_is_multilib(po, methods) + if method: + nvra = "%s-%s-%s.%s.rpm" % (po.name, po.version, po.release, po.arch) + result.append((nvra, method)) + return result + + +def main(): + import optparse + import shutil + import tempfile + + class MyOptionParser(optparse.OptionParser): + def print_help(self, *args, **kwargs): + optparse.OptionParser.print_help(self, *args, **kwargs) + print + print "Available multilib methods:" + for key, value in sorted(METHOD_MAP.items()): + default = (key in DEFAULT_METHODS) and " (default)" or "" + print " %-10s %s%s" % (key, value.__doc__ or "", default) + + parser = MyOptionParser("usage: %prog [options]") + + parser.add_option( + "--arch", + ) + parser.add_option( + "--method", + action="append", + default=DEFAULT_METHODS, + help="multilib method", + ) + parser.add_option( + "--repo", + dest="repos", + action="append", + help="path or url to yum repo; can be specified multiple times", + ) + parser.add_option("--tmpdir") + parser.add_option("--logfile", action="store") + + opts, args = parser.parse_args() + + if args: + parser.error("no arguments expected") + + if not opts.repos: + parser.error("provide at least one repo") + + for method_name in opts.method: + if method_name not in METHOD_MAP: + parser.error("unknown method: %s" % method_name) + print opts.method + + tmpdir = opts.tmpdir + if not opts.tmpdir: + tmpdir = tempfile.mkdtemp(prefix="multilib_") + + nvra_list = do_multilib(opts.arch, opts.method, opts.repos, tmpdir, opts.logfile) + for nvra, method in nvra_list: + print "MULTILIB(%s): %s" % (method, nvra) + + if not opts.tmpdir: + shutil.rmtree(tmpdir) + + +if __name__ == "__main__": + main() diff --git a/pungi/pathmatch.py b/pungi/pathmatch.py new file mode 100644 index 0000000..d37f38d --- /dev/null +++ b/pungi/pathmatch.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- + + +import fnmatch + + +def head_tail_split(name): + name_split = name.strip("/").split("/", 1) + if len(name_split) == 2: + head = name_split[0] + tail = name_split[1].strip("/") + else: + head, tail = name_split[0], None + return head, tail + + +class PathMatch(object): + def __init__(self, parent=None, desc=None): + self._patterns = {} + self._final_patterns = {} + self._values = [] + + def __setitem__(self, name, value): + head, tail = head_tail_split(name) + + if tail is not None: + # recursion + if head not in self._patterns: + self._patterns[head] = PathMatch(parent=self, desc=head) + self._patterns[head][tail] = value + else: + if head not in self._final_patterns: + self._final_patterns[head] = PathMatch(parent=self, desc=head) + if value not in 
self._final_patterns[head]._values: + self._final_patterns[head]._values.append(value) + + def __getitem__(self, name): + result = [] + head, tail = head_tail_split(name) + for pattern in self._patterns: + if fnmatch.fnmatch(head, pattern): + if tail is None: + values = self._patterns[pattern]._values + else: + values = self._patterns[pattern][tail] + for value in values: + if value not in result: + result.append(value) + + for pattern in self._final_patterns: + if tail is None: + x = head + else: + x = "%s/%s" % (head, tail) + if fnmatch.fnmatch(x, pattern): + values = self._final_patterns[pattern]._values + for value in values: + if value not in result: + result.append(value) + return result diff --git a/pungi/util.py b/pungi/util.py new file mode 100644 index 0000000..0a2ea11 --- /dev/null +++ b/pungi/util.py @@ -0,0 +1,123 @@ +#!/usr/bin/python -tt +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + +import subprocess +import os +import shutil +import sys +import hashlib + +def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=subprocess.PIPE, env=None): + """Run a command and log the output. Error out if we get something on stderr""" + + + logger.info("Running %s" % subprocess.list2cmdline(command)) + + p1 = subprocess.Popen(command, cwd=rundir, stdout=output, stderr=error, universal_newlines=True, env=env) + (out, err) = p1.communicate() + + if out: + logger.debug(out) + + if p1.returncode != 0: + logger.error("Got an error from %s" % command[0]) + logger.error(err) + raise OSError, "Got an error from %s: %s" % (command[0], err) + +def _link(local, target, logger, force=False): + """Simple function to link or copy a package, removing target optionally.""" + + if os.path.exists(target) and force: + os.remove(target) + + #check for broken links + if force and os.path.islink(target): + if not os.path.exists(os.readlink(target)): + os.remove(target) + + try: + os.link(local, target) + except OSError, e: + if e.errno != 18: # EXDEV + logger.error('Got an error linking from cache: %s' % e) + raise OSError, e + + # Can't hardlink cross file systems + shutil.copy2(local, target) + +def _ensuredir(target, logger, force=False, clean=False): + """Ensure that a directory exists, if it already exists, only continue + if force is set.""" + + # We have to check existance of a logger, as setting the logger could + # itself cause an issue. + def whoops(func, path, exc_info): + message = 'Could not remove %s' % path + if logger: + logger.error(message) + else: + sys.stderr(message) + sys.exit(1) + + if os.path.exists(target) and not os.path.isdir(target): + message = '%s exists but is not a directory.' 
% target + if logger: + logger.error(message) + else: + sys.stderr(message) + sys.exit(1) + + if not os.path.isdir(target): + os.makedirs(target) + elif force and clean: + shutil.rmtree(target, onerror=whoops) + os.makedirs(target) + elif force: + return + else: + message = 'Directory %s already exists. Use --force to overwrite.' % target + if logger: + logger.error(message) + else: + sys.stderr(message) + sys.exit(1) + +def _doCheckSum(path, hash, logger): + """Generate a checksum hash from a provided path. + Return a string of type:hash""" + + # Try to figure out what hash we want to do + try: + sum = hashlib.new(hash) + except ValueError: + logger.error("Invalid hash type: %s" % hash) + return False + + # Try to open the file, using binary flag. + try: + myfile = open(path, 'rb') + except IOError, e: + logger.error("Could not open file %s: %s" % (path, e)) + return False + + # Loop through the file reading chunks at a time as to not + # put the entire file in memory. That would suck for DVDs + while True: + chunk = myfile.read(8192) # magic number! Taking suggestions for better blocksize + if not chunk: + break # we're done with the file + sum.update(chunk) + myfile.close() + + return '%s:%s' % (hash, sum.hexdigest()) diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index 4cba64e..bbc8932 --- a/setup.py +++ b/setup.py @@ -1,20 +1,44 @@ -from distutils.core import setup +#!/usr/bin/python +# -*- coding: utf-8 -*- + + +import os import glob -setup(name='pungi', - version='3.13', # make sure src/bin/pungi.py is updated to match - description='Distribution compose tool', - author='Dennis Gilmore', - author_email='dgilmore@fedoraproject.org', - url='http://fedorahosted.org/pungi', - license='GPLv2', - package_dir = {'': 'src'}, - packages = ['pypungi'], - scripts = ['src/bin/pungi.py'], - data_files=[ +import distutils.command.sdist +from setuptools import setup + + +# override default tarball format with bzip2 +distutils.command.sdist.sdist.default_format = {"posix": "bztar"} + + +# recursively scan for python modules to be included +package_root_dirs = ["pungi"] +packages = set() +for package_root_dir in package_root_dirs: + for root, dirs, files in os.walk(package_root_dir): + if "__init__.py" in files: + packages.add(root.replace("/", ".")) +packages = sorted(packages) + + +setup( + name = "pungi", + version = "4.0", # make sure it matches with pungi.__version__ + description = "Distribution compose tool", + url = "http://fedorahosted.org/pungi", + author = "Dennis Gilmore", + author_email = "dgilmore@fedoraproject.org", + license = "GPLv2", + + packages = packages, + scripts = [ + 'bin/pungi-gather', + ], + data_files = [ ('/usr/share/pungi', glob.glob('share/*.xsl')), ('/usr/share/pungi', glob.glob('share/*.ks')), ('/usr/share/pungi/multilib', glob.glob('share/multilib/*')), - ] + ] ) - diff --git a/src/bin/pungi.py b/src/bin/pungi.py deleted file mode 100755 index ca7ced8..0000000 --- a/src/bin/pungi.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/python -tt -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Library General Public License for more details. 
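The package scan in the new setup.py above replaces a hard-coded package list; a sketch of what it computes, assuming a hypothetical checkout containing pungi/__init__.py and pungi/phases/__init__.py:

    import os

    packages = set()
    for root, dirs, files in os.walk("pungi"):
        if "__init__.py" in files:
            packages.add(root.replace("/", "."))
    print(sorted(packages))   # e.g. ['pungi', 'pungi.phases']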
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - -import os -import pypungi -import pypungi.config -import pypungi.ks -import subprocess - -def main(): - - config = pypungi.config.Config() - - (opts, args) = get_arguments(config) - - # You must be this high to ride if you're going to do root tasks - if os.geteuid () != 0 and (opts.do_all or opts.do_buildinstall): - print >> sys.stderr, "You must run pungi as root" - return 1 - - if opts.do_all or opts.do_buildinstall: - try: - selinux = subprocess.Popen('/usr/sbin/getenforce', - stdout=subprocess.PIPE, - stderr=open('/dev/null', 'w')).communicate()[0].strip('\n') - if selinux == 'Enforcing': - print >> sys.stdout, "WARNING: SELinux is enforcing. This may lead to a compose with selinux disabled." - print >> sys.stdout, "Consider running with setenforce 0." - except: - pass - - # Set up the kickstart parser and pass in the kickstart file we were handed - ksparser = pypungi.ks.get_ksparser(ks_path=opts.config) - - if opts.sourceisos: - config.set('pungi', 'arch', 'source') - - for part in ksparser.handler.partition.partitions: - if part.mountpoint == 'iso': - config.set('pungi', 'cdsize', str(part.size)) - - config.set('pungi', 'force', str(opts.force)) - - if config.get('pungi', 'workdirbase') == '/work': - config.set('pungi', 'workdirbase', "%s/work" % config.get('pungi', 'destdir')) - # Set up our directories - if not os.path.exists(config.get('pungi', 'destdir')): - try: - os.makedirs(config.get('pungi', 'destdir')) - except OSError, e: - print >> sys.stderr, "Error: Cannot create destination dir %s" % config.get('pungi', 'destdir') - sys.exit(1) - else: - print >> sys.stdout, "Warning: Reusing existing destination directory." - - if not os.path.exists(config.get('pungi', 'workdirbase')): - try: - os.makedirs(config.get('pungi', 'workdirbase')) - except OSError, e: - print >> sys.stderr, "Error: Cannot create working base dir %s" % config.get('pungi', 'workdirbase') - sys.exit(1) - else: - print >> sys.stdout, "Warning: Reusing existing working base directory." 
- - cachedir = config.get('pungi', 'cachedir') - - if not os.path.exists(cachedir): - try: - os.makedirs(cachedir) - except OSError, e: - print >> sys.stderr, "Error: Cannot create cache dir %s" % cachedir - sys.exit(1) - - # Set debuginfo flag - if opts.nodebuginfo: - config.set('pungi', 'debuginfo', "False") - if opts.greedy: - config.set('pungi', 'greedy', opts.greedy) - else: - # XXX: compatibility - if opts.nogreedy: - config.set('pungi', 'greedy', "none") - else: - config.set('pungi', 'greedy', "all") - config.set('pungi', 'resolve_deps', str(bool(opts.resolve_deps))) - if opts.isfinal: - config.set('pungi', 'isfinal', "True") - if opts.nohash: - config.set('pungi', 'nohash', "True") - if opts.full_archlist: - config.set('pungi', 'full_archlist', "True") - if opts.arch: - config.set('pungi', 'arch', opts.arch) - if opts.multilib: - config.set('pungi', 'multilib', " ".join(opts.multilib)) - if opts.lookaside_repos: - config.set('pungi', 'lookaside_repos', " ".join(opts.lookaside_repos)) - if opts.no_dvd: - config.set('pungi', 'no_dvd', "True") - if opts.nomacboot: - config.set('pungi', 'nomacboot', "True") - config.set("pungi", "fulltree", str(bool(opts.fulltree))) - config.set("pungi", "selfhosting", str(bool(opts.selfhosting))) - config.set("pungi", "nosource", str(bool(opts.nosource))) - config.set("pungi", "nodebuginfo", str(bool(opts.nodebuginfo))) - - if opts.lorax_conf: - config.set("lorax", "conf_file", opts.lorax_conf) - if opts.installpkgs: - config.set("lorax", "installpkgs", " ".join(opts.installpkgs)) - - # Actually do work. - mypungi = pypungi.Pungi(config, ksparser) - - with mypungi.yumlock: - if not opts.sourceisos: - if opts.do_all or opts.do_gather or opts.do_buildinstall: - mypungi._inityum() # initialize the yum object for things that need it - if opts.do_all or opts.do_gather: - mypungi.gather() - if opts.nodownload: - for line in mypungi.list_packages(): - flags_str = ",".join(line["flags"]) - if flags_str: - flags_str = "(%s)" % flags_str - sys.stdout.write("RPM%s: %s\n" % (flags_str, line["path"])) - sys.stdout.flush() - else: - mypungi.downloadPackages() - mypungi.makeCompsFile() - if not opts.nodebuginfo: - mypungi.getDebuginfoList() - if opts.nodownload: - for line in mypungi.list_debuginfo(): - flags_str = ",".join(line["flags"]) - if flags_str: - flags_str = "(%s)" % flags_str - sys.stdout.write("DEBUGINFO%s: %s\n" % (flags_str, line["path"])) - sys.stdout.flush() - else: - mypungi.downloadDebuginfo() - if not opts.nosource: - if opts.nodownload: - for line in mypungi.list_srpms(): - flags_str = ",".join(line["flags"]) - if flags_str: - flags_str = "(%s)" % flags_str - sys.stdout.write("SRPM%s: %s\n" % (flags_str, line["path"])) - sys.stdout.flush() - else: - mypungi.downloadSRPMs() - - print "RPM size: %s MiB" % (mypungi.size_packages() / 1024 ** 2) - if not opts.nodebuginfo: - print "DEBUGINFO size: %s MiB" % (mypungi.size_debuginfo() / 1024 ** 2) - if not opts.nosource: - print "SRPM size: %s MiB" % (mypungi.size_srpms() / 1024 ** 2) - - # Furthermore (but without the yumlock...) - if not opts.sourceisos: - if opts.do_all or opts.do_createrepo: - mypungi.doCreaterepo() - - if opts.do_all or opts.do_buildinstall: - if not opts.norelnotes: - mypungi.doGetRelnotes() - mypungi.doBuildinstall() - - if opts.do_all or opts.do_createiso: - mypungi.doCreateIsos() - - # Do things slightly different for src. 
- if opts.sourceisos: - # we already have all the content gathered - mypungi.topdir = os.path.join(config.get('pungi', 'destdir'), - config.get('pungi', 'version'), - config.get('pungi', 'flavor'), - 'source', 'SRPMS') - mypungi.doCreaterepo(comps=False) - if opts.do_all or opts.do_createiso: - mypungi.doCreateIsos() - - print "All done!" - -if __name__ == '__main__': - from optparse import OptionParser - import sys - import time - - today = time.strftime('%Y%m%d', time.localtime()) - - def get_arguments(config): - parser = OptionParser("%prog [--help] [options]", version="%prog 3.13") - - def set_config(option, opt_str, value, parser, config): - config.set('pungi', option.dest, value) - # When setting name, also set the iso_basename. - if option.dest == 'name': - config.set('pungi', 'iso_basename', value) - - # Pulled in from config file to be cli options as part of pykickstart conversion - parser.add_option("--name", dest="name", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='the name for your distribution (defaults to "Fedora")') - parser.add_option("--ver", dest="version", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='the version of your distribution (defaults to datestamp)') - parser.add_option("--flavor", dest="flavor", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='the flavor of your distribution spin (optional)') - parser.add_option("--destdir", dest="destdir", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='destination directory (defaults to current directory)') - parser.add_option("--cachedir", dest="cachedir", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='package cache directory (defaults to /var/cache/pungi)') - parser.add_option("--bugurl", dest="bugurl", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='the url for your bug system (defaults to http://bugzilla.redhat.com)') - parser.add_option("--selfhosting", action="store_true", dest="selfhosting", - help='build a self-hosting tree by following build dependencies (optional)') - parser.add_option("--fulltree", action="store_true", dest="fulltree", - help='build a tree that includes all packages built from corresponding source rpms (optional)') - parser.add_option("--nosource", action="store_true", dest="nosource", - help='disable gathering of source packages (optional)') - parser.add_option("--nodebuginfo", action="store_true", dest="nodebuginfo", - help='disable gathering of debuginfo packages (optional)') - parser.add_option("--nodownload", action="store_true", dest="nodownload", - help='disable downloading of packages. 
instead, print the package URLs (optional)') - parser.add_option("--norelnotes", action="store_true", dest="norelnotes", - help='disable gathering of release notes (optional); DEPRECATED') - parser.add_option("--nogreedy", action="store_true", dest="nogreedy", - help='disable pulling of all providers of package dependencies (optional)') - parser.add_option("--nodeps", action="store_false", dest="resolve_deps", default=True, - help='disable resolving dependencies') - parser.add_option("--sourceisos", default=False, action="store_true", dest="sourceisos", - help='Create the source isos (other arch runs must be done)') - parser.add_option("--force", default=False, action="store_true", - help='Force reuse of an existing destination directory (will overwrite files)') - parser.add_option("--isfinal", default=False, action="store_true", - help='Specify this is a GA tree, which causes betanag to be turned off during install') - parser.add_option("--nohash", default=False, action="store_true", - help='disable hashing the Packages trees') - parser.add_option("--full-archlist", action="store_true", - help='Use the full arch list for x86_64 (include i686, i386, etc.)') - parser.add_option("--arch", - help='Override default (uname based) arch') - parser.add_option("--greedy", metavar="METHOD", - help='Greedy method; none, all, build') - parser.add_option("--multilib", action="append", metavar="METHOD", - help='Multilib method; can be specified multiple times; recommended: devel, runtime') - parser.add_option("--lookaside-repo", action="append", dest="lookaside_repos", metavar="NAME", - help='Specify lookaside repo name(s) (packages will used for depsolving but not be included in the output)') - parser.add_option("--workdirbase", dest="workdirbase", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='base working directory (defaults to destdir + /work)') - parser.add_option("--no-dvd", default=False, action="store_true", dest="no_dvd", - help='Do not make a install DVD/CD only the netinstall image and the tree') - parser.add_option("--lorax-conf", type="string", - help='Path to lorax.conf file (optional)') - parser.add_option("-i", "--installpkgs", default=[], - action="append", metavar="STRING", - help="Package glob for lorax to install before runtime-install.tmpl runs. 
(may be listed multiple times)") - - parser.add_option("-c", "--config", dest="config", - help='Path to kickstart config file') - parser.add_option("--all-stages", action="store_true", default=True, dest="do_all", - help="Enable ALL stages") - parser.add_option("-G", action="store_true", default=False, dest="do_gather", - help="Flag to enable processing the Gather stage") - parser.add_option("-C", action="store_true", default=False, dest="do_createrepo", - help="Flag to enable processing the Createrepo stage") - parser.add_option("-B", action="store_true", default=False, dest="do_buildinstall", - help="Flag to enable processing the BuildInstall stage") - parser.add_option("-I", action="store_true", default=False, dest="do_createiso", - help="Flag to enable processing the CreateISO stage") - parser.add_option("--relnotepkgs", dest="relnotepkgs", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='Rpms which contain the release notes') - parser.add_option("--relnotefilere", dest="relnotefilere", type="string", - action="callback", callback=set_config, callback_args=(config, ), - help='Which files are the release notes -- GPL EULA') - parser.add_option("--nomacboot", action="store_true", dest="nomacboot", help='disable setting up macboot as no hfs support ') - - - (opts, args) = parser.parse_args() - - if not opts.config: - parser.error("Please specify a config file") - - if not config.get('pungi', 'flavor').isalnum() and not config.get('pungi', 'flavor') == '': - parser.error("Flavor must be alphanumeric") - - if opts.do_gather or opts.do_createrepo or opts.do_buildinstall or opts.do_createiso: - opts.do_all = False - - if opts.arch and (opts.do_all or opts.do_buildinstall): - parser.error("Cannot override arch while the BuildInstall stage is enabled") - - return (opts, args) - - main() diff --git a/src/pypungi/__init__.py b/src/pypungi/__init__.py deleted file mode 100644 index c1318ce..0000000 --- a/src/pypungi/__init__.py +++ /dev/null @@ -1,1698 +0,0 @@ -#!/usr/bin/python -tt - - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Library General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - - -import yum -import os -import re -import shutil -import sys -import gzip -import pypungi.util -import pprint -import lockfile -import logging -import urlgrabber.progress -import subprocess -import createrepo -import ConfigParser -import pylorax -from fnmatch import fnmatch - -import arch as arch_module -import multilib - - -class ReentrantYumLock(object): - """ A lock that can be acquired multiple times by the same process. 
""" - - def __init__(self, lock, log): - self.lock = lock - self.log = log - self.count = 0 - - def __enter__(self): - if not self.count: - self.log.info("Waiting on %r" % self.lock.lock_file) - self.lock.acquire() - self.log.info("Got %r" % self.lock.lock_file) - self.count = self.count + 1 - self.log.info("Lock count upped to %i" % self.count) - - def __exit__(self, type, value, tb): - self.count = self.count - 1 - self.log.info("Lock count downed to %i" % self.count) - self.log.info("%r %r %r" % (type, value, tb)) - if not self.count: - self.lock.release() - self.log.info("Released %r" % self.lock.lock_file) - - -def yumlocked(method): - """ A locking decorator. """ - def wrapper(self, *args, **kwargs): - with self.yumlock: - return method(self, *args, **kwargs) - # TODO - replace argspec, signature, etc.. - return wrapper - - -def is_debug(po): - if "debuginfo" in po.name: - return True - return False - - -def is_source(po): - if po.arch in ("src", "nosrc"): - return True - return False - - -def is_noarch(po): - if po.arch == "noarch": - return True - return False - - -def is_package(po): - if is_debug(po): - return False - if is_source(po): - return False - return True - - -class MyConfigParser(ConfigParser.ConfigParser): - """A subclass of ConfigParser which does not lowercase options""" - - def optionxform(self, optionstr): - return optionstr - - -class PungiBase(object): - """The base Pungi class. Set up config items and logging here""" - - def __init__(self, config): - self.config = config - - # ARCH setup - self.tree_arch = self.config.get('pungi', 'arch') - self.yum_arch = arch_module.tree_arch_to_yum_arch(self.tree_arch) - full_archlist = self.config.getboolean('pungi', 'full_archlist') - self.valid_arches = arch_module.get_valid_arches(self.tree_arch, multilib=full_archlist) - self.valid_arches.append("src") # throw source in there, filter it later - self.valid_native_arches = arch_module.get_valid_arches(self.tree_arch, multilib=False) - self.valid_multilib_arches = arch_module.get_valid_multilib_arches(self.tree_arch) - - # arch: compatible arches - self.compatible_arches = {} - for i in self.valid_arches: - self.compatible_arches[i] = arch_module.get_compatible_arches(i) - - self.doLoggerSetup() - self.workdir = os.path.join(self.config.get('pungi', 'workdirbase'), - self.config.get('pungi', 'flavor'), - self.tree_arch) - - - - def doLoggerSetup(self): - """Setup our logger""" - - logdir = os.path.join(self.config.get('pungi', 'destdir'), 'logs') - - pypungi.util._ensuredir(logdir, None, force=True) # Always allow logs to be written out - - if self.config.get('pungi', 'flavor'): - logfile = os.path.join(logdir, '%s.%s.log' % (self.config.get('pungi', 'flavor'), - self.tree_arch)) - else: - logfile = os.path.join(logdir, '%s.log' % (self.tree_arch)) - - # Create the root logger, that will log to our file - logging.basicConfig(level=logging.DEBUG, - format='%(name)s.%(levelname)s: %(message)s', - filename=logfile) - - -class CallBack(urlgrabber.progress.TextMeter): - """A call back function used with yum.""" - - def progressbar(self, current, total, name=None): - return - - -class PungiYum(yum.YumBase): - """Subclass of Yum""" - - def __init__(self, config): - self.pungiconfig = config - yum.YumBase.__init__(self) - - def doLoggingSetup(self, debuglevel, errorlevel, syslog_ident=None, syslog_facility=None): - """Setup the logging facility.""" - - logdir = os.path.join(self.pungiconfig.get('pungi', 'destdir'), 'logs') - if not os.path.exists(logdir): - os.makedirs(logdir) - if 
self.pungiconfig.get('pungi', 'flavor'): - logfile = os.path.join(logdir, '%s.%s.log' % (self.pungiconfig.get('pungi', 'flavor'), - self.pungiconfig.get('pungi', 'arch'))) - else: - logfile = os.path.join(logdir, '%s.log' % (self.pungiconfig.get('pungi', 'arch'))) - - yum.logging.basicConfig(level=yum.logging.DEBUG, filename=logfile) - - def doFileLogSetup(self, uid, logfile): - # This function overrides a yum function, allowing pungi to control - # the logging. - pass - - def _compare_providers(self, *args, **kwargs): - # HACK: always prefer 64bit over 32bit packages - result = yum.YumBase._compare_providers(self, *args, **kwargs) - if len(result) >= 2: - pkg1 = result[0][0] - pkg2 = result[1][0] - if pkg1.name == pkg2.name: - best_arch = self.arch.get_best_arch_from_list([pkg1.arch, pkg2.arch], self.arch.canonarch) - if best_arch != "noarch" and best_arch != pkg1.arch: - result[0:1] = result[0:1:-1] - return result - -class Pungi(pypungi.PungiBase): - def __init__(self, config, ksparser): - pypungi.PungiBase.__init__(self, config) - - # Set our own logging name space - self.logger = logging.getLogger('Pungi') - - # Create a lock object for later use. - filename = self.config.get('pungi', 'cachedir') + "/yumlock" - lock = lockfile.LockFile(filename) - self.yumlock = ReentrantYumLock(lock, self.logger) - - # Create the stdout/err streams and only send INFO+ stuff there - formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s') - console = logging.StreamHandler() - console.setFormatter(formatter) - console.setLevel(logging.INFO) - self.logger.addHandler(console) - - self.destdir = self.config.get('pungi', 'destdir') - self.archdir = os.path.join(self.destdir, - self.config.get('pungi', 'version'), - self.config.get('pungi', 'flavor'), - self.tree_arch) - - self.topdir = os.path.join(self.archdir, 'os') - self.isodir = os.path.join(self.archdir, self.config.get('pungi','isodir')) - - pypungi.util._ensuredir(self.workdir, self.logger, force=True) - - self.common_files = [] - self.infofile = os.path.join(self.config.get('pungi', 'destdir'), - self.config.get('pungi', 'version'), - '.composeinfo') - - - self.ksparser = ksparser - - self.resolved_deps = {} # list the deps we've already resolved, short circuit. - self.excluded_pkgs = {} # list the packages we've already excluded. 
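- # (these dicts are plain memoization: excludePackages() and
- # get_package_deps() below consult them so the same package or
- # requirement is never processed twice across gather passes)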
- self.seen_pkgs = {} # list the packages we've already seen so we can check all deps only once - self.multilib_methods = self.config.get('pungi', 'multilib').split(" ") - - # greedy methods: - # * none: only best match package - # * all: all packages matching a provide - # * build: best match package + all other packages from the same SRPM having the same provide - self.greedy_method = self.config.get('pungi', 'greedy') - - self.lookaside_repos = self.config.get('pungi', 'lookaside_repos').split(" ") - self.sourcerpm_arch_map = {} # {sourcerpm: set[arches]} - used for gathering debuginfo - - # package object lists - self.po_list = set() - self.srpm_po_list = set() - self.debuginfo_po_list = set() - - # get_srpm_po() cache - self.sourcerpm_srpmpo_map = {} - - # flags - self.input_packages = set() # packages specified in %packages kickstart section including those defined via comps groups - self.comps_packages = set() # packages specified in %packages kickstart section *indirectly* via comps groups - self.prepopulate_packages = set() # packages specified in %prepopulate kickstart section - self.fulltree_packages = set() - self.langpack_packages = set() - self.multilib_packages = set() - - # already processed packages - self.completed_add_srpms = set() # srpms - self.completed_debuginfo = set() # rpms - self.completed_depsolve = set() # rpms - self.completed_langpacks = set() # rpms - self.completed_multilib = set() # rpms - self.completed_fulltree = set() # srpms - self.completed_selfhosting = set() # srpms - self.completed_greedy_build = set() # po.sourcerpm - - self.is_fulltree = self.config.getboolean("pungi", "fulltree") - self.is_selfhosting = self.config.getboolean("pungi", "selfhosting") - self.is_sources = not self.config.getboolean("pungi", "nosource") - self.is_debuginfo = not self.config.getboolean("pungi", "nodebuginfo") - self.is_resolve_deps = self.config.getboolean("pungi", "resolve_deps") - - self.fulltree_excludes = set(self.ksparser.handler.fulltree_excludes) - - def _add_yum_repo(self, name, url, mirrorlist=False, groups=True, - cost=1000, includepkgs=None, excludepkgs=None, - proxy=None): - """This function adds a repo to the yum object. 
- name: Name of the repo - url: Full url to the repo - mirrorlist: Bool for whether or not url is a mirrorlist - groups: Bool for whether or not to use groupdata from this repo - cost: an optional int representing the cost of a repo - includepkgs: An optional list of includes to use - excludepkgs: An optional list of excludes to use - proxy: An optional proxy to use - """ - includepkgs = includepkgs or [] - excludepkgs = excludepkgs or [] - - self.logger.info('Adding repo %s' % name) - thisrepo = yum.yumRepo.YumRepository(name) - thisrepo.name = name - # add excludes and such here when pykickstart gets them - if mirrorlist: - thisrepo.mirrorlist = yum.parser.varReplace(url, - self.ayum.conf.yumvar) - self.mirrorlists.append(thisrepo.mirrorlist) - self.logger.info('Mirrorlist for repo %s is %s' % - (thisrepo.name, thisrepo.mirrorlist)) - else: - thisrepo.baseurl = yum.parser.varReplace(url, - self.ayum.conf.yumvar) - self.repos.extend(thisrepo.baseurl) - self.logger.info('URL for repo %s is %s' % - (thisrepo.name, thisrepo.baseurl)) - thisrepo.basecachedir = self.ayum.conf.cachedir - thisrepo.enablegroups = groups - # This is until yum uses this failover by default - thisrepo.failovermethod = 'priority' - thisrepo.exclude = excludepkgs - thisrepo.includepkgs = includepkgs - thisrepo.cost = cost - # Yum doesn't like proxy being None - if proxy: - thisrepo.proxy = proxy - self.ayum.repos.add(thisrepo) - self.ayum.repos.enableRepo(thisrepo.id) - self.ayum._getRepos(thisrepo=thisrepo.id, doSetup=True) - # Set the repo callback. - self.ayum.repos.setProgressBar(CallBack()) - self.ayum.repos.callback = CallBack() - thisrepo.metadata_expire = 0 - thisrepo.mirrorlist_expire = 0 - if os.path.exists(os.path.join(thisrepo.cachedir, 'repomd.xml')): - os.remove(os.path.join(thisrepo.cachedir, 'repomd.xml')) - - @yumlocked - def _inityum(self): - """Initialize the yum object. Only needed for certain actions.""" - - # Create a yum object to use - self.repos = [] - self.mirrorlists = [] - self.ayum = PungiYum(self.config) - self.ayum.doLoggingSetup(6, 6) - yumconf = yum.config.YumConf() - yumconf.debuglevel = 6 - yumconf.errorlevel = 6 - yumconf.cachedir = self.config.get('pungi', 'cachedir') - yumconf.persistdir = "/var/lib/yum" # keep at default, gets appended to installroot - yumconf.installroot = os.path.join(self.workdir, 'yumroot') - yumconf.uid = os.geteuid() - yumconf.cache = 0 - yumconf.failovermethod = 'priority' - yumconf.deltarpm = 0 - yumvars = yum.config._getEnvVar() - yumvars['releasever'] = self.config.get('pungi', 'version') - yumvars['basearch'] = yum.rpmUtils.arch.getBaseArch(myarch=self.tree_arch) - yumconf.yumvar = yumvars - self.ayum._conf = yumconf - # I have no idea why this fixes a traceback, but James says it does. - del self.ayum.prerepoconf - self.ayum.repos.setCacheDir(self.ayum.conf.cachedir) - - self.ayum.arch.setup_arch(self.yum_arch) - - # deal with our repos - try: - self.ksparser.handler.repo.methodToRepo() - except: - pass - - for repo in self.ksparser.handler.repo.repoList: - if repo.mirrorlist: - # The not bool() thing is because pykickstart is yes/no on - # whether to ignore groups, but yum is a yes/no on whether to - # include groups. Awkward. 
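- # (i.e. enablegroups is passed as "not bool(repo.ignoregroups)", so a
- # kickstart repo marked --ignoregroups=true ends up with yum group
- # data disabled)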
- self._add_yum_repo(repo.name, repo.mirrorlist, - mirrorlist=True, - groups=not bool(repo.ignoregroups), - cost=repo.cost, - includepkgs=repo.includepkgs, - excludepkgs=repo.excludepkgs, - proxy=repo.proxy) - else: - self._add_yum_repo(repo.name, repo.baseurl, - mirrorlist=False, - groups=not bool(repo.ignoregroups), - cost=repo.cost, - includepkgs=repo.includepkgs, - excludepkgs=repo.excludepkgs, - proxy=repo.proxy) - - self.logger.info('Getting sacks for arches %s' % self.valid_arches) - self.ayum._getSacks(archlist=self.valid_arches) - - def _filtersrcdebug(self, po): - """Filter out package objects that are of 'src' arch.""" - - if po.arch == 'src' or 'debuginfo' in po.name: - return False - - return True - - def add_package(self, po, msg=None): - if not is_package(po): - raise ValueError("Not a binary package: %s" % po) - if msg: - self.logger.info(msg) - if po not in self.po_list: - self.po_list.add(po) - self.ayum.install(po) - self.sourcerpm_arch_map.setdefault(po.sourcerpm, set()).add(po.arch) - - def add_debuginfo(self, po, msg=None): - if not is_debug(po): - raise ValueError("Not a debuginfo package: %s" % po) - if msg: - self.logger.info(msg) - if po not in self.debuginfo_po_list: - self.debuginfo_po_list.add(po) - - def add_source(self, po, msg=None): - if not is_source(po): - raise ValueError("Not a source package: %s" % po) - if msg: - self.logger.info(msg) - if po not in self.srpm_po_list: - self.srpm_po_list.add(po) - - def verifyCachePkg(self, po, path): # Stolen from yum - """check the package checksum vs the cache - return True if pkg is good, False if not""" - - (csum_type, csum) = po.returnIdSum() - - try: - filesum = yum.misc.checksum(csum_type, path) - except yum.Errors.MiscError: - return False - - if filesum != csum: - return False - - return True - - def excludePackages(self, pkg_sack): - """exclude packages according to config file""" - if not pkg_sack: - return pkg_sack - - excludes = [] # list of (name, arch, pattern, multilib) - for i in self.ksparser.handler.packages.excludedList: - pattern = i - multilib = False - if i.endswith(".+"): - multilib = True - i = i[:-2] - name, arch = arch_module.split_name_arch(i) - excludes.append((name, arch, pattern, multilib)) - - for name in self.ksparser.handler.multilib_blacklist: - excludes.append((name, None, "multilib-blacklist: %s" % name, True)) - - for pkg in pkg_sack[:]: - for name, arch, exclude_pattern, multilib in excludes: - if fnmatch(pkg.name, name): - if not arch or fnmatch(pkg.arch, arch): - if multilib and pkg.arch not in self.valid_multilib_arches: - continue - if pkg.nvra not in self.excluded_pkgs: - self.logger.info("Excluding %s.%s (pattern: %s)" % (pkg.name, pkg.arch, exclude_pattern)) - self.excluded_pkgs[pkg.nvra] = pkg - pkg_sack.remove(pkg) - break - - return pkg_sack - - def get_package_deps(self, po): - """Add the dependencies for a given package to the - transaction info""" - added = set() - if po in self.completed_depsolve: - return added - self.completed_depsolve.add(po) - - self.logger.info('Checking deps of %s.%s' % (po.name, po.arch)) - - reqs = po.requires - provs = po.provides - - for req in reqs: - if req in self.resolved_deps: - continue - r, f, v = req - if r.startswith('rpmlib(') or r.startswith('config('): - continue - if req in provs: - continue - - try: - deps = self.ayum.whatProvides(r, f, v).returnPackages() - deps = self.excludePackages(deps) - if not deps: - self.logger.warn("Unresolvable dependency %s in %s.%s" % (r, po.name, po.arch)) - continue - - if self.greedy_method == "all": 
- deps = yum.packageSack.ListPackageSack(deps).returnNewestByNameArch() - else: - found = False - for dep in deps: - if dep in self.po_list: - # HACK: there can be builds in the input list on which we want to apply the "build" greedy rules - if self.greedy_method == "build" and dep.sourcerpm not in self.completed_greedy_build: - break - found = True - break - if found: - deps = [] - else: - all_deps = deps - deps = [self.ayum._bestPackageFromList(all_deps)] - if self.greedy_method == "build": - # handle "build" greedy method - if deps: - build_po = deps[0] - if is_package(build_po): - if build_po.arch != "noarch" and build_po.arch not in self.valid_multilib_arches: - all_deps = [ i for i in all_deps if i.arch not in self.valid_multilib_arches ] - for dep in all_deps: - if dep != build_po and dep.sourcerpm == build_po.sourcerpm: - deps.append(dep) - self.completed_greedy_build.add(dep.sourcerpm) - - for dep in deps: - if dep not in added: - msg = 'Added %s.%s (repo: %s) for %s.%s' % (dep.name, dep.arch, dep.repoid, po.name, po.arch) - self.add_package(dep, msg) - added.add(dep) - - except (yum.Errors.InstallError, yum.Errors.YumBaseError), ex: - self.logger.warn("Unresolvable dependency %s in %s.%s (repo: %s)" % (r, po.name, po.arch, po.repoid)) - continue - self.resolved_deps[req] = None - - for add in added: - self.get_package_deps(add) - return added - - def add_langpacks(self, po_list=None): - po_list = po_list or self.po_list - added = set() - - for po in sorted(po_list): - if po in self.completed_langpacks: - continue - - # get all langpacks matching the package name - langpacks = [ i for i in self.langpacks if i["name"] == po.name ] - if not langpacks: - continue - - self.completed_langpacks.add(po) - - for langpack in langpacks: - pattern = langpack["install"] % "*" # replace '%s' with '*' - exactmatched, matched, unmatched = yum.packages.parsePackages(self.all_pkgs, [pattern], casematch=1, pkgdict=self.pkg_refs.copy()) - matches = filter(self._filtersrcdebug, exactmatched + matched) - matches = [ i for i in matches if not i.name.endswith("-devel") and not i.name.endswith("-static") and i.name != "man-pages-overrides" ] - matches = [ i for i in matches if fnmatch(i.name, pattern) ] - - packages_by_name = {} - for i in matches: - packages_by_name.setdefault(i.name, []).append(i) - - for i, pkg_sack in packages_by_name.iteritems(): - pkg_sack = self.excludePackages(pkg_sack) - match = self.ayum._bestPackageFromList(pkg_sack) - msg = 'Added langpack %s.%s (repo: %s) for package %s (pattern: %s)' % (match.name, match.arch, match.repoid, po.name, pattern) - self.add_package(match, msg) - self.completed_langpacks.add(match) # assuming langpack doesn't have langpacks - added.add(match) - - return added - - def add_multilib(self, po_list=None): - po_list = po_list or self.po_list - added = set() - - if not self.multilib_methods: - return added - - for po in sorted(po_list): - if po in self.completed_multilib: - continue - - if po.arch in ("noarch", "src", "nosrc"): - continue - - if po.arch in self.valid_multilib_arches: - continue - - self.completed_multilib.add(po) - - matches = self.ayum.pkgSack.searchNevra(name=po.name, ver=po.version, rel=po.release) - matches = [i for i in matches if i.arch in self.valid_multilib_arches] - if not matches: - continue - matches = self.excludePackages(matches) - match = self.ayum._bestPackageFromList(matches) - if not match: - continue - - if po.name in self.ksparser.handler.multilib_whitelist: - msg = "Added multilib package %s.%s (repo: %s) for package 
%s.%s (method: %s)" % (match.name, match.arch, match.repoid, po.name, po.arch, "multilib-whitelist") - self.add_package(match, msg) - self.completed_multilib.add(match) - added.add(match) - continue - - method = multilib.po_is_multilib(po, self.multilib_methods) - if not method: - continue - msg = "Added multilib package %s.%s (repo: %s) for package %s.%s (method: %s)" % (match.name, match.arch, match.repoid, po.name, po.arch, method) - self.add_package(match, msg) - self.completed_multilib.add(match) - added.add(match) - return added - - def getPackagesFromGroup(self, group): - """Get a list of package names from a ksparser group object - - Returns a list of package names""" - - packages = [] - - # Check if we have the group - if not self.ayum.comps.has_group(group.name): - self.logger.error("Group %s not found in comps!" % group) - return packages - - # Get the group object to work with - groupobj = self.ayum.comps.return_group(group.name) - - # Add the mandatory packages - packages.extend(groupobj.mandatory_packages.keys()) - - # Add the default packages unless we don't want them - if group.include == 1: - packages.extend(groupobj.default_packages.keys()) - - # Add the optional packages if we want them - if group.include == 2: - packages.extend(groupobj.default_packages.keys()) - packages.extend(groupobj.optional_packages.keys()) - - # Deal with conditional packages - # Populate a dict with the name of the required package and value - # of the package objects it would bring in. To be used later if - # we match the conditional. - for condreq, cond in groupobj.conditional_packages.iteritems(): - matches = self.ayum.pkgSack.searchNevra(name=condreq) - if matches: - if self.greedy_method != "all": - # works for both "none" and "build" greedy methods - matches = [self.ayum._bestPackageFromList(matches)] - self.ayum.tsInfo.conditionals.setdefault(cond, []).extend(matches) - - return packages - - def _addDefaultGroups(self, excludeGroups=None): - """Cycle through the groups and return a list of the ones that are - default.""" - excludeGroups = excludeGroups or [] - - # This is mostly stolen from anaconda. - groups = map(lambda x: x.groupid, - filter(lambda x: x.default, self.ayum.comps.groups)) - - groups = [x for x in groups if x not in excludeGroups] - - self.logger.debug('Add default groups %s' % groups) - return groups - - def get_langpacks(self): - try: - self.langpacks = list(self.ayum.comps.langpacks) - except AttributeError: - # old yum - self.logger.warning("Could not get langpacks via yum.comps. 
You may need to update yum.") - self.langpacks = [] - except yum.Errors.GroupsError: - # no groups or no comps at all - self.logger.warning("Could not get langpacks due to missing comps in repodata or --ignoregroups=true option.") - self.langpacks = [] - - def getPackageObjects(self): - """Cycle through the list of packages and get package object matches.""" - - searchlist = [] # The list of package names/globs to search for - matchdict = {} # A dict of objects to names - excludeGroups = [] # A list of groups for removal defined in the ks file - - # precompute pkgs and pkg_refs to speed things up - self.all_pkgs = list(set(self.ayum.pkgSack.returnPackages())) - self.all_pkgs = self.excludePackages(self.all_pkgs) - - - lookaside_nvrs = set() - for po in self.all_pkgs: - if po.repoid in self.lookaside_repos: - lookaside_nvrs.add(po.nvra) - for po in self.all_pkgs[:]: - if po.repoid not in self.lookaside_repos and po.nvra in lookaside_nvrs: - self.logger.debug("Removed %s (repo: %s), because it's also in a lookaside repo" % (po, po.repoid)) - self.all_pkgs.remove(po) - - self.pkg_refs = yum.packages.buildPkgRefDict(self.all_pkgs, casematch=True) - - self.get_langpacks() - - # First remove the excludes - self.ayum.excludePackages() - - # Get the groups set for removal - for group in self.ksparser.handler.packages.excludedGroupList: - excludeGroups.append(str(group)[1:]) - - if "core" in [ i.groupid for i in self.ayum.comps.groups ]: - if "core" not in [ i.name for i in self.ksparser.handler.packages.groupList ]: - self.logger.warning("The @core group is no longer added by default; Please add @core to the kickstart if you want it in.") - - if "base" in [ i.groupid for i in self.ayum.comps.groups ]: - if "base" not in [ i.name for i in self.ksparser.handler.packages.groupList ]: - if self.ksparser.handler.packages.addBase: - self.logger.warning("The --nobase kickstart option is no longer supported; Please add @base to the kickstart if you want it in.") - - # Check to see if we want all the defaults - if self.ksparser.handler.packages.default: - for group in self._addDefaultGroups(excludeGroups): - self.ksparser.handler.packages.add(['@%s' % group]) - - # Get a list of packages from groups - comps_package_names = set() - for group in self.ksparser.handler.packages.groupList: - comps_package_names.update(self.getPackagesFromGroup(group)) - searchlist.extend(sorted(comps_package_names)) - - # Add packages - searchlist.extend(self.ksparser.handler.packages.packageList) - input_packages = searchlist[:] - - # Add prepopulate packages - prepopulate_packages = self.ksparser.handler.prepopulate - searchlist.extend(prepopulate_packages) - - # Make the search list unique - searchlist = yum.misc.unique(searchlist) - - for name in searchlist: - pattern = name - multilib = False - if name.endswith(".+"): - name = name[:-2] - multilib = True - - if self.greedy_method == "all" and name == "system-release": - # HACK: handles a special case, when system-release virtual provide is specified in the greedy mode - matches = self.ayum.whatProvides(name, None, None).returnPackages() - else: - exactmatched, matched, unmatched = yum.packages.parsePackages(self.all_pkgs, [name], casematch=1, pkgdict=self.pkg_refs.copy()) - matches = exactmatched + matched - - matches = filter(self._filtersrcdebug, matches) - - if multilib and self.greedy_method != "all": - matches = [ po for po in matches if po.arch in self.valid_multilib_arches ] - - if not matches: - self.logger.warn('Could not find a match for %s in any configured 
repo' % pattern) - continue - - packages_by_name = {} - for po in matches: - packages_by_name.setdefault(po.name, []).append(po) - - for name, packages in packages_by_name.iteritems(): - packages = self.excludePackages(packages or []) - if not packages: - continue - if self.greedy_method == "all": - packages = yum.packageSack.ListPackageSack(packages).returnNewestByNameArch() - else: - # works for both "none" and "build" greedy methods - packages = [self.ayum._bestPackageFromList(packages)] - - if name in input_packages: - self.input_packages.update(packages) - if name in comps_package_names: - self.comps_packages.update(packages) - - for po in packages: - msg = 'Found %s.%s' % (po.name, po.arch) - self.add_package(po, msg) - name_arch = "%s.%s" % (po.name, po.arch) - if name_arch in prepopulate_packages: - self.prepopulate_packages.add(po) - - if not self.po_list: - raise RuntimeError("No packages found") - - self.logger.info('Finished gathering package objects.') - - def gather(self): - - # get package objects according to the input list - self.getPackageObjects() - if self.is_sources: - self.createSourceHashes() - - pass_num = 0 - added = set() - while 1: - if pass_num > 0 and not added: - break - added = set() - pass_num += 1 - self.logger.info("Pass #%s" % pass_num) - - if self.is_resolve_deps: - # get conditional deps (defined in comps) - for txmbr in self.ayum.tsInfo: - if not txmbr.po in self.po_list: - if not is_package(txmbr.po): - # we don't want sources which can be pulled in, because 'src' arch is part of self.valid_arches - continue - self.add_package(txmbr.po) - - # resolve deps - if self.is_resolve_deps: - for po in sorted(self.po_list): - added.update(self.get_package_deps(po)) - - if self.is_sources: - added_srpms = self.add_srpms() - added.update(added_srpms) - - if self.is_selfhosting: - for srpm_po in sorted(added_srpms): - added.update(self.get_package_deps(srpm_po)) - - if self.is_fulltree: - new = self.add_fulltree() - self.fulltree_packages.update(new) - self.fulltree_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) - added.update(new) - if added: - continue - - # add langpacks - new = self.add_langpacks(self.po_list) - self.langpack_packages.update(new) - if self.is_sources: - self.langpack_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) - added.update(new) - if added: - continue - - # add multilib packages - new = self.add_multilib(self.po_list) - self.multilib_packages.update(new) - self.multilib_packages.update([ self.sourcerpm_srpmpo_map[i.sourcerpm] for i in new ]) - added.update(new) - if added: - continue - - def get_srpm_po(self, po): - """Given a package object, get a package object for the corresponding source rpm.""" - - # return srpm_po from cache if available - srpm_po = self.sourcerpm_srpmpo_map.get(po.sourcerpm, None) - if srpm_po is not None: - return srpm_po - - # arch can be "src" or "nosrc" - nvr, arch, _ = po.sourcerpm.rsplit(".", 2) - name, ver, rel = nvr.rsplit('-', 2) - - # ... but even "nosrc" packages are stored as "src" in repodata - srpm_po_list = self.ayum.pkgSack.searchNevra(name=name, ver=ver, rel=rel, arch="src") - if not srpm_po_list: - raise RuntimeError("Cannot find a source rpm for %s" % po.sourcerpm) - srpm_po = srpm_po_list[0] - self.sourcerpm_srpmpo_map[po.sourcerpm] = srpm_po - return srpm_po - - def createSourceHashes(self): - """Create two dicts - one that maps binary POs to source POs, and - one that maps a single source PO to all binary POs it produces. 
- Requires yum still configured.""" - self.src_by_bin = {} - self.bin_by_src = {} - self.logger.info("Generating source <-> binary package mappings") - #(dummy1, everything, dummy2) = yum.packages.parsePackages(self.all_pkgs, ['*'], pkgdict=self.pkg_refs.copy()) - failed = [] - for po in self.all_pkgs: - if is_source(po): - continue - try: - srpmpo = self.get_srpm_po(po) - except RuntimeError: - failed.append(po.sourcerpm) - continue - - self.src_by_bin[po] = srpmpo - if self.bin_by_src.has_key(srpmpo): - self.bin_by_src[srpmpo].append(po) - else: - self.bin_by_src[srpmpo] = [po] - - if failed: - self.logger.info("The following srpms could not be found: %s" % ( - pprint.pformat(list(sorted(failed))))) - self.logger.info("Couldn't find %i of %i srpms." % ( - len(failed), len(self.src_by_bin))) - raise RuntimeError("Could not find all srpms.") - - def add_srpms(self, po_list=None): - """Cycle through the list of package objects and - find the sourcerpm for them. Requires yum still - configured and a list of package objects""" - - srpms = set() - po_list = po_list or self.po_list - for po in sorted(po_list): - srpm_po = self.sourcerpm_srpmpo_map[po.sourcerpm] - if srpm_po in self.completed_add_srpms: - continue - msg = "Added source package %s.%s (repo: %s)" % (srpm_po.name, srpm_po.arch, srpm_po.repoid) - self.add_source(srpm_po, msg) - - # flags - if po in self.input_packages: - self.input_packages.add(srpm_po) - if po in self.fulltree_packages: - self.fulltree_packages.add(srpm_po) - if po in self.langpack_packages: - self.langpack_packages.add(srpm_po) - if po in self.multilib_packages: - self.multilib_packages.add(srpm_po) - - self.completed_add_srpms.add(srpm_po) - srpms.add(srpm_po) - return srpms - - def add_fulltree(self, srpm_po_list=None): - """Cycle through all package objects, and add any - that correspond to a source rpm that we are including. - Requires yum still configured and a list of package - objects.""" - - self.logger.info("Completing package set") - - srpm_po_list = srpm_po_list or self.srpm_po_list - srpms = [] - for srpm_po in srpm_po_list: - if srpm_po in self.completed_fulltree: - continue - if srpm_po.name not in self.fulltree_excludes: - srpms.append(srpm_po) - self.completed_fulltree.add(srpm_po) - - added = set() - for srpm_po in srpms: - include_native = False - include_multilib = False - has_native = False - has_multilib = False - - for po in self.excludePackages(self.bin_by_src[srpm_po]): - if not is_package(po): - continue - if po.arch == "noarch": - continue - if po not in self.po_list: - # process only already included packages - if po.arch in self.valid_multilib_arches: - has_multilib = True - elif po.arch in self.valid_native_arches: - has_native = True - continue - if po.arch in self.valid_multilib_arches and self.greedy_method == "all": - include_multilib = True - elif po.arch in self.valid_native_arches: - include_native = True - - # XXX: this is very fragile! - # Do not make any changes unless you really know what you're doing! - if not include_native: - # if there's no native package already pulled in... 
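- # (fallback below: if the SRPM produced native binaries, pull those in,
- # unless multilib packages are already being pulled; if only multilib
- # binaries exist, pull those instead to complete the package set)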
- if has_native and not include_multilib: - # include all native packages, but only if we're not pulling multilib already - # SCENARIO: a noarch package was already pulled in and there are x86_64 and i686 packages -> we want x86_64 in to complete the package set - include_native = True - elif has_multilib: - # SCENARIO: a noarch package was already pulled in and there are no x86_64 packages; we want i686 in to complete the package set - include_multilib = True - - for po in self.excludePackages(self.bin_by_src[srpm_po]): - if not is_package(po): - continue - if po in self.po_list: - continue - if po.arch != "noarch": - if po.arch in self.valid_multilib_arches: - if not include_multilib: - continue - if po.arch in self.valid_native_arches: - if not include_native: - continue - msg = "Added %s.%s (repo: %s) to complete package set" % (po.name, po.arch, po.repoid) - self.add_package(po, msg) - return added - - def getDebuginfoList(self): - """Cycle through the list of package objects and find - debuginfo rpms for them. Requires yum still - configured and a list of package objects""" - - added = set() - for po in self.all_pkgs: - if not is_debug(po): - continue - - if po.sourcerpm not in self.sourcerpm_arch_map: - # TODO: print a warning / throw an error - continue - if not (set(self.compatible_arches[po.arch]) & set(self.sourcerpm_arch_map[po.sourcerpm]) - set(["noarch"])): - # skip all incompatible arches - # this pulls i386 debuginfo for a i686 package for example - continue - msg = 'Added debuginfo %s.%s (repo: %s)' % (po.name, po.arch, po.repoid) - self.add_debuginfo(po, msg) - - # flags - srpm_po = self.sourcerpm_srpmpo_map[po.sourcerpm] - if srpm_po in self.input_packages: - self.input_packages.add(po) - if srpm_po in self.fulltree_packages: - self.fulltree_packages.add(po) - if srpm_po in self.langpack_packages: - self.langpack_packages.add(po) - if srpm_po in self.multilib_packages: - self.multilib_packages.add(po) - - added.add(po) - return added - - def _downloadPackageList(self, polist, relpkgdir): - """Cycle through the list of package objects and - download them from their respective repos.""" - - downloads = [] - for pkg in polist: - downloads.append('%s.%s' % (pkg.name, pkg.arch)) - downloads.sort() - self.logger.info("Download list: %s" % downloads) - - pkgdir = os.path.join(self.config.get('pungi', 'destdir'), - self.config.get('pungi', 'version'), - self.config.get('pungi', 'flavor'), - relpkgdir) - - # Ensure the pkgdir exists, force if requested, and make sure we clean it out - if relpkgdir.endswith('SRPMS'): - # Since we share source dirs with other arches don't clean, but do allow us to use it - pypungi.util._ensuredir(pkgdir, self.logger, force=True, clean=False) - else: - pypungi.util._ensuredir(pkgdir, self.logger, force=self.config.getboolean('pungi', 'force'), clean=True) - - probs = self.ayum.downloadPkgs(polist) - - if len(probs.keys()) > 0: - self.logger.error("Errors were encountered while downloading packages.") - for key in probs.keys(): - errors = yum.misc.unique(probs[key]) - for error in errors: - self.logger.error("%s: %s" % (key, error)) - sys.exit(1) - - for po in polist: - basename = os.path.basename(po.relativepath) - - local = po.localPkg() - if self.config.getboolean('pungi', 'nohash'): - target = os.path.join(pkgdir, basename) - else: - target = os.path.join(pkgdir, po.name[0].lower(), basename) - # Make sure we have the hashed dir available to link into we only want dirs there to corrospond to packages - # that we are including so we can not just 
do A-Z 0-9 - pypungi.util._ensuredir(os.path.join(pkgdir, po.name[0].lower()), self.logger, force=True, clean=False) - - # Link downloaded package in (or link package from file repo) - try: - pypungi.util._link(local, target, self.logger, force=True) - continue - except: - self.logger.error("Unable to link %s from the yum cache." % po.name) - sys.exit(1) - - self.logger.info('Finished downloading packages.') - - @yumlocked - def downloadPackages(self): - """Download the package objects obtained in getPackageObjects().""" - - self._downloadPackageList(self.po_list, - os.path.join(self.tree_arch, - self.config.get('pungi', 'osdir'), - self.config.get('pungi', 'product_path'))) - - def makeCompsFile(self): - """Gather any comps files we can from repos and merge them into one.""" - - ourcompspath = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version'))) - - # Filter out things we don't include - ourgroups = [] - for item in self.ksparser.handler.packages.groupList: - g = self.ayum.comps.return_group(item.name) - if g: - ourgroups.append(g.groupid) - allgroups = [g.groupid for g in self.ayum.comps.get_groups()] - for group in allgroups: - if group not in ourgroups and not self.ayum.comps.return_group(group).langonly: - self.logger.info('Removing extra group %s from comps file' % (group,)) - del self.ayum.comps._groups[group] - - groups = [g.groupid for g in self.ayum.comps.get_groups()] - envs = self.ayum.comps.get_environments() - for env in envs: - for group in env.groups: - if group not in groups: - self.logger.info('Removing incomplete environment %s from comps file' % (env,)) - del self.ayum.comps._environments[env.environmentid] - break - - ourcomps = open(ourcompspath, 'w') - ourcomps.write(self.ayum.comps.xml()) - ourcomps.close() - - # Disable this until https://bugzilla.redhat.com/show_bug.cgi?id=442097 is fixed. 
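- # (when re-enabled, the commented-out block below would post-process the
- # merged comps file with comps-cleanup.xsl via xsltproc)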
- # Run the xslt filter over our comps file - #compsfilter = ['/usr/bin/xsltproc', '--novalid'] - #compsfilter.append('-o') - #compsfilter.append(ourcompspath) - #compsfilter.append('/usr/share/pungi/comps-cleanup.xsl') - #compsfilter.append(ourcompspath) - - #pypungi.util._doRunCommand(compsfilter, self.logger) - - @yumlocked - def downloadSRPMs(self): - """Cycle through the list of srpms and - find the package objects for them, Then download them.""" - - # do the downloads - self._downloadPackageList(self.srpm_po_list, os.path.join('source', 'SRPMS')) - - @yumlocked - def downloadDebuginfo(self): - """Cycle through the list of debuginfo rpms and - download them.""" - - # do the downloads - self._downloadPackageList(self.debuginfo_po_list, os.path.join(self.tree_arch, 'debug')) - - def _list_packages(self, po_list): - """Cycle through the list of packages and return their paths.""" - result = [] - for po in po_list: - if po.repoid in self.lookaside_repos: - continue - - flags = [] - - # input - if po in self.input_packages: - flags.append("input") - - # comps - if po in self.comps_packages: - flags.append("comps") - - # prepopulate - if po in self.prepopulate_packages: - flags.append("prepopulate") - - # langpack - if po in self.langpack_packages: - flags.append("langpack") - - # multilib - if po in self.multilib_packages: - flags.append("multilib") - - # fulltree - if po in self.fulltree_packages: - flags.append("fulltree") - - # fulltree-exclude - if is_source(po): - srpm_name = po.name - else: - srpm_name = po.sourcerpm.rsplit("-", 2)[0] - if srpm_name in self.fulltree_excludes: - flags.append("fulltree-exclude") - - result.append({ - "path": os.path.join(po.basepath or "", po.relativepath), - "flags": sorted(flags), - }) - result.sort(lambda x, y: cmp(x["path"], y["path"])) - return result - - def list_packages(self): - """Cycle through the list of RPMs and return their paths.""" - return self._list_packages(self.po_list) - - def list_srpms(self): - """Cycle through the list of SRPMs and return their paths.""" - return self._list_packages(self.srpm_po_list) - - def list_debuginfo(self): - """Cycle through the list of DEBUGINFO RPMs and return their paths.""" - return self._list_packages(self.debuginfo_po_list) - - def _size_packages(self, po_list): - return sum([ po.size for po in po_list if po.repoid not in self.lookaside_repos ]) - - def size_packages(self): - return self._size_packages(self.po_list) - - def size_srpms(self): - return self._size_packages(self.srpm_po_list) - - def size_debuginfo(self): - return self._size_packages(self.debuginfo_po_list) - - def writeinfo(self, line): - """Append a line to the infofile in self.infofile""" - - - f=open(self.infofile, "a+") - f.write(line.strip() + "\n") - f.close() - - def mkrelative(self, subfile): - """Return the relative path for 'subfile' underneath the version dir.""" - - basedir = os.path.join(self.destdir, self.config.get('pungi', 'version')) - if subfile.startswith(basedir): - return subfile.replace(basedir + os.path.sep, '') - - def _makeMetadata(self, path, cachedir, comps=False, repoview=False, repoviewtitle=False, - baseurl=False, output=False, basedir=False, update=True, - compress_type=None): - """Create repodata and repoview.""" - - conf = createrepo.MetaDataConfig() - conf.cachedir = os.path.join(cachedir, 'createrepocache') - conf.update = update - conf.unique_md_filenames = True - if output: - conf.outputdir = output - else: - conf.outputdir = path - conf.directory = path - conf.database = True - if comps: - 
conf.groupfile = comps - if basedir: - conf.basedir = basedir - if baseurl: - conf.baseurl = baseurl - if compress_type: - conf.compress_type = compress_type - repomatic = createrepo.MetaDataGenerator(conf) - self.logger.info('Making repodata') - repomatic.doPkgMetadata() - repomatic.doRepoMetadata() - repomatic.doFinalMove() - - if repoview: - # setup the repoview call - repoview = ['/usr/bin/repoview'] - repoview.append('--quiet') - - repoview.append('--state-dir') - repoview.append(os.path.join(cachedir, 'repoviewcache')) - - if repoviewtitle: - repoview.append('--title') - repoview.append(repoviewtitle) - - repoview.append(path) - - # run the command - pypungi.util._doRunCommand(repoview, self.logger) - - def doCreaterepo(self, comps=True): - """Run createrepo to generate repodata in the tree.""" - - - compsfile = None - if comps: - compsfile = os.path.join(self.workdir, '%s-%s-comps.xml' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version'))) - - # setup the cache dirs - for target in ['createrepocache', 'repoviewcache']: - pypungi.util._ensuredir(os.path.join(self.config.get('pungi', 'cachedir'), - target), - self.logger, - force=True) - - repoviewtitle = '%s %s - %s' % (self.config.get('pungi', 'name'), - self.config.get('pungi', 'version'), - self.tree_arch) - - cachedir = self.config.get('pungi', 'cachedir') - compress_type = self.config.get('pungi', 'compress_type') - - # setup the createrepo call - self._makeMetadata(self.topdir, cachedir, compsfile, - repoview=True, repoviewtitle=repoviewtitle, - compress_type=compress_type) - - # create repodata for debuginfo - if self.config.getboolean('pungi', 'debuginfo'): - path = os.path.join(self.archdir, 'debug') - if not os.path.isdir(path): - self.logger.debug("No debuginfo for %s" % self.tree_arch) - return - self._makeMetadata(path, cachedir, repoview=False, - compress_type=compress_type) - - def _shortenVolID(self): - """shorten the volume id to make sure its under 32 characters""" - - substitutions = {'Workstation': 'WS', - 'Server': 'S', - 'Cloud': 'C', - 'Alpha': 'A', - 'Beta': 'B', - 'TC': 'T'} - name = self.config.get('pungi', 'name') - version = self.config.get('pungi', 'version') - arch = self.tree_arch - - for k, v in substitutions.iteritems(): - if k in name: - name = name.replace(k, v) - if k in version: - version = version.replace(k, v) - volid = "%s-%s-%s" % (name, version, arch) - if len(volid) > 32: - raise RuntimeError("Volume ID %s is longer than 32 characters" % volid) - else: - return volid - - def doBuildinstall(self): - """Run lorax on the tree.""" - - # the old ayum object has transaction data that confuse lorax, reinit. 
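- # (a fresh yum object is initialized and the just-composed tree is added
- # as a low-cost repo -- cost=10 vs. the default 1000 -- so its packages
- # are preferred while lorax depsolves)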
- self._inityum() - - # Add the repo in the destdir to our yum object - self._add_yum_repo('ourtree', - 'file://%s' % self.topdir, - cost=10) - - product = self.config.get('pungi', 'name') - version = self.config.get('pungi', 'version') - release = '%s %s' % (self.config.get('pungi', 'name'), self.config.get('pungi', 'version')) - - variant = self.config.get('pungi', 'flavor') - bugurl = self.config.get('pungi', 'bugurl') - isfinal = self.config.get('pungi', 'isfinal') - - volid = self._shortenVolID() - workdir = self.workdir - outputdir = self.topdir - - # on ppc64 we need to tell lorax to only use ppc64 packages so that the media will run on all 64 bit ppc boxes - if self.tree_arch == 'ppc64': - self.ayum.arch.setup_arch('ppc64') - self.ayum.compatarch = 'ppc64' - elif self.tree_arch == 'ppc64le': - self.ayum.arch.setup_arch('ppc64le') - self.ayum.compatarch = 'ppc64le' - - # Only supported mac hardware is x86 make sure we only enable mac support on arches that need it - if self.tree_arch in ['x86_64']: - if self.config.getboolean('pungi','nomacboot'): - domacboot = False - else: - domacboot = True - else: - domacboot = False - - # run the command - lorax = pylorax.Lorax() - try: - conf_file = self.config.get('lorax', 'conf_file') - lorax.configure(conf_file=conf_file) - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - lorax.configure() - - try: - installpkgs = self.config.get('lorax', 'installpkgs').split(" ") - except (ConfigParser.NoSectionError, ConfigParser.NoOptionError): - installpkgs = None - - lorax.run(self.ayum, product=product, version=version, release=release, - variant=variant, bugurl=bugurl, isfinal=isfinal, domacboot=domacboot, - workdir=workdir, outputdir=outputdir, volid=volid, installpkgs=installpkgs) - - # write out the tree data for snake - self.writeinfo('tree: %s' % self.mkrelative(self.topdir)) - - # Write out checksums for verifytree - # First open the treeinfo file so that we can config parse it - treeinfofile = os.path.join(self.topdir, '.treeinfo') - - try: - treefile = open(treeinfofile, 'r') - except IOError: - self.logger.error("Could not read .treeinfo file: %s" % treefile) - sys.exit(1) - - # Create a ConfigParser object out of the contents so that we can - # write it back out later and not worry about formatting - treeinfo = MyConfigParser() - treeinfo.readfp(treefile) - treefile.close() - treeinfo.add_section('checksums') - - # Create a function to use with os.path.walk to sum the files - # basepath is used to make the sum output relative - sums = [] - def getsum(basepath, dir, files): - for file in files: - path = os.path.join(dir, file) - # don't bother summing directories. Won't work. 
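- # (each (relative path, sha256) pair collected in "sums" ends up in the
- # [checksums] section of .treeinfo written further below)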
- if os.path.isdir(path): - continue - sum = pypungi.util._doCheckSum(path, 'sha256', self.logger) - outpath = path.replace(basepath, '') - sums.append((outpath, sum)) - - # Walk the os/images path to get sums of all the files - os.path.walk(os.path.join(self.topdir, 'images'), getsum, self.topdir + '/') - - # Capture PPC images - if self.tree_arch in ['ppc', 'ppc64', 'ppc64le']: - os.path.walk(os.path.join(self.topdir, 'ppc'), getsum, self.topdir + '/') - - # Get a checksum of repomd.xml since it has within it sums for other files - repomd = os.path.join(self.topdir, 'repodata', 'repomd.xml') - sum = pypungi.util._doCheckSum(repomd, 'sha256', self.logger) - sums.append((os.path.join('repodata', 'repomd.xml'), sum)) - - # Now add the sums, and write the config out - try: - treefile = open(treeinfofile, 'w') - except IOError: - self.logger.error("Could not open .treeinfo for writing: %s" % treefile) - sys.exit(1) - - for path, sum in sums: - treeinfo.set('checksums', path, sum) - - treeinfo.write(treefile) - treefile.close() - - def doGetRelnotes(self): - """Get extra files from packages in the tree to put in the topdir of - the tree.""" - - - docsdir = os.path.join(self.workdir, 'docs') - relnoterpms = self.config.get('pungi', 'relnotepkgs').split() - - fileres = [] - for pattern in self.config.get('pungi', 'relnotefilere').split(): - fileres.append(re.compile(pattern)) - - dirres = [] - for pattern in self.config.get('pungi', 'relnotedirre').split(): - dirres.append(re.compile(pattern)) - - pypungi.util._ensuredir(docsdir, self.logger, force=self.config.getboolean('pungi', 'force'), clean=True) - - # Expload the packages we list as relnote packages - pkgs = os.listdir(os.path.join(self.topdir, self.config.get('pungi', 'product_path'))) - - rpm2cpio = ['/usr/bin/rpm2cpio'] - cpio = ['cpio', '-imud'] - - for pkg in pkgs: - pkgname = pkg.rsplit('-', 2)[0] - for relnoterpm in relnoterpms: - if pkgname == relnoterpm: - extraargs = [os.path.join(self.topdir, self.config.get('pungi', 'product_path'), pkg)] - try: - p1 = subprocess.Popen(rpm2cpio + extraargs, cwd=docsdir, stdout=subprocess.PIPE) - (out, err) = subprocess.Popen(cpio, cwd=docsdir, stdin=p1.stdout, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True).communicate() - except: - self.logger.error("Got an error from rpm2cpio") - self.logger.error(err) - raise - - if out: - self.logger.debug(out) - - # Walk the tree for our files - for dirpath, dirname, filelist in os.walk(docsdir): - for filename in filelist: - for regex in fileres: - if regex.match(filename) and not os.path.exists(os.path.join(self.topdir, filename)): - self.logger.info("Linking release note file %s" % filename) - pypungi.util._link(os.path.join(dirpath, filename), - os.path.join(self.topdir, filename), - self.logger, - force=self.config.getboolean('pungi', - 'force')) - self.common_files.append(filename) - - # Walk the tree for our dirs - for dirpath, dirname, filelist in os.walk(docsdir): - for directory in dirname: - for regex in dirres: - if regex.match(directory) and not os.path.exists(os.path.join(self.topdir, directory)): - self.logger.info("Copying release note dir %s" % directory) - shutil.copytree(os.path.join(dirpath, directory), os.path.join(self.topdir, directory)) - - def _doIsoChecksum(self, path, csumfile): - """Simple function to wrap creating checksums of iso files.""" - - try: - checkfile = open(csumfile, 'a') - except IOError: - self.logger.error("Could not open checksum file: %s" % csumfile) - - self.logger.info("Generating 
checksum of %s" % path) - checksum = pypungi.util._doCheckSum(path, 'sha256', self.logger) - if checksum: - checkfile.write("%s *%s\n" % (checksum.replace('sha256:', ''), os.path.basename(path))) - else: - self.logger.error('Failed to generate checksum for %s' % checkfile) - sys.exit(1) - checkfile.close() - - def doCreateIsos(self): - """Create iso of the tree.""" - - if self.tree_arch.startswith('arm'): - self.logger.info("ARCH: arm, not doing doCreateIsos().") - return - - isolist = [] - ppcbootinfo = '/usr/share/lorax/config_files/ppc' - - pypungi.util._ensuredir(self.isodir, self.logger, - force=self.config.getboolean('pungi', 'force'), - clean=True) # This is risky... - - # setup the base command - mkisofs = ['/usr/bin/mkisofs'] - mkisofs.extend(['-v', '-U', '-J', '-R', '-T', '-m', 'repoview', '-m', 'boot.iso']) # common mkisofs flags - - x86bootargs = ['-b', 'isolinux/isolinux.bin', '-c', 'isolinux/boot.cat', - '-no-emul-boot', '-boot-load-size', '4', '-boot-info-table'] - - efibootargs = ['-eltorito-alt-boot', '-e', 'images/efiboot.img', - '-no-emul-boot'] - - macbootargs = ['-eltorito-alt-boot', '-e', 'images/macboot.img', - '-no-emul-boot'] - - ia64bootargs = ['-b', 'images/boot.img', '-no-emul-boot'] - - ppcbootargs = ['-part', '-hfs', '-r', '-l', '-sysid', 'PPC', '-no-desktop', '-allow-multidot', '-chrp-boot'] - - ppcbootargs.append('-map') - ppcbootargs.append(os.path.join(ppcbootinfo, 'mapping')) - - ppcbootargs.append('-hfs-bless') # must be last - - isohybrid = ['/usr/bin/isohybrid'] - - # Check the size of the tree - # This size checking method may be bunk, accepting patches... - if not self.tree_arch == 'source': - treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', self.topdir], stdout=subprocess.PIPE).communicate()[0]) - else: - srcdir = os.path.join(self.config.get('pungi', 'destdir'), self.config.get('pungi', 'version'), - self.config.get('pungi', 'flavor'), 'source', 'SRPMS') - - treesize = int(subprocess.Popen(mkisofs + ['-print-size', '-quiet', srcdir], stdout=subprocess.PIPE).communicate()[0]) - # Size returned is 2KiB clusters or some such. This translates that to MiB. - treesize = treesize * 2048 / 1024 / 1024 - - if treesize > 700: # we're larger than a 700meg CD - isoname = '%s-DVD-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), self.tree_arch, - self.config.get('pungi', 'version')) - else: - isoname = '%s-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), self.tree_arch, - self.config.get('pungi', 'version')) - - isofile = os.path.join(self.isodir, isoname) - - # setup the extra mkisofs args - extraargs = [] - - if self.tree_arch == 'i386' or self.tree_arch == 'x86_64': - extraargs.extend(x86bootargs) - if self.tree_arch == 'x86_64': - extraargs.extend(efibootargs) - isohybrid.append('-u') - if os.path.exists(os.path.join(self.topdir, 'images', 'macboot.img')): - extraargs.extend(macbootargs) - isohybrid.append('-m') - elif self.tree_arch == 'ia64': - extraargs.extend(ia64bootargs) - elif self.tree_arch.startswith('ppc'): - extraargs.extend(ppcbootargs) - extraargs.append(os.path.join(self.topdir, "ppc/mac")) - elif self.tree_arch.startswith('aarch64'): - extraargs.extend(efibootargs) - - # NOTE: if this doesn't match what's in the bootloader config, the - # image won't be bootable! 
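- # (hence -V reuses _shortenVolID(), the same volume id that was passed
- # to lorax during buildinstall)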
- extraargs.append('-V') - extraargs.append(self._shortenVolID()) - - extraargs.extend(['-o', isofile]) - - isohybrid.append(isofile) - - if not self.tree_arch == 'source': - extraargs.append(self.topdir) - else: - extraargs.append(os.path.join(self.archdir, 'SRPMS')) - - if self.config.get('pungi', 'no_dvd') == "False": - # run the command - pypungi.util._doRunCommand(mkisofs + extraargs, self.logger) - - # Run isohybrid on the iso as long as its not the source iso - if os.path.exists("/usr/bin/isohybrid") and not self.tree_arch == 'source': - pypungi.util._doRunCommand(isohybrid, self.logger) - - # implant md5 for mediacheck on all but source arches - if not self.tree_arch == 'source': - pypungi.util._doRunCommand(['/usr/bin/implantisomd5', isofile], self.logger) - - # shove the checksum into a file - csumfile = os.path.join(self.isodir, '%s-%s-%s-CHECKSUM' % ( - self.config.get('pungi', 'iso_basename'), - self.config.get('pungi', 'version'), - self.tree_arch)) - # Write a line about what checksums are used. - # sha256sum is magic... - file = open(csumfile, 'w') - file.write('# The image checksum(s) are generated with sha256sum.\n') - file.close() - if self.config.get('pungi', 'no_dvd') == "False": - self._doIsoChecksum(isofile, csumfile) - - # Write out a line describing the media - self.writeinfo('media: %s' % self.mkrelative(isofile)) - - # Now link the boot iso - if not self.tree_arch == 'source' and \ - os.path.exists(os.path.join(self.topdir, 'images', 'boot.iso')): - isoname = '%s-netinst-%s-%s.iso' % (self.config.get('pungi', 'iso_basename'), - self.tree_arch, self.config.get('pungi', 'version')) - isofile = os.path.join(self.isodir, isoname) - - # link the boot iso to the iso dir - pypungi.util._link(os.path.join(self.topdir, 'images', 'boot.iso'), isofile, self.logger) - - # shove the checksum into a file - self._doIsoChecksum(isofile, csumfile) - - self.logger.info("CreateIsos is done.") diff --git a/src/pypungi/arch.py b/src/pypungi/arch.py deleted file mode 100644 index 332b228..0000000 --- a/src/pypungi/arch.py +++ /dev/null @@ -1,94 +0,0 @@ -# -*- coding: utf-8 -*- - - -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Library General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
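Before arch.py's code below, a brief aside on the ISO checksums above: _doIsoChecksum() appends sha256sum-compatible "HASH *basename" lines, so the resulting CHECKSUM file can be verified with "sha256sum -c". A minimal standalone sketch of that line format; the function name and chunk size here are illustrative, not part of the removed code:

import hashlib
import os

def checksum_line(path):
    # mirror the "%s *%s\n" format _doIsoChecksum() writes; the '*' marks
    # sha256sum's binary mode
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(1 << 20), b''):
            digest.update(block)
    return "%s *%s\n" % (digest.hexdigest(), os.path.basename(path))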
- - -import rpmUtils.arch - - -TREE_ARCH_YUM_ARCH_MAP = { - "i386": "athlon", - "ppc64": "ppc64p7", - "sparc": "sparc64v", - "arm": "armv7l", - "armhfp": "armv7hnl", -} - - -def tree_arch_to_yum_arch(tree_arch): - # this is basically an opposite to rpmUtils.arch.getBaseArch() - yum_arch = TREE_ARCH_YUM_ARCH_MAP.get(tree_arch, tree_arch) - return yum_arch - - -def get_multilib_arch(yum_arch): - arch_info = rpmUtils.arch.getMultiArchInfo(yum_arch) - if arch_info is None: - return None - return arch_info[0] - - -def get_valid_multilib_arches(tree_arch): - yum_arch = tree_arch_to_yum_arch(tree_arch) - multilib_arch = get_multilib_arch(yum_arch) - if not multilib_arch: - return [] - return [ i for i in rpmUtils.arch.getArchList(multilib_arch) if i not in ("noarch", "src") ] - - -def get_valid_arches(tree_arch, multilib=True, add_noarch=True, add_src=False): - result = [] - - yum_arch = tree_arch_to_yum_arch(tree_arch) - for arch in rpmUtils.arch.getArchList(yum_arch): - if arch not in result: - result.append(arch) - - if not multilib: - for i in get_valid_multilib_arches(tree_arch): - while i in result: - result.remove(i) - - if add_noarch and "noarch" not in result: - result.append("noarch") - - if add_src and "src" not in result: - result.append("src") - - return result - - -def get_compatible_arches(arch, multilib=False): - tree_arch = rpmUtils.arch.getBaseArch(arch) - compatible_arches = get_valid_arches(tree_arch, multilib=multilib) - return compatible_arches - - -def is_valid_arch(arch): - if arch in ("noarch", "src", "nosrc"): - return True - if arch in rpmUtils.arch.arches: - return True - return False - - -def split_name_arch(name_arch): - if "." in name_arch: - name, arch = name_arch.rsplit(".", 1) - if not is_valid_arch(arch): - name, arch = name_arch, None - else: - name, arch = name_arch, None - return name, arch diff --git a/src/pypungi/config.py b/src/pypungi/config.py deleted file mode 100644 index 617ae18..0000000 --- a/src/pypungi/config.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/python -tt -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; version 2 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Library General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
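Before the config.py defaults below, a quick illustration of the arch helpers removed above. The expected values are inferred from TREE_ARCH_YUM_ARCH_MAP and rpmUtils behavior, so treat them as indicative, and the import assumes the old src/pypungi layout is on the path:

from pypungi import arch

arch.tree_arch_to_yum_arch("i386")      # "athlon", per TREE_ARCH_YUM_ARCH_MAP
arch.tree_arch_to_yum_arch("x86_64")    # "x86_64" (no entry, passed through)
arch.split_name_arch("glibc.i686")      # ("glibc", "i686")
arch.split_name_arch("pungi")           # ("pungi", None), no valid arch suffix
arch.get_valid_arches("x86_64", multilib=False)  # native arches plus "noarch"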
-
-import os
-import time
-import yum
-
-from ConfigParser import SafeConfigParser
-
-class Config(SafeConfigParser):
-    def __init__(self):
-        SafeConfigParser.__init__(self)
-
-        self.add_section('pungi')
-        self.add_section('lorax')
-
-        self.set('pungi', 'osdir', 'os')
-        self.set('pungi', 'sourcedir', 'source')
-        self.set('pungi', 'debugdir', 'debug')
-        self.set('pungi', 'isodir', 'iso')
-        self.set('pungi', 'relnotefilere', 'GPL README-BURNING-ISOS-en_US.txt ^RPM-GPG')
-        self.set('pungi', 'relnotedirre', '')
-        self.set('pungi', 'relnotepkgs', 'fedora-release fedora-release-notes')
-        self.set('pungi', 'product_path', 'Packages')
-        self.set('pungi', 'cachedir', '/var/cache/pungi')
-        self.set('pungi', 'compress_type', 'xz')
-        self.set('pungi', 'arch', yum.rpmUtils.arch.getBaseArch())
-        self.set('pungi', 'name', 'Fedora')
-        self.set('pungi', 'iso_basename', 'Fedora')
-        self.set('pungi', 'version', time.strftime('%Y%m%d', time.localtime()))
-        self.set('pungi', 'flavor', '')
-        self.set('pungi', 'destdir', os.getcwd())
-        self.set('pungi', 'workdirbase', "/work")
-        self.set('pungi', 'bugurl', 'https://bugzilla.redhat.com')
-        self.set('pungi', 'cdsize', '695.0')
-        self.set('pungi', 'debuginfo', "True")
-        self.set('pungi', 'alldeps', "True")
-        self.set('pungi', 'isfinal', "False")
-        self.set('pungi', 'nohash', "False")
-        self.set('pungi', 'full_archlist', "False")
-        self.set('pungi', 'multilib', '')
-        self.set('pungi', 'lookaside_repos', '')
-        self.set('pungi', 'resolve_deps', "True")
-        self.set('pungi', 'no_dvd', "False")
-        self.set('pungi', 'nomacboot', "False")
diff --git a/src/pypungi/ks.py b/src/pypungi/ks.py
deleted file mode 100644
index 53f7520..0000000
--- a/src/pypungi/ks.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-"""
-Pungi adds several new sections to kickstarts.
-
-
-FULLTREE EXCLUDES
------------------
-Fulltree excludes allow us to define SRPM names
-we don't want to be part of fulltree processing.
-
-Syntax:
-%fulltree-excludes
-<srpm_name>
-<srpm_name>
-...
-%end
-
-
-MULTILIB BLACKLIST
-------------------
-List of RPMs which are prevented from becoming multilib.
-
-Syntax:
-%multilib-blacklist
-<package_name>
-<package_name>
-...
-%end
-
-
-MULTILIB WHITELIST
-------------------
-List of RPMs which will become multilib (but only if the native package is pulled in).
-
-Syntax:
-%multilib-whitelist
-<package_name>
-<package_name>
-...
-%end
-
-
-PREPOPULATE
------------
-To make sure no package is left behind between 2 composes,
-we can explicitly add <name>.<arch> records to the %prepopulate section.
-These will be added to the input list and marked with the 'prepopulate' flag.
-
-Syntax:
-%prepopulate
-<name>.<arch>
-<name>.<arch>
-...
-%end
-"""
-
-
-import pykickstart.parser
-import pykickstart.sections
-
-
-class FulltreeExcludesSection(pykickstart.sections.Section):
-    sectionOpen = "%fulltree-excludes"
-
-    def handleLine(self, line):
-        if not self.handler:
-            return
-
-        (h, s, t) = line.partition('#')
-        line = h.rstrip()
-
-        self.handler.fulltree_excludes.add(line)
-
-
-class MultilibBlacklistSection(pykickstart.sections.Section):
-    sectionOpen = "%multilib-blacklist"
-
-    def handleLine(self, line):
-        if not self.handler:
-            return
-
-        (h, s, t) = line.partition('#')
-        line = h.rstrip()
-
-        self.handler.multilib_blacklist.add(line)
-
-
-class MultilibWhitelistSection(pykickstart.sections.Section):
-    sectionOpen = "%multilib-whitelist"
-
-    def handleLine(self, line):
-        if not self.handler:
-            return
-
-        (h, s, t) = line.partition('#')
-        line = h.rstrip()
-
-        self.handler.multilib_whitelist.add(line)
-
-
-class PrepopulateSection(pykickstart.sections.Section):
-    sectionOpen = "%prepopulate"
-
-    def handleLine(self, line):
-        if not self.handler:
-            return
-
-        (h, s, t) = line.partition('#')
-        line = h.rstrip()
-
-        self.handler.prepopulate.add(line)
-
-
-class KickstartParser(pykickstart.parser.KickstartParser):
-    def setupSections(self):
-        pykickstart.parser.KickstartParser.setupSections(self)
-        self.registerSection(FulltreeExcludesSection(self.handler))
-        self.registerSection(MultilibBlacklistSection(self.handler))
-        self.registerSection(MultilibWhitelistSection(self.handler))
-        self.registerSection(PrepopulateSection(self.handler))
-
-
-HandlerClass = pykickstart.version.returnClassForVersion()
-class PungiHandler(HandlerClass):
-    def __init__(self, *args, **kwargs):
-        HandlerClass.__init__(self, *args, **kwargs)
-        self.fulltree_excludes = set()
-        self.multilib_blacklist = set()
-        self.multilib_whitelist = set()
-        self.prepopulate = set()
-
-
-def get_ksparser(ks_path=None):
-    """
-    Return a kickstart parser instance.
-    Read kickstart if ks_path provided.
-    """
-    ksparser = KickstartParser(PungiHandler())
-    if ks_path:
-        ksparser.readKickstart(ks_path)
-    return ksparser
diff --git a/src/pypungi/multilib.py b/src/pypungi/multilib.py
deleted file mode 100755
index 6719573..0000000
--- a/src/pypungi/multilib.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
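A short sketch of how the kickstart extensions above are consumed, assuming a pre-4.0 checkout with pykickstart installed; the kickstart body here is invented for illustration:

import tempfile
from pypungi.ks import get_ksparser

ks = tempfile.NamedTemporaryFile(suffix=".ks")
ks.write("%fulltree-excludes\nbash\n%end\n\n%prepopulate\nglibc.i686\n%end\n")
ks.flush()

ksparser = get_ksparser(ks.name)
print ksparser.handler.fulltree_excludes   # expected: set(['bash'])
print ksparser.handler.prepopulate         # expected: set(['glibc.i686'])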
-
-
-import re
-import fnmatch
-
-import pathmatch
-
-import pypungi
-
-
-LINE_PATTERN_RE = re.compile(r"^\s*(?P<line>[^#]+)(:?\s+(?P<comment>#.*))?$")
-RUNTIME_PATTERN_SPLIT_RE = re.compile(r"^\s*(?P<path>[^\s]+)\s+(?P<pattern>[^\s]+)(:?\s+(?P<comment>#.*))?$")
-SONAME_PATTERN_RE = re.compile(r"^(.+\.so\.[a-zA-Z0-9_\.]+).*$")
-
-
-def read_lines(lines):
-    result = []
-    for i in lines:
-        i = i.strip()
-
-        if not i:
-            continue
-
-        # skip comments
-        if i.startswith("#"):
-            continue
-
-        match = LINE_PATTERN_RE.match(i)
-        if match is None:
-            raise ValueError("Couldn't parse line: %s" % i)
-        gd = match.groupdict()
-        result.append(gd["line"])
-    return result
-
-
-def read_lines_from_file(path):
-    lines = open(path, "r").readlines()
-    lines = read_lines(lines)
-    return lines
-
-
-def read_runtime_patterns(lines):
-    result = []
-    for i in read_lines(lines):
-        match = RUNTIME_PATTERN_SPLIT_RE.match(i)
-        if match is None:
-            raise ValueError("Couldn't parse pattern: %s" % i)
-        gd = match.groupdict()
-        result.append((gd["path"], gd["pattern"]))
-    return result
-
-
-def read_runtime_patterns_from_file(path):
-    lines = open(path, "r").readlines()
-    return read_runtime_patterns(lines)
-
-
-def expand_runtime_patterns(patterns):
-    pm = pathmatch.PathMatch()
-    result = []
-    for path, pattern in patterns:
-        for root in ("", "/opt/*/*/root"):
-            # include Software Collections: /opt/<vendor>/<collection>/root/...
-            if "$LIBDIR" in path:
-                for lib_dir in ("/lib", "/lib64", "/usr/lib", "/usr/lib64"):
-                    path_pattern = path.replace("$LIBDIR", lib_dir)
-                    path_pattern = "%s/%s" % (root, path_pattern.lstrip("/"))
-                    pm[path_pattern] = (path_pattern, pattern)
-            else:
-                path_pattern = "%s/%s" % (root, path.lstrip("/"))
-                pm[path_pattern] = (path_pattern, pattern)
-    return pm
-
-
-class MultilibMethodBase(object):
-    """a base class for multilib methods"""
-    name = "base"
-
-    def select(self, po):
-        raise NotImplementedError
-
-    def skip(self, po):
-        if pypungi.is_noarch(po) or pypungi.is_source(po) or pypungi.is_debug(po):
-            return True
-        return False
-
-    def is_kernel(self, po):
-        for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
-            if p_name == "kernel":
-                return True
-        return False
-
-    def is_kernel_devel(self, po):
-        for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
-            if p_name == "kernel-devel":
-                return True
-        return False
-
-    def is_kernel_or_kernel_devel(self, po):
-        for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
-            if p_name in ("kernel", "kernel-devel"):
-                return True
-        return False
-
-
-class NoneMultilibMethod(MultilibMethodBase):
-    """multilib disabled"""
-    name = "none"
-
-    def select(self, po):
-        return False
-
-
-class AllMultilibMethod(MultilibMethodBase):
-    """all packages are multilib"""
-    name = "all"
-
-    def select(self, po):
-        if self.skip(po):
-            return False
-        return True
-
-
-class RuntimeMultilibMethod(MultilibMethodBase):
-    """pre-defined paths to libs"""
-    name = "runtime"
-
-    def __init__(self, **kwargs):
-        self.blacklist = read_lines_from_file("/usr/share/pungi/multilib/runtime-blacklist.conf")
-        self.whitelist = read_lines_from_file("/usr/share/pungi/multilib/runtime-whitelist.conf")
-        self.patterns = expand_runtime_patterns(read_runtime_patterns_from_file("/usr/share/pungi/multilib/runtime-patterns.conf"))
-
-    def select(self, po):
-        if self.skip(po):
-            return False
-        if po.name in self.blacklist:
-            return False
-        if po.name in self.whitelist:
-            return True
-        if self.is_kernel(po):
-            return False
-
-        # gather all *.so.* provides from the RPM header
-        provides = set()
-        for i in po.provides:
-            match = SONAME_PATTERN_RE.match(i[0])
-            if match is not None:
-                provides.add(match.group(1))
-
-        for path in po.returnFileEntries() + po.returnFileEntries("ghost"):
-            dirname, filename = path.rsplit("/", 1)
-            dirname = dirname.rstrip("/")
-
-            patterns = self.patterns[dirname]
-            if not patterns:
-                continue
-            for dir_pattern, file_pattern in patterns:
-                if file_pattern == "-":
-                    return True
-                if fnmatch.fnmatch(filename, file_pattern):
-                    if ".so.*" in file_pattern:
-                        if filename in provides:
-                            # return only if the lib is provided in RPM header
-                            # (some libs may be private, hence not exposed in Provides)
-                            return True
-                    else:
-                        return True
-        return False
-
-
-class FileMultilibMethod(MultilibMethodBase):
-    """explicitly defined whitelist and blacklist"""
-    def __init__(self, **kwargs):
-        self.name = "file"
-        whitelist = kwargs.pop("whitelist", None)
-        blacklist = kwargs.pop("blacklist", None)
-        self.whitelist = self.read_file(whitelist)
-        self.blacklist = self.read_file(blacklist)
-
-    @staticmethod
-    def read_file(path):
-        if not path:
-            return []
-        result = [ i.strip() for i in open(path, "r") if not i.strip().startswith("#") ]
-        return result
-
-    def select(self, po):
-        for pattern in self.blacklist:
-            if fnmatch.fnmatch(po.name, pattern):
-                return False
-        for pattern in self.whitelist:
-            if fnmatch.fnmatch(po.name, pattern):
-                return True
-        return False
-
-
-class KernelMultilibMethod(MultilibMethodBase):
-    """kernel and kernel-devel"""
-    def __init__(self, **kwargs):
-        self.name = "kernel"
-
-    def select(self, po):
-        if self.is_kernel_or_kernel_devel(po):
-            return True
-        return False
-
-
-class YabootMultilibMethod(MultilibMethodBase):
-    """yaboot on ppc"""
-    def __init__(self, **kwargs):
-        self.name = "yaboot"
-
-    def select(self, po):
-        if po.arch in ["ppc"]:
-            if po.name.startswith("yaboot"):
-                return True
-        return False
-
-
-class DevelMultilibMethod(MultilibMethodBase):
-    """all -devel and -static packages"""
-    name = "devel"
-
-    def __init__(self, **kwargs):
-        self.blacklist = read_lines_from_file("/usr/share/pungi/multilib/devel-blacklist.conf")
-        self.whitelist = read_lines_from_file("/usr/share/pungi/multilib/devel-whitelist.conf")
-
-    def select(self, po):
-        if self.skip(po):
-            return False
-        if po.name in self.blacklist:
-            return False
-        if po.name in self.whitelist:
-            return True
-        if self.is_kernel_devel(po):
-            return False
-        # HACK: exclude ghc*
-        if po.name.startswith("ghc-"):
-            return False
-        if po.name.endswith("-devel"):
-            return True
-        if po.name.endswith("-static"):
-            return True
-        for p_name, p_flag, (p_e, p_v, p_r) in po.provides:
-            if p_name.endswith("-devel"):
-                return True
-            if p_name.endswith("-static"):
-                return True
-        return False
-
-
-DEFAULT_METHODS = ["devel", "runtime"]
-METHOD_MAP = {}
-for cls in (AllMultilibMethod, DevelMultilibMethod, FileMultilibMethod, KernelMultilibMethod, NoneMultilibMethod, RuntimeMultilibMethod, YabootMultilibMethod):
-    method = cls()
-    METHOD_MAP[method.name] = method
-
-
-def po_is_multilib(po, methods):
-    for method_name in methods:
-        if not method_name:
-            continue
-        method = METHOD_MAP[method_name]
-        if method.select(po):
-            return method_name
-    return None
-
-
-def do_multilib(yum_arch, methods, repos, tmpdir, logfile):
-    import os
-    import yum
-    import rpm
-    import logging
-
-    archlist = yum.rpmUtils.arch.getArchList(yum_arch)
-
-    yumbase = yum.YumBase()
-    yumbase.preconf.init_plugins = False
-    yumbase.preconf.root = tmpdir
-    # order matters!
-    # must run doConfigSetup() before touching yumbase.conf
-    yumbase.doConfigSetup(fn="/dev/null")
-    yumbase.conf.cache = False
-    yumbase.conf.cachedir = tmpdir
-    yumbase.conf.exactarch = True
-    yumbase.conf.gpgcheck = False
-    yumbase.conf.logfile = logfile
-    yumbase.conf.plugins = False
-    yumbase.conf.reposdir = []
-    yumbase.verbose_logger.setLevel(logging.ERROR)
-
-    yumbase.doRepoSetup()
-    yumbase.doTsSetup()
-    yumbase.doRpmDBSetup()
-    yumbase.ts.pushVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS))
-
-    for repo in yumbase.repos.findRepos("*"):
-        repo.disable()
-
-    for i, baseurl in enumerate(repos):
-        repo_id = "multilib-%s" % i
-        if "://" not in baseurl:
-            baseurl = "file://" + os.path.abspath(baseurl)
-        yumbase.add_enable_repo(repo_id, baseurls=[baseurl])
-
-    yumbase.doSackSetup(archlist=archlist)
-    yumbase.doSackFilelistPopulate()
-
-    method_kwargs = {}
-
-    result = []
-    for po in sorted(yumbase.pkgSack):
-        method = po_is_multilib(po, methods)
-        if method:
-            nvra = "%s-%s-%s.%s.rpm" % (po.name, po.version, po.release, po.arch)
-            result.append((nvra, method))
-    return result
-
-
-def main():
-    import optparse
-    import shutil
-    import tempfile
-
-    class MyOptionParser(optparse.OptionParser):
-        def print_help(self, *args, **kwargs):
-            optparse.OptionParser.print_help(self, *args, **kwargs)
-            print
-            print "Available multilib methods:"
-            for key, value in sorted(METHOD_MAP.items()):
-                default = (key in DEFAULT_METHODS) and " (default)" or ""
-                print "  %-10s %s%s" % (key, value.__doc__ or "", default)
-
-    parser = MyOptionParser("usage: %prog [options]")
-
-    parser.add_option(
-        "--arch",
-    )
-    parser.add_option(
-        "--method",
-        action="append",
-        default=DEFAULT_METHODS,
-        help="multilib method",
-    )
-    parser.add_option(
-        "--repo",
-        dest="repos",
-        action="append",
-        help="path or url to yum repo; can be specified multiple times",
-    )
-    parser.add_option("--tmpdir")
-    parser.add_option("--logfile", action="store")
-
-    opts, args = parser.parse_args()
-
-    if args:
-        parser.error("no arguments expected")
-
-    if not opts.repos:
-        parser.error("provide at least one repo")
-
-    for method_name in opts.method:
-        if method_name not in METHOD_MAP:
-            parser.error("unknown method: %s" % method_name)
-    print opts.method
-
-    tmpdir = opts.tmpdir
-    if not opts.tmpdir:
-        tmpdir = tempfile.mkdtemp(prefix="multilib_")
-
-    nvra_list = do_multilib(opts.arch, opts.method, opts.repos, tmpdir, opts.logfile)
-    for nvra, method in nvra_list:
-        print "MULTILIB(%s): %s" % (method, nvra)
-
-    if not opts.tmpdir:
-        shutil.rmtree(tmpdir)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/src/pypungi/pathmatch.py b/src/pypungi/pathmatch.py
deleted file mode 100644
index d37f38d..0000000
--- a/src/pypungi/pathmatch.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-import fnmatch
-
-
-def head_tail_split(name):
-    name_split = name.strip("/").split("/", 1)
-    if len(name_split) == 2:
-        head = name_split[0]
-        tail = name_split[1].strip("/")
-    else:
-        head, tail = name_split[0], None
-    return head, tail
-
-
-class PathMatch(object):
-    def __init__(self, parent=None, desc=None):
-        self._patterns = {}
-        self._final_patterns = {}
-        self._values = []
-
-    def __setitem__(self, name, value):
-        head, tail = head_tail_split(name)
-
-        if tail is not None:
-            # recursion
-            if head not in self._patterns:
-                self._patterns[head] = PathMatch(parent=self, desc=head)
-            self._patterns[head][tail] = value
-        else:
-            if head not in self._final_patterns:
-                self._final_patterns[head] = PathMatch(parent=self, desc=head)
-            if value not in self._final_patterns[head]._values:
-                self._final_patterns[head]._values.append(value)
-
-    def __getitem__(self, name):
-        result = []
-        head, tail = head_tail_split(name)
-        for pattern in self._patterns:
-            if fnmatch.fnmatch(head, pattern):
-                if tail is None:
-                    values = self._patterns[pattern]._values
-                else:
-                    values = self._patterns[pattern][tail]
-                for value in values:
-                    if value not in result:
-                        result.append(value)
-
-        for pattern in self._final_patterns:
-            if tail is None:
-                x = head
-            else:
-                x = "%s/%s" % (head, tail)
-            if fnmatch.fnmatch(x, pattern):
-                values = self._final_patterns[pattern]._values
-                for value in values:
-                    if value not in result:
-                        result.append(value)
-        return result
diff --git a/src/pypungi/util.py b/src/pypungi/util.py
deleted file mode 100644
index 0a2ea11..0000000
--- a/src/pypungi/util.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/python -tt
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import subprocess
-import os
-import shutil
-import sys
-import hashlib
-
-def _doRunCommand(command, logger, rundir='/tmp', output=subprocess.PIPE, error=subprocess.PIPE, env=None):
-    """Run a command and log the output. Error out if we get something on stderr"""
-
-
-    logger.info("Running %s" % subprocess.list2cmdline(command))
-
-    p1 = subprocess.Popen(command, cwd=rundir, stdout=output, stderr=error, universal_newlines=True, env=env)
-    (out, err) = p1.communicate()
-
-    if out:
-        logger.debug(out)
-
-    if p1.returncode != 0:
-        logger.error("Got an error from %s" % command[0])
-        logger.error(err)
-        raise OSError, "Got an error from %s: %s" % (command[0], err)
-
-def _link(local, target, logger, force=False):
-    """Simple function to link or copy a package, removing target optionally."""
-
-    if os.path.exists(target) and force:
-        os.remove(target)
-
-    #check for broken links
-    if force and os.path.islink(target):
-        if not os.path.exists(os.readlink(target)):
-            os.remove(target)
-
-    try:
-        os.link(local, target)
-    except OSError, e:
-        if e.errno != 18: # EXDEV
-            logger.error('Got an error linking from cache: %s' % e)
-            raise OSError, e
-
-        # Can't hardlink cross file systems
-        shutil.copy2(local, target)
-
-def _ensuredir(target, logger, force=False, clean=False):
-    """Ensure that a directory exists, if it already exists, only continue
-    if force is set."""
-
-    # We have to check existence of a logger, as setting the logger could
-    # itself cause an issue.
-    def whoops(func, path, exc_info):
-        message = 'Could not remove %s' % path
-        if logger:
-            logger.error(message)
-        else:
-            sys.stderr.write(message)
-        sys.exit(1)
-
-    if os.path.exists(target) and not os.path.isdir(target):
-        message = '%s exists but is not a directory.' % target
-        if logger:
-            logger.error(message)
-        else:
-            sys.stderr.write(message)
-        sys.exit(1)
-
-    if not os.path.isdir(target):
-        os.makedirs(target)
-    elif force and clean:
-        shutil.rmtree(target, onerror=whoops)
-        os.makedirs(target)
-    elif force:
-        return
-    else:
-        message = 'Directory %s already exists. Use --force to overwrite.' % target
-        if logger:
-            logger.error(message)
-        else:
-            sys.stderr.write(message)
-        sys.exit(1)
-
-def _doCheckSum(path, hash, logger):
-    """Generate a checksum hash from a provided path.
-    Return a string of type:hash"""
-
-    # Try to figure out what hash we want to do
-    try:
-        sum = hashlib.new(hash)
-    except ValueError:
-        logger.error("Invalid hash type: %s" % hash)
-        return False
-
-    # Try to open the file, using binary flag.
-    try:
-        myfile = open(path, 'rb')
-    except IOError, e:
-        logger.error("Could not open file %s: %s" % (path, e))
-        return False
-
-    # Loop through the file reading chunks at a time as to not
-    # put the entire file in memory. That would suck for DVDs
-    while True:
-        chunk = myfile.read(8192) # magic number! Taking suggestions for better blocksize
-        if not chunk:
-            break # we're done with the file
-        sum.update(chunk)
-    myfile.close()
-
-    return '%s:%s' % (hash, sum.hexdigest())
diff --git a/tests/test_arch.py b/tests/test_arch.py
index 5925ad1..b502606 100755
--- a/tests/test_arch.py
+++ b/tests/test_arch.py
@@ -8,9 +8,13 @@ import os
 import sys
 import tempfile
 import shutil
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src", "pypungi")))
-from arch import *
+here = sys.path[0]
+if here != '/usr/bin':
+    # Git checkout
+    sys.path[0] = os.path.dirname(here)
+
+from pungi.arch import *
 
 class TestArch(unittest.TestCase):
diff --git a/tests/test_pathmatch.py b/tests/test_pathmatch.py
index 298677d..b2d7954 100755
--- a/tests/test_pathmatch.py
+++ b/tests/test_pathmatch.py
@@ -5,9 +5,13 @@ import unittest
 import os
 import sys
-sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "src", "pypungi")))
-from pathmatch import PathMatch, head_tail_split
+here = sys.path[0]
+if here != '/usr/bin':
+    # Git checkout
+    sys.path[0] = os.path.dirname(here)
+
+from pungi.pathmatch import PathMatch, head_tail_split
 
 class TestHeadTailSplit(unittest.TestCase):
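The deleted multilib.py funnels yum package objects through METHOD_MAP via po_is_multilib(). A sketch of one method's selection logic against a faked package object; note that a real import of pypungi.multilib also needs the runtime/devel conf files under /usr/share/pungi/multilib, since METHOD_MAP is built eagerly at import time:

from pypungi.multilib import KernelMultilibMethod

class FakePo(object):
    # Just enough of a yum package object for is_kernel_or_kernel_devel().
    name = "kernel-devel"
    arch = "x86_64"
    provides = [("kernel-devel", "EQ", ("0", "3.11.0", "1"))]

print KernelMultilibMethod().select(FakePo())   # True: provides kernel-devel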
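The deleted pathmatch.PathMatch is a small glob trie: values are stored under slash-separated glob keys, and a lookup with a concrete path returns every stored value whose pattern matches. A pure-logic sketch against the pre-4.0 module (no yum needed):

from pypungi.pathmatch import PathMatch

pm = PathMatch()
pm["/usr/lib*/gtk-2.0/*/immodules"] = ("/usr/lib*/gtk-2.0/*/immodules", "*.so")

# Concrete paths match component by component through each glob...
print pm["/usr/lib64/gtk-2.0/2.10.0/immodules"]  # [('/usr/lib*/gtk-2.0/*/immodules', '*.so')]
# ...and misses return an empty list.
print pm["/usr/share/doc"]                       # []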