| |
@@ -703,7 +703,7 @@
|
| |
f_mock_chroots_many, f_custom_builds, f_db):
|
| |
|
| |
r_non_ml_chroot = self.tc.get(
|
| |
- "/coprs/{0}/{1}/repo/fedora-18/some.repo&arch=x86_64".format(
|
| |
+ "/coprs/{0}/{1}/repo/fedora-18/some.repo?arch=x86_64".format(
|
| |
self.u1.name, self.c1.name))
|
| |
|
| |
for f_version in range(19, 24):
|
| |
@@ -711,13 +711,15 @@
|
| |
# with disabled multilib there's no change between fedora repos,
|
| |
# no matter what the version or architecture is
|
| |
r_ml_chroot = self.tc.get(
|
| |
- "/coprs/{0}/{1}/repo/fedora-{2}/some.repo&arch={3}".format(
|
| |
+ "/coprs/{0}/{1}/repo/fedora-{2}/some.repo?arch={3}".format(
|
| |
self.u1.name, self.c1.name, f_version, arch))
|
| |
assert r_ml_chroot.data == r_non_ml_chroot.data
|
| |
|
| |
self.c1.multilib = True
|
| |
self.db.session.commit()
|
| |
|
| |
+ cache.clear() # f18 repofile is cached
|
| |
+
|
| |
# The project is now multilib, but f18 chroot doesn't have i386
|
| |
# countepart in c1
|
| |
|
| |
@@ -738,28 +740,48 @@
|
| |
assert r_ml_chroot.data == r_ml_first_chroot.data
|
| |
assert r_ml_chroot.data != r_non_ml_chroot.data
|
| |
|
| |
+ # and the non-ml variants need to match non-ml chroot f18
|
| |
+ # (this also checks that we don't cache 'some.repo' requests with
|
| |
+ # 'some.repo?arch=...')
|
| |
+ r_non_ml_repofile = self.tc.get(
|
| |
+ "/coprs/{0}/{1}/repo/fedora-{2}/some.repo".format(
|
| |
+ self.u1.name, self.c1.name, f_version))
|
| |
+ assert r_non_ml_repofile.data == r_non_ml_chroot.data
|
| |
+
|
| |
def parse_repofile(string):
|
| |
lines = string.split('\n')
|
| |
- repoids = [x.strip('[]') for x in lines if re.match(r'^\[.*\]$', x)]
|
| |
- baseurls = [x.split('=')[1] for x in lines if re.match(r'^baseurl=.*', x)]
|
| |
- gpgkeys = [x.split('=')[1] for x in lines if re.match(r'^gpgkey=.*', x)]
|
| |
- return repoids, baseurls, gpgkeys
|
| |
+ def get_params(name, lines):
|
| |
+ return [x.split('=')[1] for x in lines
|
| |
+ if re.match(r'^{}=.*'.format(name), x)]
|
| |
+ return (
|
| |
+ [x.strip('[]') for x in lines if re.match(r'^\[.*\]$', x)],
|
| |
+ get_params('baseurl', lines),
|
| |
+ get_params('gpgkey', lines),
|
| |
+ get_params('name', lines),
|
| |
+ get_params('cost', lines),
|
| |
+ )
|
| |
|
| |
non_ml_repofile = r_non_ml_chroot.data.decode('utf-8')
|
| |
ml_repofile = r_ml_first_chroot.data.decode('utf-8')
|
| |
|
| |
- repoids, baseurls, gpgkeys = parse_repofile(non_ml_repofile)
|
| |
+ repoids, baseurls, gpgkeys, _, costs = parse_repofile(non_ml_repofile)
|
| |
assert len(repoids) == len(baseurls) == len(gpgkeys) == 1
|
| |
+ assert len(costs) == 0
|
| |
|
| |
normal_gpgkey = gpgkeys[0]
|
| |
normal_repoid = repoids[0]
|
| |
normal_baseurl = baseurls[0]
|
| |
|
| |
- repoids, baseurls, gpgkeys = parse_repofile(ml_repofile)
|
| |
+ repoids, baseurls, gpgkeys, names, costs = parse_repofile(ml_repofile)
|
| |
assert len(repoids) == len(baseurls) == len(gpgkeys) == 2
|
| |
+ assert len(costs) == 1
|
| |
+ assert costs[0] == '1100'
|
| |
|
| |
assert normal_repoid == repoids[0]
|
| |
assert normal_repoid + ':ml' == repoids[1]
|
| |
+ assert 'x86_64' not in names[0]
|
| |
+ assert '(i386)' not in names[0]
|
| |
+ assert '(i386)' in names[1]
|
| |
assert gpgkeys[0] == gpgkeys[1] == normal_gpgkey
|
| |
assert normal_baseurl == baseurls[0]
|
| |
assert normal_baseurl.rsplit('-', 1)[0] == baseurls[1].rsplit('-', 1)[0]
|
| |
Please see the individual commits for detailed comments. For the record, I
noticed these problems while documenting the new feature.