@@ -135,8 +135,27 @@
 	./
 	echo "createrepo read $(wc -l < $readpkglist) rpms from disk (s3)"
 	rm $readpkglist
-	# Copy the resulting repodata back to the target
-	rsync -avh --delete "${outputdir}/repodata/" ./repodata/
+
+	# Copy the new repodata files to the target
+	rsync -avh --exclude 'repomd.xml' "${outputdir}/repodata/" './repodata/'
+	# Copy the repomd separately in order to force no caching for that object
+	if [ -n "${S3BUCKET:-}" -a -n "${AWS_ACCESS_KEY_ID:-}" -a -n "${AWS_SECRET_ACCESS_KEY:-}" ]; then
+		aws s3 cp \
+			--cache-control max-age=60 \
+			"${outputdir}/repodata/repomd.xml" \
+			"s3://${S3BUCKET}/fedora/${release}/${arch}/repodata/repomd.xml"
+	else
+		if [ -n "${S3BUCKET:-}" ]; then
+			echo -n "ERROR: You're running against an S3 BUCKET, " 1>&2
+			echo "but no creds for 'aws s3 cp' operation." 1>&2
+		fi
+		# If no creds then maybe we're doing development on a local filesystem
+		# Just copy the file.
+		cp -av "${outputdir}/repodata/repomd.xml" './repodata/repomd.xml'
+	fi
+	# Delete files that are no longer needed
+	rsync -avh --exclude 'repomd.xml' --delete "${outputdir}/repodata/" './repodata/'
+
 	popd >/dev/null
 done
We hit issues where CloudFront had cached the repomd.xml file: the cached
copy became out of date and broke the repository, because the metadata
files it pointed to had already been deleted when a new set of repodata
files was generated.