| |
@@ -15,7 +15,11 @@
|
| |
from fedfind import exceptions as ff_exceptions
|
| |
from fedfind import helpers as ff_helpers
|
| |
from fedfind import release as ff_release
|
| |
- from fedora_image_uploader_messages import AzurePublishedV1, ContainerPublishedV1
|
| |
+ from fedora_image_uploader_messages import (
|
| |
+ AwsPublishedV1,
|
| |
+ AzurePublishedV1,
|
| |
+ ContainerPublishedV1,
|
| |
+ )
|
| |
from fedora_messaging import api, config
|
| |
from fedora_messaging import exceptions as fm_exceptions
|
| |
from fedora_messaging import message as fm_message
|
| |
@@ -55,7 +59,14 @@
|
| |
self.requests = Session()
|
| |
retry_config = Retry(total=5, backoff_factor=1)
|
| |
self.requests.mount("https://", adapters.HTTPAdapter(max_retries=retry_config))
|
| |
- self.handlers = (self.handle_azure, self.handle_container)
|
| |
+ handlers = {
|
| |
+ "aws": self.handle_aws,
|
| |
+ "azure": self.handle_azure,
|
| |
+ "container": self.handle_container,
|
| |
+ }
|
| |
+ self.handlers = [
|
| |
+ handler for conf_key, handler in handlers.items() if conf_key in self.conf.keys()
|
| |
+ ]
|
| |
# tracks the container repos we got images for, for manifest
|
| |
# creation purposes
|
| |
self.container_repos = dict()
|
| |
@@ -170,19 +181,7 @@
|
| |
},
|
| |
)
|
| |
if self.conf["container"].get("publish_amqp_messages", False):
|
| |
- try:
|
| |
- api.publish(message)
|
| |
- except (
|
| |
- fm_exceptions.PublishTimeout,
|
| |
- fm_exceptions.PublishReturned,
|
| |
- ) as e:
|
| |
- _log.warning("Unable to publish ContainerPublishV1 message: %s", str(e))
|
| |
- except fm_exceptions.PublishForbidden as e:
|
| |
- _log.error(
|
| |
- "Unable to publish message to topic %s, permission denied: %s",
|
| |
- message.topic,
|
| |
- str(e),
|
| |
- )
|
| |
+ fallible_publish(message)
|
| |
|
| |
def _missing_manifest_arches(self, source: str, builtarches: Iterable[str]) -> set:
|
| |
"""
|
| |
@@ -255,7 +254,9 @@
|
| |
|
| |
return image_dest
|
| |
|
| |
- def run_playbook(self, playbook: str, variables: dict, workdir: str):
|
| |
+ def run_playbook(
|
| |
+ self, playbook: str, variables: dict, workdir: str
|
| |
+ ) -> ansible_runner.runner.Runner:
|
| |
"""
|
| |
Execute Ansible playbook in workdir using variables.
|
| |
|
| |
@@ -278,6 +279,54 @@
|
| |
if result.rc != 0:
|
| |
_log.error(f"Playbook failed with return code {result.rc}")
|
| |
raise fm_exceptions.Nack()
|
| |
+ return result
|
| |
+
|
| |
+ def handle_aws(self, image: dict, ffrel: ff_release.Release):
|
| |
+ """Handle AWS images."""
|
| |
+ if image.get("subvariant") != "Cloud_Base" or "AmazonEC2" not in image.get("path", ""):
|
| |
+ return
|
| |
+
|
| |
+ with tempfile.TemporaryDirectory() as workdir:
|
| |
+ image_path = self.download_image(image, workdir, decompress=True)
|
| |
+ date = ffrel.metadata["composeinfo"]["payload"]["compose"]["date"]
|
| |
+ respin = ffrel.metadata["composeinfo"]["payload"]["compose"]["respin"]
|
| |
+ ami_name = f"Fedora-Cloud-Base-AmazonEC2.{image['arch']}-{ffrel.relnum}-{date}.{respin}"
|
| |
+ variables = {
|
| |
+ "base_region": self.conf["aws"]["base_region"],
|
| |
+ "s3_bucket_name": self.conf["aws"]["s3_bucket_name"],
|
| |
+ "ami_description": self.conf["aws"]["ami_description"],
|
| |
+ "ami_volume_dev_name": self.conf["aws"]["ami_volume_dev_name"],
|
| |
+ "ami_volume_type": self.conf["aws"]["ami_volume_type"],
|
| |
+ "ami_volume_size": self.conf["aws"]["ami_volume_size"],
|
| |
+ "ami_regions": self.conf["aws"]["ami_regions"],
|
| |
+ "ami_name": ami_name,
|
| |
+ "architecture": image["arch"],
|
| |
+ "image_source": image_path,
|
| |
+ "exclude_from_latest": True,
|
| |
+ "ansible_remote_tmp": workdir,
|
| |
+ }
|
| |
+
|
| |
+ playbook = os.path.join(PLAYBOOKS, "aws.yml")
|
| |
+ run = self.run_playbook(playbook, variables, workdir)
|
| |
+ # extract the AMI ids from the Ansible run
|
| |
+ regions = dict()
|
| |
+ for event in run.events:
+     if event["event"] != "runner_on_ok":
+         continue
+     res = event["event_data"].get("res", {})
+     # Only the AMI-registration task reports an image_id; skip other ok events.
+     if "image_id" not in res:
+         continue
+     region = res["invocation"]["module_args"]["region"]
+     regions[region] = res["image_id"]
|
| |
+ message = AwsPublishedV1(
|
| |
+ body={
|
| |
+ "architecture": image["arch"],
|
| |
+ "compose_id": ffrel.cid,
|
| |
+ "image_name": ami_name,
|
| |
+ "regions": regions,
|
| |
+ },
|
| |
+ )
|
| |
+ if self.conf["aws"].get("publish_amqp_messages", False):
|
| |
+ fallible_publish(message)
|
| |
|
| |
def handle_azure(self, image: dict, ffrel: ff_release.Release):
|
| |
"""
|
| |
@@ -356,18 +405,7 @@
|
| |
# Gate publishing behind a feature flag so we can roll out updates while getting
|
| |
# proper permissions for publishing.
|
| |
if self.conf["azure"].get("publish_amqp_messages", False):
|
| |
- try:
|
| |
- api.publish(message=message)
|
| |
- except (fm_exceptions.PublishTimeout, fm_exceptions.PublishReturned) as e:
|
| |
- # There's always tomorrow for a new image, rather than restarting the whole
|
| |
- # process, we'll skip publishing the message and try again next time.
|
| |
- _log.warning("Unable to publish AzurePublishV1 message: %s", str(e))
|
| |
- except fm_exceptions.PublishForbidden as e:
|
| |
- _log.error(
|
| |
- "Unable to publish message to topic %s, permission denied: %s",
|
| |
- message.topic,
|
| |
- str(e),
|
| |
- )
|
| |
+ fallible_publish(message)
|
| |
try:
|
| |
self.azure_cleanup_images()
|
| |
except Exception:
|
| |
@@ -455,7 +493,7 @@
|
| |
|
| |
def handle_container(self, image: dict, ffrel: ff_release.Release):
|
| |
"""Handle container images."""
|
| |
- registries = self.conf.get("container", {}).get("registries")
|
| |
+ registries = self.conf["container"].get("registries")
|
| |
if not registries:
|
| |
# we can't do anything if no registries are configured
|
| |
return
|
| |
@@ -500,3 +538,23 @@
|
| |
self.container_repos[repo].append(arch)
|
| |
else:
|
| |
self.container_repos[repo] = [arch]
|
| |
+
|
| |
+
|
| |
+ def fallible_publish(message):
|
| |
+ """
|
| |
+ Helper to publish AMQP messages fallibly.
|
| |
+
|
| |
+ Rather than try really hard to publish every message, if the broker is unavailable it's
|
| |
+ reasonable to just wait until the next image (which happens daily) to get built and try
|
| |
+ again then.
|
| |
+ """
|
| |
+ try:
|
| |
+ api.publish(message=message)
|
| |
+ except (fm_exceptions.PublishTimeout, fm_exceptions.PublishReturned) as e:
|
| |
+ _log.warning("Unable to publish %s message: %s", message.__class__.__name__, str(e))
|
| |
+ except fm_exceptions.PublishForbidden as e:
|
| |
+ _log.error(
|
| |
+ "Unable to publish message to topic %s, permission denied: %s",
|
| |
+ message.topic,
|
| |
+ str(e),
|
| |
+ )
|
| |
Untested; at minimum, the message-publishing path still needs to be verified against a broker.