From 6c08456f65f72bb1380dc0634ad28541a299e9b7 Mon Sep 17 00:00:00 2001
From: root
Date: Apr 30 2019 07:34:52 +0000
Subject: Automatic update of defaults


---

diff --git a/nodepool/_local_hypervisor_openshift.yaml b/nodepool/_local_hypervisor_openshift.yaml
new file mode 100644
index 0000000..404b62b
--- /dev/null
+++ b/nodepool/_local_hypervisor_openshift.yaml
@@ -0,0 +1,2 @@
+# This file is managed by sfconfig, do not edit manually
+---
diff --git a/nodepool/_local_hypervisor_runc.yaml b/nodepool/_local_hypervisor_runc.yaml
new file mode 100644
index 0000000..666c99a
--- /dev/null
+++ b/nodepool/_local_hypervisor_runc.yaml
@@ -0,0 +1 @@
+# This file is managed by sfconfig, do not edit manually
diff --git a/nodepool/elements/README b/nodepool/elements/README
new file mode 100644
index 0000000..257e142
--- /dev/null
+++ b/nodepool/elements/README
@@ -0,0 +1,5 @@
+Custom diskimage-builder elements to be used by nodepool-builder.
+
+Nodepool is configured to use these by default (with low precedence):
+* https://softwarefactory-project.io/r/software-factory/sf-elements
+* git://git.openstack.org/openstack-infra/project-config/nodepool/elements
diff --git a/nodepool/elements/virt-customize/README b/nodepool/elements/virt-customize/README
new file mode 100644
index 0000000..7dfaa32
--- /dev/null
+++ b/nodepool/elements/virt-customize/README
@@ -0,0 +1 @@
+This is a fake element to get the script onto the nodepool-builder host
diff --git a/nodepool/elements/virt-customize/disk-image-create b/nodepool/elements/virt-customize/disk-image-create
new file mode 100755
index 0000000..bcc9fe1
--- /dev/null
+++ b/nodepool/elements/virt-customize/disk-image-create
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
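+
+# Nodepool invokes this script through the standard diskimage-builder
+# CLI, but instead of running dib it translates the arguments into an
+# ansible-playbook run: the first element names the playbook, -o becomes
+# the image_output variable and -t selects the raw/qcow2 conversions.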
+
+
+import argparse
+import subprocess
+
+
+def main():
+    # Fake dib interface
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-x", action='store_true', help="noop")
+    parser.add_argument("-t", help="Image types")
+    parser.add_argument("--checksum", action='store_true', help="noop")
+    parser.add_argument("--no-tmpfs", action='store_true', help="noop")
+    parser.add_argument("--qemu-img-options", help="noop")
+    parser.add_argument("-o", help="Image output")
+    parser.add_argument("elements", nargs='+', help="noop")
+    args = parser.parse_args()
+    cmd = ["sudo", "ansible-playbook", "-v"]
+
+    # The first DIB element is the playbook name
+    cmd.append("%s.yaml" % args.elements[0])
+
+    # Set the image output var
+    cmd.extend(["-e", "image_output=%s" % args.o])
+
+    # Look for image types
+    img_types = set(args.t.split(','))
+    unsupported_types = img_types.difference(set(('raw', 'qcow2')))
+    if unsupported_types:
+        raise RuntimeError("Unsupported type: %s" % unsupported_types)
+    if "raw" in img_types:
+        cmd.extend(["-e", "raw_type=True"])
+    if "qcow2" in img_types:
+        cmd.extend(["-e", "qcow2_type=True"])
+
+    # Execute the playbook
+    print("Running: ", cmd)
+    return subprocess.Popen(cmd, cwd="/etc/nodepool/elements/virt-customize").wait()
+
+
+if __name__ == "__main__":
+    exit(main())
diff --git a/nodepool/elements/virt-customize/fedora-cloud.yaml b/nodepool/elements/virt-customize/fedora-cloud.yaml
new file mode 100644
index 0000000..650a587
--- /dev/null
+++ b/nodepool/elements/virt-customize/fedora-cloud.yaml
@@ -0,0 +1,144 @@
+---
+- name: Build a fedora cloud image suitable for Zuul
+  hosts: localhost
+  vars:
+    image_url: https://download.fedoraproject.org/pub/fedora/linux/releases/29/Cloud/x86_64/images/Fedora-Cloud-Base-29-1.2.x86_64.qcow2
+    image_checksum: "sha256:a30549d620bf6bf41d30a9a58626e59dfa70bb011fd7d50f6c4511ad2e479a39"
+    image_cache_file: "/var/cache/nodepool/Fedora-Cloud-Base-29-1.2.x86_64.qcow2"
+    image_cache_dir: "/var/cache/nodepool"
+    image_tmp_dir: "/var/tmp/{{ image_output | basename }}"
+    extra_packages:
+      # For validate-host and prepare-workspace
+      - traceroute
+      - iproute
+      - git
+      - rsync
+      # Extra system tools
+      - pigz
+      - bridge-utils
+      - wget
+      - unzip
+      # Basic CI tools
+      - make
+      - gcc
+      - patch
+      # RPM building tools
+      - redhat-lsb-core
+      - redhat-rpm-config
+      - rpm-build
+      - rpm-sign
+      - rpmlint
+      - createrepo
+      - gnupg2
+      - expect
+      # Devel libraries
+      - libffi-devel
+      - libpcap-devel
+      - libseccomp-devel
+      - libxml2-devel
+      - libxslt-devel
+      - mariadb-devel
+      - openldap-devel
+      - openssl-devel
+      - python-devel
+      - readline-devel
+      - ruby-devel
+      - systemd-devel
+      - zlib-devel
+      # Python
+      - PyYAML
+      - python-virtualenv
+      - python-six
+  tasks:
+    - name: Ensure libvirt is started
+      service:
+        name: libvirtd
+        state: started
+
+    - name: Check if image is already downloaded
+      stat:
+        path: "{{ image_cache_file }}"
+      register: _image_cache_file_stat
+
+    - name: Download the image when not cached
+      get_url:
+        url: "{{ image_url }}"
+        dest: "{{ image_cache_file }}"
+        checksum: "{{ image_checksum }}"
+      when: not _image_cache_file_stat.stat.exists
+
+    - name: Update the cache
+      command: "virt-customize -m 1024 -a {{ image_cache_file }} --update"
+
+    - name: Customize the image for Zuul CI
+      block:
+        - name: Create tmp directory
+          file:
+            path: "{{ image_tmp_dir }}"
+            state: directory
+            mode: 0755
+
+        - name: Set filename copy fact
+          set_fact:
+            image_file: "{{ image_tmp_dir }}/{{ image_cache_file | basename }}"
+
+        - name: Copy the image
+          copy:
+            src: "{{ image_cache_file }}"
+            dest: "{{ image_file }}"
+            remote_src: true
+            mode: 0644
+
+        - name: Prepare the sudoers file
+          copy:
+            content: |
+              Defaults !requiretty
+              zuul-worker ALL=(ALL) NOPASSWD:ALL
+            dest: "{{ image_tmp_dir }}/zuul-worker"
+
+        - name: Prepare the authorized_keys file
+          copy:
+            src: /var/lib/nodepool/.ssh/zuul_rsa.pub
+            dest: "{{ image_tmp_dir }}/authorized_keys"
+            remote_src: true
+
+        - name: Customize the image
+          command: >-
+            virt-customize -m 1024 -a {{ image_file }}
+            {# Ensure the zuul-worker user exists #}
+            --run-command 'adduser -m zuul-worker'
+            {# Setup authorized_keys #}
+            --mkdir '/home/zuul-worker/.ssh'
+            --chmod '0700:/home/zuul-worker/.ssh'
+            --copy-in '{{ image_tmp_dir }}/authorized_keys:/home/zuul-worker/.ssh/'
+            --chmod '0600:/home/zuul-worker/.ssh/authorized_keys'
+            --run-command 'chown -R zuul-worker:zuul-worker /home/zuul-worker/.ssh/'
+            {# Setup sudoers file #}
+            --copy-in '{{ image_tmp_dir }}/zuul-worker:/etc/sudoers.d/'
+            --chmod '0440:/etc/sudoers.d/zuul-worker'
+            {# Install extra packages #}
+            --install '{{ extra_packages | join(',') }}'
+            {# Disable IPv6 because rdo-cloud does not route v6 #}
+            --append-line '/etc/sysctl.conf:net.ipv6.conf.all.disable_ipv6 = 1'
+            --append-line '/etc/sysctl.conf:net.ipv6.conf.default.disable_ipv6 = 1'
+            --append-line '/etc/sysconfig/network:IPV6INIT=no'
+            --append-line '/etc/sysconfig/network:IPV6_AUTOCONF=no'
+            --append-line '/etc/sysconfig/network:IPV6_DEFROUTE=no'
+            {# Ensure yum only resolves using IPv4 #}
+            --append-line '/etc/yum.conf:ip_resolve=4'
+            {# Ensure SELinux labels are correct #}
+            --selinux-relabel
+
+        - name: Create raw file
+          command: "qemu-img convert -O raw {{ image_file }} {{ image_output }}.raw"
+          when: raw_type | default(False) | bool
+
+        - name: Create qcow file
+          command: "mv {{ image_file }} {{ image_output }}.qcow2"
+          when: qcow2_type | default(False) | bool
+
+      always:
+        - name: Remove tmp directory
+          file:
+            path: "{{ image_tmp_dir }}"
+            state: absent
diff --git a/nodepool/nodepool.yaml b/nodepool/nodepool.yaml
new file mode 100644
index 0000000..12f8370
--- /dev/null
+++ b/nodepool/nodepool.yaml
@@ -0,0 +1,35 @@
+---
+diskimages:
+  - name: cloud-fedora
+    username: zuul-worker
+    elements:
+      # This is a fake element to reference the playbook
+      - fedora-cloud
+    env-vars:
+      PATH: /etc/nodepool/elements/virt-customize:/bin:/sbin:/usr/bin:/usr/sbin
+    formats:
+      - raw
+
+labels:
+  - name: cloud-fedora
+
+providers:
+  - name: rdo-cloud
+    cloud: rdocloud
+    launch-retries: 15
+    region-name: regionOne
+    clean-floating-ips: true
+    image-name-format: '{image_name}-{timestamp}'
+    boot-timeout: 240
+    rate: 1.0
+    diskimages:
+      - name: cloud-fedora
+        config-drive: true
+    pools:
+      - name: main
+        max-servers: 3
+        labels:
+          - name: cloud-fedora
+            min-ram: 2000
+            flavor-name: rdo.m1.small
+            diskimage: cloud-fedora
diff --git a/nodepool/openshift.yaml b/nodepool/openshift.yaml
new file mode 100644
index 0000000..64bb998
--- /dev/null
+++ b/nodepool/openshift.yaml
@@ -0,0 +1,45 @@
+# Uncomment to enable the openshift provider
+#---
+# After the provider is registered in sfconfig.yaml, grab the context name using:
+# sudo -u nodepool oc config get-contexts
+#
+#
+# To use the openshift driver, a self-provisioner service account is needed:
+# Request the cluster operator to create:
+# oc create sa nodepool
+# oc adm policy add-cluster-role-to-user self-provisioner --serviceaccount=nodepool
+# oc policy add-role-to-user admin --serviceaccount=nodepool
+# oc sa get-token nodepool
+# Then register the token in sfconfig.yaml
+#
+#providers:
+#  - name: openshift01
+#    driver: openshift
+#    context: self-provisioner-service-account-context-name
+#    pools:
+#      - name: zuul-ci
+#        labels:
+#          - name: openshift-project
+#            type: project
+#          - name: openshift-pod-fedora
+#            type: pod
+#            image: docker.io/fedora:28
+#
+#
+###############################################################################
+# Or use the openshiftpods driver with a regular service account:
+# oc new-project nodepool
+# oc create sa nodepool
+# oc policy add-role-to-user admin --serviceaccount=nodepool
+# oc sa get-token nodepool
+# Then register the token in sfconfig.yaml
+#
+#providers:
+#  - name: openshift01
+#    driver: openshiftpods
+#    context: "nodepool/openshift-example-com:8443/system:serviceaccount:nodepool:nodepool"
+#    pools:
+#      - name: nodepool
+#        labels:
+#          - name: openshift-pod
+#            image: docker.io/fedora:28
diff --git a/nodepool/runC/_linters-packages.yaml b/nodepool/runC/_linters-packages.yaml
new file mode 100644
index 0000000..45d6921
--- /dev/null
+++ b/nodepool/runC/_linters-packages.yaml
@@ -0,0 +1,20 @@
+# This file is managed by ansible, do not edit directly
+
+- name: Define linters packages fact
+  set_fact:
+    linters_pkgs:
+      - yamllint
+      - bashate
+      - python-flake8
+      - python-tox
+      - traceroute
+      - rpm-build
+      - python2-coverage
+      - python2-pelican
+      - python2-sphinx
+      - doc8
+
+- name: Install linters packages
+  yum:
+    name: "{{ linters_pkgs }}"
+    state: present
diff --git a/nodepool/runC/customize.yaml b/nodepool/runC/customize.yaml
new file mode 100644
index 0000000..2d13aeb
--- /dev/null
+++ b/nodepool/runC/customize.yaml
@@ -0,0 +1,6 @@
+---
+# Tasks in this file are executed on the hypervisor-oci node by config-update
+- include_tasks: _linters-packages.yaml
+
+# Uncomment to configure a fedora environment
+# - include_tasks: fedora-rootfs.yaml
diff --git a/nodepool/runC/fedora-rootfs.yaml b/nodepool/runC/fedora-rootfs.yaml
new file mode 100644
index 0000000..0c7e6ee
--- /dev/null
+++ b/nodepool/runC/fedora-rootfs.yaml
@@ -0,0 +1,66 @@
+- name: Set fedora version fact
+  set_fact:
+    fedora_version: 28
+    target_dir: /srv/f28
+
+- name: Set bwrap command fact
+  set_fact:
+    bwrap_command: "bwrap --unshare-pid --bind {{ target_dir }} / --proc /proc --dev /dev"
+
+- name: Extract fedora image
+  shell: |
+    set -ex
+    skopeo copy docker://fedora:{{ fedora_version }} dir:{{ target_dir }}-cache
+    mkdir -p {{ target_dir }}
+    tar -C {{ target_dir }} -xzf {{ target_dir }}-cache/$(python -c "import json; print(json.load(open('{{ target_dir }}-cache/manifest.json'))['layers'][0]['digest']).split(':')[1]")
+    cp /etc/resolv.conf /srv/f{{ fedora_version }}/etc/resolv.conf
+    {{ bwrap_command }} dnf install -y openssh-server
+    rm -Rf {{ target_dir }}-cache
+  args:
+    creates: "{{ target_dir }}/sbin/sshd"
+
+- name: Create sshd server keys
+  command: "{{ bwrap_command }} /usr/libexec/openssh/sshd-keygen rsa"
+  args:
+    creates: "{{ target_dir }}/etc/ssh/ssh_host_rsa_key"
+
+- name: Read host user id
+  command: awk -F ":" '/zuul-worker/ { print $3 }' /etc/passwd
+  register: _host_uid
+
+- name: Create zuul-worker user
+  command: "{{ bwrap_command }} useradd -u {{ _host_uid.stdout }} -m zuul-worker"
+  args:
+    creates: "{{ target_dir }}/home/zuul-worker"
+
+- name: Create /home/zuul-worker/.ssh
+  file:
+    path: "{{ target_dir }}/home/zuul-worker/.ssh"
+    state: directory
+    mode: 0700
+    owner: zuul-worker
+
+- name: Add ssh key
+  copy:
+    src: /var/lib/software-factory/bootstrap-data/ssh_keys/zuul_rsa.pub
+    dest: "{{ target_dir }}/home/zuul-worker/.ssh/authorized_keys"
+    owner: zuul-worker
+
+- name: Ensure src dir exists
+  file:
+    path: "{{ target_dir }}/home/zuul-worker/src"
+    state: directory
+    owner: zuul-worker
+
+- name: Install packages
+  command: >
+    {{ bwrap_command }} dnf install -y
+    iproute rsync git traceroute
+    python3-pip python3-devel
+    python3-tox python3-flake8 python3-ansible-lint python3-pycodestyle yamllint
+    python3-sphinx python3-pelican python3-jinja2 python3-coverage
+    rpm-build make gcc
+
+- name: Install pip packages
+  command: >
+    {{ bwrap_command }} pip3 install bashate doc8
diff --git a/playbooks/base/README b/playbooks/base/README
new file mode 100644
index 0000000..4450121
--- /dev/null
+++ b/playbooks/base/README
@@ -0,0 +1 @@
+Base job playbooks
diff --git a/playbooks/base/post.yaml b/playbooks/base/post.yaml
new file mode 100644
index 0000000..5ecb861
--- /dev/null
+++ b/playbooks/base/post.yaml
@@ -0,0 +1,24 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  roles:
+    - role: add-fileserver
+      fileserver: "{{ site_sflogs }}"
+    - role: ara-report
+      # This depends-on https://review.openstack.org/577675
+      ara_report_run: True
+      ara_report_type: database
+      ara_report_path: ara-report
+
+- hosts: "{{ site_sflogs.fqdn }}"
+  gather_facts: false
+  tasks:
+    # Use a block because play vars don't take precedence over role vars
+    - block:
+        - import_role: name=upload-logs
+        - import_role: name=emit-job-report
+        - import_role: name=buildset-artifacts-location
+      vars:
+        zuul_log_url: "https://fedora.softwarefactory-project.io/logs"
+        zuul_logserver_root: "{{ site_sflogs.path }}"
+
diff --git a/playbooks/base/pre.yaml b/playbooks/base/pre.yaml
new file mode 100644
index 0000000..a8035b7
--- /dev/null
+++ b/playbooks/base/pre.yaml
@@ -0,0 +1,16 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  tasks:
+    - block:
+        - import_role: name=emit-job-header
+        # This depends-on https://review.openstack.org/578234
+        - import_role: name=log-inventory
+      vars:
+        zuul_log_url: "https://fedora.softwarefactory-project.io/logs"
+
+- hosts: all
+  roles:
+    - prepare-workspace
+    - role: validate-host
+    - add-build-sshkey
diff --git a/playbooks/config/check-fetch-artifacts.yaml b/playbooks/config/check-fetch-artifacts.yaml
new file mode 100644
index 0000000..288f4e1
--- /dev/null
+++ b/playbooks/config/check-fetch-artifacts.yaml
@@ -0,0 +1,14 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  tasks:
+    - name: Set speculative config path
+      set_fact:
+        config_root: "{{ zuul.executor.src_root }}/{{ zuul.project.canonical_name }}"
+
+    - name: Fetch artifacts
+      synchronize:
+        src: "{{ config_root }}/build"
+        dest: "{{ zuul.executor.log_root }}/logs"
+        mode: pull
+      no_log: True
diff --git a/playbooks/config/check.yaml b/playbooks/config/check.yaml
new file mode 100644
index 0000000..0d3450d
--- /dev/null
+++ b/playbooks/config/check.yaml
@@ -0,0 +1,160 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  tasks:
+    - name: Set speculative config path
+      set_fact:
+        config_root: "{{ zuul.executor.src_root }}/{{ zuul.project.canonical_name }}"
+
+    - name: Fetch default config
+      get_url:
+        url: "{{ gateway_url }}/_defconf.tgz"
+        dest: "{{ config_root }}/"
+        retries: 30
+        delay: 1
+
+    - name: Create defconf directory
+      file:
+        path: "{{ config_root }}/defconf"
+        state: directory
+
+    - name: Extract default config
+      unarchive:
+        src: "{{ config_root }}/_defconf.tgz"
+        dest: "{{ config_root }}/defconf/"
+
+    - name: include arch.yaml
+      include_vars:
+        file: "{{ config_root }}/defconf/arch.yaml"
+        name: arch
+
+    - name: Create build directory to merge configuration
+      file:
+        path: "{{ config_root }}/build"
+        state: directory
+
+    - name: Tenant env config-check preparation
+      block:
+        - name: Create defconf-master directory
+          file:
+            path: "{{ config_root }}/defconf-master"
+            state: directory
+
+        - name: Fetch master SF default config
+          get_url:
+            url: "{{ master_sf_url }}/_defconf.tgz"
+            dest: "{{ config_root }}/_defconf-master.tgz"
+            retries: 30
+            delay: 1
+
+        - name: Extract master SF default config
+          unarchive:
+            src: "{{ config_root }}/_defconf-master.tgz"
+            dest: "{{ config_root }}/defconf-master/"
+
+        - name: Overwrite with master SF fake zuul.conf
+          copy:
+            remote_src: true
+            src: "{{ config_root }}/defconf-master/defconf-zuul.conf"
+            dest: "{{ config_root }}/defconf/defconf-zuul.conf"
+
+        - set_fact:
+            tenant_options: "--tenant --master-sf-url {{ master_sf_url }}"
+      when: tenant_config is defined and tenant_config
+
+    - name: Copy service_user password into the workspace
+      copy:
+        content: "{{ service_user.password }}"
+        dest: "{{ config_root }}/.service_user_password"
+      no_log: true
+
+    - name: Check resources changes
+      command: env - /usr/bin/managesf-resources remote-validate --remote-gateway {{ gateway_url }}
+      args:
+        chdir: "{{ config_root }}"
+
+    - name: Check gerrit replication
+      command: git config -f gerrit/replication.config -l
+      args:
+        chdir: "{{ config_root }}"
+      when: '"gerrit" in arch.roles'
+
+    # Use "env -" to remove the zuul-executor environment that modifies the python runtime (e.g. defines a 'zuul' python module)
+    - name: Check gerrit commentlinks
+      command: env - python -c "import yaml; 'commentlinks' in yaml.safe_load(open('gerrit/commentlinks.yaml'))"
+      args:
+        chdir: "{{ config_root }}"
+      when: '"gerrit" in arch.roles'
+
+    - name: Check dashboards
+      command: env - python2 /usr/libexec/software-factory/sf-update-dashboard --check --input dashboards/
+      args:
+        chdir: "{{ config_root }}"
+      when: '"gerrit" in arch.roles'
+
+    - name: Check policy file
+      command: env - python -c "import yaml; yaml.safe_load(open('policies/policy.yaml'))"
+      args:
+        chdir: "{{ config_root }}"
+
+    - name: Validate repoxplorer configuration
+      block:
+        - name: Check syntax errors in repoxplorer definition files
+          command: >
+            env - repoxplorer-config-validate --config {{ config_root }}/defconf/defconf-repoxplorer.py
+          args:
+            chdir: "{{ config_root }}"
+      when: '"repoxplorer" in arch.roles'
+
+    - name: Validate nodepool configuration
+      block:
+        - name: Install fake _nodepool.yaml
+          copy:
+            remote_src: true
+            src: "{{ config_root }}/defconf/defconf-nodepool.yaml"
+            dest: "{{ config_root }}/build/_nodepool.yaml"
+
+        - name: Merge nodepool config repo files
+          command: >
+            env - /usr/share/sf-config/scripts/sf-nodepool-conf-merger.py
+            nodepool/ build/nodepool.yaml
+          args:
+            chdir: "{{ config_root }}"
+
+        - name: Run nodepool config-validate
+          command: >
+            env - /opt/rh/rh-python35/root/bin/nodepool -c build/nodepool.yaml
+            config-validate
+          args:
+            chdir: "{{ config_root }}"
+      when: '"nodepool-launcher" in arch.roles'
+
+    - name: Validate zuul configuration
+      block:
+        - name: Install fake zuul.conf
+          copy:
+            remote_src: true
+            src: "{{ config_root }}/defconf/defconf-zuul.conf"
+            dest: "{{ config_root }}/build/zuul.conf"
+
+        - name: Merge zuul tenant config
+          command: >
+            env - /bin/managesf-configuration zuul
+            --cache-dir {{ config_root }}/../.cache --config-dir {{ config_root }}
+            --gateway-url {{ gateway_url }} {{ tenant_options | default('') }} --output build/main.yaml
+          args:
+            chdir: "{{ config_root }}"
+
+        - name: Validate zuul config syntax
+          command: >
+            env - /opt/rh/rh-python35/root/bin/zuul -c zuul.conf tenant-conf-check
+          args:
+            chdir: "{{ config_root }}/build"
+
+    - name: Validate metrics dashboards
+      block:
+        - name: Check syntax errors in metrics dashboards
+          shell: |
+            find . -regextype posix-egrep -regex '.*.(yaml|yml)$' | xargs -I yaml grafana-dashboard validate yaml
+          args:
+            chdir: "{{ config_root }}/metrics"
+      when: '"grafana" in arch.roles'
diff --git a/playbooks/config/update.yaml b/playbooks/config/update.yaml
new file mode 100644
index 0000000..86b4e19
--- /dev/null
+++ b/playbooks/config/update.yaml
@@ -0,0 +1,5 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  tasks:
+    - include_tasks: update_local.yaml
diff --git a/playbooks/config/update_local.yaml b/playbooks/config/update_local.yaml
new file mode 100644
index 0000000..f0712bc
--- /dev/null
+++ b/playbooks/config/update_local.yaml
@@ -0,0 +1,26 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: Create SSH private key tempfile
+  tempfile:
+    state: file
+  register: ssh_private_key_tmp
+
+- name: Create SSH private key from secret
+  copy:
+    content: "{{ site_install_server.ssh_private_key }}"
+    dest: "{{ ssh_private_key_tmp.path }}"
+    mode: 0600
+
+- name: Add zuul ssh key
+  command: "ssh-add {{ ssh_private_key_tmp.path }}"
+
+- name: Remove SSH private key from disk
+  command: "shred {{ ssh_private_key_tmp.path }}"
+
+- name: Add site_install_server server to known hosts
+  known_hosts:
+    name: "{{ site_install_server.fqdn }}"
+    key: "{{ site_install_server.ssh_known_hosts }}"
+
+- name: Run config update
+  command: "ssh root@{{ site_install_server.fqdn }} sf_configrepo_update {{ (zuul | zuul_legacy_vars)['ZUUL_NEWREV'] }}"
diff --git a/playbooks/config/update_tenant.yaml b/playbooks/config/update_tenant.yaml
new file mode 100644
index 0000000..1298a4b
--- /dev/null
+++ b/playbooks/config/update_tenant.yaml
@@ -0,0 +1,42 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: Discover path of config repository
+  command: git rev-parse --show-toplevel
+  register: config_path
+
+- name: Get last change sha
+  command: "git --git-dir={{ config_path.stdout }}/.git log -n1 --pretty=format:'%h' --no-merges"
+  register: git_log
+
+- name: Get last change on resources sha
+  command: "git --git-dir={{ config_path.stdout }}/.git log -n1 --pretty=format:'%h' --no-merges -- resources zuul"
+  register: git_log_resources
+
+- block:
+    - name: Create SSH private key tempfile
+      tempfile:
+        state: file
+      register: ssh_private_key_tmp
+
+    - name: Create SSH private key from secret
+      copy:
+        content: "{{ site_tenant_update.ssh_private_key }}"
+        dest: "{{ ssh_private_key_tmp.path }}"
+        mode: 0600
+
+    - name: Add zuul ssh key
+      command: "ssh-add {{ ssh_private_key_tmp.path }}"
+
+    - name: Remove SSH private key from disk
+      command: "shred {{ ssh_private_key_tmp.path }}"
+
+    - name: Add site_tenant_update server to known hosts
+      known_hosts:
+        name: "{{ site_tenant_update.fqdn }}"
+        key: "{{ site_tenant_update.ssh_known_hosts }}"
+
+    - name: Run tenant_update
+      command: "ssh root@{{ site_tenant_update.fqdn }} sf_tenant_update"
+  when: git_log_resources.stdout == git_log.stdout
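+# Note: the guard above means the tenant update only runs when the most
+# recent change to the config repo touched the resources/ or zuul/ dirs.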
diff --git a/playbooks/openshift/build-project.yaml b/playbooks/openshift/build-project.yaml
new file mode 100644
index 0000000..957c957
--- /dev/null
+++ b/playbooks/openshift/build-project.yaml
@@ -0,0 +1,84 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: prepare dumb bare clone of future state
+  git:
+    repo: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}"
+    dest: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git"
+    bare: yes
+
+- name: update server info for dumb http transport
+  command: git update-server-info
+  args:
+    chdir: "{{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git"
+
+- name: create project dir on http server
+  command: >
+    {{ oc_command }} exec {{ zm_name }} -- mkdir -p {{ zuul.project.src_dir }}.git
+
+- name: copy project to http server
+  command: >
+    {{ oc_command }} rsync -q --progress=false
+    {{ zuul.executor.work_root }}/{{ zuul.project.src_dir }}.git/
+    {{ zm_name }}:/opt/app-root/src/{{ zuul.project.src_dir }}.git/
+  no_log: true
+
+- name: create project ImageStream spec
+  openshift_raw:
+    state: present
+    namespace: "{{ zuul.resources['project'].namespace }}"
+    context: "{{ zuul.resources['project'].context }}"
+    definition:
+      apiVersion: v1
+      kind: ImageStream
+      metadata:
+        generation: 1
+        labels:
+          app: "{{ zuul.project.short_name }}"
+        name: "{{ zuul.project.short_name }}"
+      spec:
+        lookupPolicy:
+          local: false
+  register: _image_stream
+
+- name: create project BuildConfig spec
+  openshift_raw:
+    state: present
+    namespace: "{{ zuul.resources['project'].namespace }}"
+    context: "{{ zuul.resources['project'].context }}"
+    definition:
+      apiVersion: v1
+      kind: BuildConfig
+      metadata:
+        labels:
+          app: "{{ zuul.project.short_name }}"
+        name: "{{ zuul.project.short_name }}"
+      spec:
+        output:
+          to:
+            kind: ImageStreamTag
+            name: '{{ zuul.project.short_name }}:latest'
+        runPolicy: Serial
+        source:
+          git:
+            ref: master
+            uri: 'http://staging-http-server:8080/{{ zuul.project.src_dir }}.git'
+          type: Git
+        strategy:
+          sourceStrategy:
+            from:
+              kind: ImageStreamTag
+              name: '{{ base_image }}'
+              namespace: openshift
+          type: Source
+        triggers:
+          - type: ImageChange
+          - type: ConfigChange
+
+- name: wait for project image build to complete
+  command: >
+    {{ oc_command }} get builds
+    -o "jsonpath={.items[?(@.metadata.labels.buildconfig!='staging-http-server')].status.phase}"
+  register: _project_build
+  retries: 600
+  delay: 1
+  until: "'Complete' in _project_build.stdout"
diff --git a/playbooks/openshift/deploy-project.yaml b/playbooks/openshift/deploy-project.yaml
new file mode 100644
index 0000000..51c55b8
--- /dev/null
+++ b/playbooks/openshift/deploy-project.yaml
@@ -0,0 +1,60 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: start the project
+  openshift_raw:
+    state: present
+    namespace: "{{ zuul.resources['project'].namespace }}"
+    context: "{{ zuul.resources['project'].context }}"
+    definition:
+      apiVersion: v1
+      kind: DeploymentConfig
+      metadata:
+        generation: 2
+        labels:
+          app: "{{ zuul.project.short_name }}"
+        name: "{{ zuul.project.short_name }}"
+      spec:
+        replicas: 1
+        selector:
+          deploymentconfig: "{{ zuul.project.short_name }}"
+        strategy:
+          resources: {}
+          type: Rolling
+        template:
+          metadata:
+            labels:
+              app: "{{ zuul.project.short_name }}"
+              deploymentconfig: "{{ zuul.project.short_name }}"
+          spec:
+            containers:
+              - image: "{{ _image_stream.result.status.dockerImageRepository }}"
+                name: "{{ zuul.project.short_name }}"
+                command: [ "/bin/bash", "-c", "--" ]
+                args: [ "while true; do sleep 30; done;" ]
+                ports:
+                  - containerPort: 8080
+                    protocol: TCP
+                  - containerPort: 8443
+                    protocol: TCP
+                resources: {}
+            dnsPolicy: ClusterFirst
+            restartPolicy: Always
+            schedulerName: default-scheduler
+            securityContext: {}
+            terminationGracePeriodSeconds: 30
+      test: false
+
+- name: get project pod name
+  command: >
+    {{ oc_command }} get pods --field-selector=status.phase=Running
+    -o "jsonpath={.items[?(@.metadata.labels.app=='{{ zuul.project.short_name }}')].metadata.name}"
+  register: _pod_name
+  retries: 600
+  delay: 1
+  until: "zuul.project.short_name in _pod_name.stdout"
+
+- name: add pod to the job inventory
+  zuul_return:
+    data:
+      zuul:
+        inventory: '{"{{ zuul.project.short_name }}": {"ansible_connection": "kubectl", "ansible_host": "{{ _pod_name.stdout }}"}}'
diff --git a/playbooks/openshift/pre.yaml b/playbooks/openshift/pre.yaml
new file mode 100644
index 0000000..b2620c6
--- /dev/null
+++ b/playbooks/openshift/pre.yaml
@@ -0,0 +1,34 @@
+---
+- hosts: localhost
+  tasks:
+    - block:
+        - import_role: name=emit-job-header
+        # We need those tasks to use log-inventory, see: https://review.openstack.org/577674
+        - name: Define zuul_info_dir fact
+          set_fact:
+            zuul_info_dir: "{{ zuul.executor.log_root }}/zuul-info"
+
+        - name: Ensure Zuul Ansible directory exists
+          delegate_to: localhost
+          run_once: true
+          file:
+            path: "{{ zuul_info_dir }}"
+            state: directory
+
+        - name: Define inventory_file fact
+          set_fact:
+            inventory_file: "/tmp/{{ zuul.build }}/ansible/inventory.yaml"
+
+        - import_role: name=log-inventory
+      vars:
+        zuul_log_url: "https://fedora.softwarefactory-project.io/logs"
+
+    - name: Set oc_command fact
+      set_fact:
+        oc_command: >
+          oc --context "{{ zuul.resources['project'].context }}"
+          --namespace "{{ zuul.resources['project'].namespace }}"
+
+    - include_tasks: prepare-namespace.yaml
+    - include_tasks: build-project.yaml
+    - include_tasks: deploy-project.yaml
diff --git a/playbooks/openshift/prepare-namespace.yaml b/playbooks/openshift/prepare-namespace.yaml
new file mode 100644
index 0000000..d583469
--- /dev/null
+++ b/playbooks/openshift/prepare-namespace.yaml
@@ -0,0 +1,80 @@
+# This file is managed by ansible, do not edit directly
+---
+- name: create staging-http DeploymentConfig
+  openshift_raw:
+    state: present
+    namespace: "{{ zuul.resources['project'].namespace }}"
+    context: "{{ zuul.resources['project'].context }}"
+    definition:
+      apiVersion: v1
+      kind: DeploymentConfig
+      metadata:
+        generation: 2
+        labels:
+          app: staging-http-server
+        name: staging-http-server
+      spec:
+        replicas: 1
+        selector:
+          deploymentconfig: staging-http-server
+        strategy:
+          resources: {}
+          type: Rolling
+        template:
+          metadata:
+            labels:
+              app: staging-http-server
+              deploymentconfig: staging-http-server
+          spec:
+            containers:
+              - image: "docker.io/softwarefactoryproject/staging-http-server"
+                # imagePullPolicy: Always
+                name: staging-http-server
+                ports:
+                  - containerPort: 8080
+                    protocol: TCP
+                  - containerPort: 8443
+                    protocol: TCP
+                resources: {}
+            dnsPolicy: ClusterFirst
+            restartPolicy: Always
+            schedulerName: default-scheduler
+            terminationGracePeriodSeconds: 30
+
+- name: create staging-http Service spec
+  openshift_raw:
+    state: present
+    namespace: "{{ zuul.resources['project'].namespace }}"
+    context: "{{ zuul.resources['project'].context }}"
+    definition:
+      apiVersion: v1
+      kind: Service
+      metadata:
+        labels:
+          app: staging-http-server
+        name: staging-http-server
+      spec:
+        ports:
+          - name: 8080-tcp
+            port: 8080
+            protocol: TCP
+            targetPort: 8080
+        selector:
+          deploymentconfig: staging-http-server
+        sessionAffinity: None
+        type: ClusterIP
+      status:
+        loadBalancer: {}
+
+- name: get staging-http-server pod name
+  command: >
+    {{ oc_command }} get pods --field-selector=status.phase=Running
+    -o "jsonpath={.items[?(@.metadata.labels.app=='staging-http-server')].metadata.name}"
+  register: _zm_name
+  retries: 600
+  delay: 1
+  until: "'staging-http' in _zm_name.stdout"
+
+- name: register staging-http-server pod name
+  set_fact:
+    zm_name: "{{ _zm_name.stdout }}"
diff --git a/playbooks/openshift/unprivileged-machine.yaml b/playbooks/openshift/unprivileged-machine.yaml
new file mode 100644
index 0000000..5055fcd
--- /dev/null
+++ b/playbooks/openshift/unprivileged-machine.yaml
@@ -0,0 +1,39 @@
+---
+- hosts: localhost
+  tasks:
+    - block:
+        - import_role: name=emit-job-header
+        # We need those tasks to use log-inventory, see: https://review.openstack.org/577674
+        - name: Define zuul_info_dir fact
+          set_fact:
+            zuul_info_dir: "{{ zuul.executor.log_root }}/zuul-info"
+
+        - name: Ensure Zuul Ansible directory exists
+          delegate_to: localhost
+          run_once: true
+          file:
+            path: "{{ zuul_info_dir }}"
+            state: directory
+
+        - name: Define inventory_file fact
+          set_fact:
+            inventory_file: "/tmp/{{ zuul.build }}/ansible/inventory.yaml"
+
+        - import_role: name=log-inventory
+      vars:
+        zuul_log_url: "https://fedora.softwarefactory-project.io/logs"
+
+    - name: Create src directory
+      command: >
+        oc --context "{{ zuul.resources['pod'].context }}"
+        --namespace "{{ zuul.resources['pod'].namespace }}"
+        exec {{ zuul.resources['pod'].pod }} mkdir src
+
+    - name: Copy src repos to the pod
+      command: >
+        oc --context "{{ zuul.resources['pod'].context }}"
+        --namespace "{{ zuul.resources['pod'].namespace }}"
+        rsync -q --progress=false
+        {{ zuul.executor.src_root }}/
+        {{ zuul.resources['pod'].pod }}:src/
+      no_log: true
diff --git a/playbooks/pages/build.yaml b/playbooks/pages/build.yaml
new file mode 100644
index 0000000..b44b7e1
--- /dev/null
+++ b/playbooks/pages/build.yaml
@@ -0,0 +1,5 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: all
+  roles:
+    - role: build-pages
diff --git a/playbooks/pages/fetch.yaml b/playbooks/pages/fetch.yaml
new file mode 100644
index 0000000..c6f1592
--- /dev/null
+++ b/playbooks/pages/fetch.yaml
@@ -0,0 +1,5 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: all
+  roles:
+    - role: fetch-pages
diff --git a/playbooks/pages/publish.yaml b/playbooks/pages/publish.yaml
new file mode 100644
index 0000000..dbee215
--- /dev/null
+++ b/playbooks/pages/publish.yaml
@@ -0,0 +1,10 @@
+# This file is managed by ansible, do not edit directly
+---
+- hosts: localhost
+  roles:
+    - role: add-fileserver
+      fileserver: "{{ site_pages }}"
+
+- hosts: "{{ site_pages.fqdn }}"
+  roles:
+    - role: upload-pages
diff --git a/playbooks/wait-for-changes-ahead.yaml b/playbooks/wait-for-changes-ahead.yaml
new file mode 100644
index 0000000..d78a109
--- /dev/null
+++ b/playbooks/wait-for-changes-ahead.yaml
@@ -0,0 +1,4 @@
+---
+- hosts: localhost
+  roles:
+    - wait-for-changes-ahead
diff --git a/policies/policy.yaml b/policies/policy.yaml
new file mode 100644
index 0000000..509f814
--- /dev/null
+++ b/policies/policy.yaml
@@ -0,0 +1,69 @@
+---
+# Default rules that should not be changed, but can be used as building blocks for more complex rules
+'admin_or_service': 'rule:is_admin or rule:is_service'
+'admin_api': 'rule:is_admin'
+# is_owner applies to API calls where a user is the target. is_owner will be True if the requestor is the target of the action
+'is_owner': 'username:%(username)s'
+'owner_api': 'rule:is_owner'
+'admin_or_owner': 'rule:is_admin or rule:is_owner'
+# group checking depending on the target project
+'is_ptl': 'group:%(project)s-ptl'
+'is_core': 'group:%(project)s-core'
+'is_dev': 'group:%(project)s-dev'
+'ptl_api': 'rule:is_ptl'
+'core_api': 'rule:is_core'
+'dev_api': 'rule:is_dev'
+'contributor_api': 'rule:ptl_api or rule:core_api or rule:dev_api'
+
+'authenticated_api': 'is_authenticated:True'
+'any': '@'
+'none': '!'
+# Backup API
+'managesf.backup:get': 'rule:admin_api'
+'managesf.backup:create': 'rule:admin_api'
+# Pages API CRUD
+'managesf.pages:get': 'rule:admin_api or rule:ptl_api'
+'managesf.pages:create': 'rule:admin_api or rule:ptl_api'
+'managesf.pages:delete': 'rule:admin_api or rule:ptl_api'
+# local user backend (for local authentication) API CRUD
+'managesf.localuser:get': 'rule:authenticated_api'
+'managesf.localuser:create_update': 'rule:admin_api or username:%(username)s'
+'managesf.localuser:delete': 'rule:admin_api or username:%(username)s'
+# This rule should be left alone, or local users will not be able to authenticate
+'managesf.localuser:bind': 'rule:any'
+# user API CRUD
+'managesf.user:get': 'rule:authenticated_api'
+'managesf.user:create': 'rule:admin_api or username:%(username)s'
+'managesf.user:delete': 'rule:admin_api'
+'managesf.user:update': 'rule:admin_api or username:%(username)s'
+# gerrit hooks API
+'managesf.hooks:trigger': 'rule:admin_or_service'
+# template tests for projects API
+'managesf.tests:add': 'rule:admin_api or rule:ptl_api'
+# config (permissions) API
+'managesf.config:get': 'rule:authenticated_api'
+# resources API
+'managesf.resources:get': 'rule:any'
+'managesf.resources:validate': 'rule:admin_or_service'
+'managesf.resources:apply': 'rule:admin_or_service'
+# jobs API
+'managesf.job:get': 'rule:any'
+'managesf.job:stop': 'rule:admin_or_service'
+'managesf.job:run': 'rule:admin_or_service'
+# nodes API
+'managesf.node:get': 'rule:any'
+'managesf.node:hold': 'rule:admin_or_service'
+'managesf.node:delete': 'rule:admin_or_service'
+'managesf.node:image-get': 'rule:any'
+'managesf.node:add_authorized_key': 'rule:admin_or_service'
+'managesf.node:image-start-update': 'rule:admin_or_service'
+'managesf.node:image-update-status': 'rule:admin_or_service'
+# zuul API
+'zuul.tenants:get': 'rule:any'
+'zuul.tenant.status:get': 'rule:any'
+'zuul.tenant.jobs:get': 'rule:any'
+'zuul.tenant.builds:get': 'rule:any'
+'zuul.tenant.console-stream:get': 'rule:any'
+'zuul.status:get': 'rule:any'
+'zuul.status.change:get': 'rule:any'
+'zuul.project.public_keys:get': 'rule:any'
diff --git a/resources/README b/resources/README
new file mode 100644
index 0000000..77afcdd
--- /dev/null
+++ b/resources/README
@@ -0,0 +1 @@
+Software Factory managed resources
diff --git a/resources/_internal.yaml b/resources/_internal.yaml
new file mode 100644
index 0000000..17e8f9f
--- /dev/null
+++ b/resources/_internal.yaml
@@ -0,0 +1,26 @@
+# This file is managed by ansible, do not edit directly
+---
+resources:
+  tenants:
+    fedora-staging:
+      description: "The fedora-staging tenant."
+      url: "https://fedora.softwarefactory-project.io/manage"
+      default-connection: pagure.io
+
+  connections:
+    opendev.org:
+      base-url: "https://opendev.org"
+      type: git
+
+  projects:
+    internal:
+      tenant: fedora-staging
+      description: Internal configuration project
+      source-repositories:
+        - fedora-project-config:
+            zuul/config-project: True
+        - fedora-zuul-jobs
+        - zuul/zuul-jobs:
+            connection: opendev.org
+            zuul/include: [job]
+            zuul/shadow: fedora-zuul-jobs
diff --git a/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py b/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py
new file mode 100755
index 0000000..4e43614
--- /dev/null
+++ b/roles/wait-for-changes-ahead/library/wait_for_changes_ahead.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+
+# Copyright (c) 2018 Red Hat
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import absolute_import, division, print_function
+import traceback
+import json
+import time
+from six.moves import urllib
+from ansible.module_utils.basic import AnsibleModule
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: wait_for_changes_ahead
+short_description: Wait for zuul queue
+author: Tristan de Cacqueray (@tristanC)
+description:
+    - Wait for the zuul queue ahead to SUCCEED
+requirements:
+    - "python >= 3.5"
+options:
+    zuul_status_url:
+        description:
+            - The zuul status url to query change status
+        required: true
+        type: str
+    zuul_change:
+        description:
+            - The change number and patchset number, e.g. '1234,1'
+        required: true
+        type: str
+    wait_timeout:
+        description:
+            - The maximum waiting time in minutes
+        default: 120
+        type: int
+'''
+
+log = list()
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            zuul_status_url=dict(required=True, type='str'),
+            zuul_change=dict(required=True, type='str'),
+            wait_timeout=dict(type='int'),
+        )
+    )
+    zuul_status_url = module.params['zuul_status_url']
+    zuul_change = module.params['zuul_change']
+    wait_timeout = module.params.get('wait_timeout', 120)
+    if not wait_timeout:
+        wait_timeout = 120
+    wait_timeout = int(wait_timeout) * 60
+
+    try:
+        start_time = time.monotonic()
+        while True:
+            req = urllib.request.urlopen(
+                zuul_status_url + "/change/%s" % zuul_change)
+            changes = json.loads(req.read().decode('utf-8'))
+
+            if not changes:
+                module.fail_json(msg="Unknown change", log="\n".join(log))
+
+            found = None
+            for change in changes:
+                if change["live"] is True:
+                    found = change
+                    break
+
+            if found and not found["item_ahead"]:
+                break
+
+            if time.monotonic() - start_time > wait_timeout:
+                module.fail_json(msg="Timeout", log="\n".join(log))
+
+            time.sleep(30)
+    except Exception as e:
+        tb = traceback.format_exc()
+        log.append(str(e))
+        log.append(tb)
+        module.fail_json(msg=str(e), log="\n".join(log))
+    finally:
+        log_text = "\n".join(log)
+        module.exit_json(changed=False, msg=log_text)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/roles/wait-for-changes-ahead/tasks/main.yaml b/roles/wait-for-changes-ahead/tasks/main.yaml
new file mode 100644
index 0000000..f0c1aae
--- /dev/null
+++ b/roles/wait-for-changes-ahead/tasks/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Wait for changes ahead
+  wait_for_changes_ahead:
+    zuul_status_url: "{{ zuul_web_url }}/api/tenant/{{ zuul.tenant }}/status"
+    zuul_change: "{{ zuul.change }},{{ zuul.patchset }}"
+    wait_timeout: "{{ wait_timeout|default(120) }}"
diff --git a/zuul.d/README b/zuul.d/README
new file mode 100644
index 0000000..78444aa
--- /dev/null
+++ b/zuul.d/README
@@ -0,0 +1 @@
+Zuul config repo content
diff --git a/zuul.d/_jobs-base.yaml b/zuul.d/_jobs-base.yaml
new file mode 100644
index 0000000..ed338b5
--- /dev/null
+++ b/zuul.d/_jobs-base.yaml
@@ -0,0 +1,49 @@
+# This file is managed by ansible, do not edit directly
+---
+- job:
+    name: base
+    parent: null
+    description: The base job.
+    pre-run: playbooks/base/pre.yaml
+    post-run:
+      - playbooks/base/post.yaml
+    roles:
+      - zuul: fedora-zuul-jobs
+      - zuul: zuul/zuul-jobs
+    timeout: 1800
+    attempts: 3
+    secrets:
+      - site_sflogs
+    nodeset:
+      nodes:
+        - name: container
+          label: cloud-fedora
+
+- semaphore:
+    name: semaphore-config-update
+    max: 1
+
+
+- job:
+    name: wait-for-changes-ahead
+    parent: null
+    timeout: 7200
+    nodeset:
+      nodes: []
+    vars:
+      zuul_web_url: "https://fedora.softwarefactory-project.io/zuul"
+    description: |
+      This job waits for the queue ahead to be empty.
+
+      Responds to these variables:
+
+      .. zuul:jobvar:: zuul_web_url
+
+         The zuul web API URL.
+
+      .. zuul:jobvar:: wait_timeout
+         :default: 120
+
+         Wait timeout in minutes.
+
+    run: playbooks/wait-for-changes-ahead.yaml
diff --git a/zuul.d/_jobs-openshift.yaml b/zuul.d/_jobs-openshift.yaml
new file mode 100644
index 0000000..2c97459
--- /dev/null
+++ b/zuul.d/_jobs-openshift.yaml
@@ -0,0 +1,48 @@
+# This file is managed by sfconfig, do not edit manually
+# The Openshift driver is a Tech Preview, use at your own risk...
+---
+- job:
+    name: base-openshift-native
+    parent: null
+    description: |
+      A base job that builds and deploys a container image using the
+      project's future state.
+    pre-run: playbooks/openshift/pre.yaml
+    post-run:
+      - playbooks/base/post.yaml
+    roles:
+      - zuul: fedora-zuul-jobs
+      - zuul: zuul/zuul-jobs
+    timeout: 1800
+    # Set attempts to 1 until it's working well
+    attempts: 1
+    secrets:
+      - site_sflogs
+    nodeset:
+      nodes:
+        - name: project
+          label: openshift-project
+    vars:
+      base_image: "python:3.6"
+
+- job:
+    name: base-openshift-pod
+    parent: null
+    description: |
+      A base job that spawns a vanilla container and copies the project's
+      future state.
+    pre-run: playbooks/openshift/unprivileged-machine.yaml
+    post-run:
+      - playbooks/base/post.yaml
+    roles:
+      - zuul: fedora-zuul-jobs
+      - zuul: zuul/zuul-jobs
+    # Set attempts to 1 until it's working well
+    attempts: 1
+    secrets:
+      - site_sflogs
+    timeout: 1800
+    nodeset:
+      nodes:
+        - name: pod
+          label: openshift-pod-fedora
diff --git a/zuul.d/_jobs-pages.yaml b/zuul.d/_jobs-pages.yaml
new file mode 100644
index 0000000..19b270d
--- /dev/null
+++ b/zuul.d/_jobs-pages.yaml
@@ -0,0 +1,2 @@
+# This file is managed by ansible, do not edit directly
+---
diff --git a/zuul.d/_pipelines.yaml b/zuul.d/_pipelines.yaml
new file mode 100644
index 0000000..251f584
--- /dev/null
+++ b/zuul.d/_pipelines.yaml
@@ -0,0 +1,126 @@
+# This file is managed by ansible, do not edit directly
+---
+- pipeline:
+    name: check
+    description: |
+      Newly uploaded patchsets enter this pipeline to receive an
+      initial +/-1 Verified vote.
+    manager: independent
+    require:
+      pagure.io:
+        merged: False
+    trigger:
+      pagure.io:
+        - event: pg_pull_request
+          action: comment
+          comment: (?i)^\s*recheck\s*$
+        - event: pg_pull_request
+          action:
+            - opened
+            - changed
+    start:
+      pagure.io:
+        status: 'pending'
+        status-url: "https://fedora.softwarefactory-project.io/zuul/t/fedora-staging/status.html"
+        comment: false
+    success:
+      pagure.io:
+        status: 'success'
+      sqlreporter:
+    failure:
+      pagure.io:
+        status: 'failure'
+      sqlreporter:
+
+- pipeline:
+    name: post
+    post-review: true
+    description: This pipeline runs jobs that operate after each change is merged.
+    manager: independent
+    precedence: low
+    trigger:
+      pagure.io:
+        - event: pg_push
+          ref: ^refs/heads/.*$
+    success:
+      sqlreporter:
+    failure:
+      smtp:
+        from: "zuul@fedora.softwarefactory-project.io"
+        to: "root@localhost"
+        subject: '[Zuul] Job failed in post pipeline: {change.project}'
+      sqlreporter:
+
+- pipeline:
+    name: pre-release
+    description: When a commit is tagged with a pre-release tag, this pipeline runs jobs that publish archives and documentation.
+    manager: independent
+    precedence: high
+    post-review: True
+    trigger:
+    success:
+      sqlreporter:
+    failure:
+      sqlreporter:
+      smtp:
+        from: "zuul@fedora.softwarefactory-project.io"
+        to: "root@localhost"
+        subject: '[Zuul] Job failed in pre-release pipeline: {change.project}'
+
+- pipeline:
+    name: release
+    post-review: true
+    description: When a commit is tagged as a release, this pipeline runs jobs that publish archives and documentation.
+    manager: independent
+    precedence: high
+    trigger:
+    success:
+      sqlreporter:
+    failure:
+      smtp:
+        from: "zuul@fedora.softwarefactory-project.io"
+        to: "root@localhost"
+        subject: '[Zuul] Job failed in release pipeline: {change.project}'
+      sqlreporter:
+
+- pipeline:
+    name: periodic
+    post-review: true
+    description: Jobs in this queue are triggered daily.
+    manager: independent
+    precedence: low
+    trigger:
+      timer:
+        - time: '0 0 * * *'
+    success:
+      sqlreporter:
+    failure:
+      smtp:
+        from: "zuul@fedora.softwarefactory-project.io"
+        to: "root@localhost"
+        subject: '[Zuul] Job failed in periodic pipeline: {change.project}'
+      sqlreporter:
+
+- pipeline:
+    name: experimental
+    description: On-demand pipeline for requesting a run against a set of jobs that are not yet gating. Leave review comment of "check experimental" to run jobs in this pipeline.
+    success-message: Build succeeded (experimental pipeline).
+    failure-message: Build failed (experimental pipeline).
+    manager: independent
+    precedence: normal
+    trigger:
+    success:
+      sqlreporter:
+    failure:
+      sqlreporter:
+
+- pipeline:
+    name: merge-check
+    description: >
+      Each time a change merges, this pipeline verifies that all open changes
+      on the same project are still mergeable.
+    failure-message: Build failed (merge-check pipeline).
+    manager: independent
+    ignore-dependencies: true
+    precedence: low
+    trigger: {}
diff --git a/zuul.d/_projects.yaml b/zuul.d/_projects.yaml
new file mode 100644
index 0000000..baad817
--- /dev/null
+++ b/zuul.d/_projects.yaml
@@ -0,0 +1,8 @@
+# This file is managed by ansible, do not edit directly
+---
+- project:
+    name: ^.*$
+    check:
+      jobs: []
+    gate:
+      jobs: []
diff --git a/zuul/README b/zuul/README
new file mode 100644
index 0000000..6549d4a
--- /dev/null
+++ b/zuul/README
@@ -0,0 +1,23 @@
+This directory contains the zuul main.yaml configuration files.
+
+To test a new repository with zuul, add a file *my-project.yaml*:
+- tenant:
+    name: 'local'
+    source:
+      gerrit:
+        untrusted-projects:
+          - my-project-repo-name
+
+Documentation is available here: https://softwarefactory-project.io/docs/zuul
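+
+A project entry can also pin the connection and limit what Zuul loads from
+it. For instance (a sketch based on the zuul/zuul-jobs entry in
+resources/_internal.yaml):
+- tenant:
+    name: 'local'
+    source:
+      opendev.org:
+        untrusted-projects:
+          - zuul/zuul-jobs:
+              include: [job]
+              shadow: fedora-zuul-jobs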