#9 resultsdb cr
Merged 2 years ago by lrossett. Opened 2 years ago by lrossett.
kube-sig/ lrossett/gating-services-operator issue-5_resultsdb  into  devel

file added
+1
@@ -0,0 +1,1 @@ 

+ *.old

@@ -1,32 +0,0 @@ 

- FROM fedora:34

- 

- ARG VER=1.2

- 

- LABEL \

-   name="ResultsDB Listener Service" \

-   vendor="Fedora Kube SIG developers" \

-   license="MIT" \

-   description="" \

-   usage="https://pagure.io/ci-resultsdb-listener/tree/master"

- 

- RUN dnf -y install wget unzip fedora-messaging python3-pip && \

- dnf clean all

- 

- RUN wget https://pagure.io/ci-resultsdb-listener/archive/${VER}/ci-resultsdb-listener-${VER}.zip && \

- unzip ci-resultsdb-listener-${VER}.zip && \

- cp ci-resultsdb-listener-${VER}/conf/config.toml.prod-example /etc/fedora-messaging/config.toml && \

- (cd ci-resultsdb-listener-${VER}; python3 setup.py install;) && \

- pip3 install resultsdb_api && \

- rm -rf ci-resultsdb-listener-${VER} ci-resultsdb-listener-${VER}.zip

- 

- RUN mkdir -p /etc/ci-resultsdb-listener

- 

- COPY files/listener.cfg /etc/ci-resultsdb-listener/

- COPY files/entrypoint /entrypoint

- 

- RUN chmod 644 /etc/ci-resultsdb-listener/listener.cfg && \

- chmod 655 /entrypoint

- 

- ENTRYPOINT ["/entrypoint"]

- 

- USER 1001:0

@@ -1,20 +0,0 @@ 

- SHELL := /bin/bash

- REG := quay.io

- NAME := fedora/resultsdb-listener

- TAG := latest

- IMAGE := ${REG}/${NAME}:${TAG}

- 

- .DEFAULT_GOAL := image

- 

- image/build:

- 	@docker build -t ${IMAGE} .

- 

- image/push:

- 	@docker push ${IMAGE}

- 

- image/debug: image/build

- 	@docker run -it --entrypoint /bin/bash ${IMAGE}

- 

- image: image/build image/push

- 

- .PHONY: image/build image/push image/debug image

@@ -1,14 +0,0 @@ 

- #!/bin/bash

- 

- if [[ -z "${RESULTSDB_CONSUMER_CONFIG+x}" ]]; then

-   RESULTSDB_CONSUMER_CONFIG="/etc/fedora-messaging/config.toml"

- fi

- 

- if [[ -z "${RESULTSDB_CONSUMER_CALLBACK+x}" ]]; then

-   RESULTSDB_CONSUMER_CALLBACK="resultsdb_listener.consumer:Consumer"

- fi

- 

- fedora-messaging \

- --conf $RESULTSDB_CONSUMER_CONFIG \

- consume \

- --callback=$RESULTSDB_CONSUMER_CALLBACK 

\ No newline at end of file

@@ -1,3 +0,0 @@ 

- [sender]

- sender_type = StreamSender

- resultsdb_url = http://resultsdb:5001/api/v2.0/ 

\ No newline at end of file

file modified
+2 -2
@@ -1,4 +1,4 @@ 

- FROM fedora:34

+ FROM fedora:35

  

  LABEL \

    name="ResultsDB Service" \
@@ -27,4 +27,4 @@ 

  

  ENTRYPOINT ["/entrypoint"]

  

- USER 1001:0 

\ No newline at end of file

+ USER 1001

file modified
+2 -2
@@ -1,5 +1,5 @@ 

  SHELL := /bin/bash

- IMAGE := quay.io/fedora/resultsdb:latest

+ IMAGE := quay.io/fedora-kube-sig/resultsdb:latest

  .DEFAULT_GOAL := image

  

  image/build:
@@ -13,4 +13,4 @@ 

  

  image: image/build image/push

  

- .PHONY: image/build image/push image/debug image 

\ No newline at end of file

+ .PHONY: image/build image/push image/debug image

file added
+14
@@ -0,0 +1,14 @@ 

+ 

+ # Binaries for programs and plugins

+ *.exe

+ *.exe~

+ *.dll

+ *.so

+ *.dylib

+ bin

+ 

+ # editor and IDE paraphernalia

+ .idea

+ *.swp

+ *.swo

+ *~

file added
+9
@@ -0,0 +1,9 @@ 

+ FROM quay.io/operator-framework/ansible-operator:v1.11.0

+ 

+ COPY requirements.yml ${HOME}/requirements.yml

+ RUN ansible-galaxy collection install -r ${HOME}/requirements.yml \

+  && chmod -R ug+rwx ${HOME}/.ansible

+ 

+ COPY watches.yaml ${HOME}/watches.yaml

+ COPY roles/ ${HOME}/roles/

+ COPY playbooks/ ${HOME}/playbooks/

file added
+173
@@ -0,0 +1,173 @@ 

+ # VERSION defines the project version for the bundle.

+ # Update this value when you upgrade the version of your project.

+ # To re-generate a bundle for another specific version without changing the standard setup, you can:

+ # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)

+ # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)

+ VERSION ?= 0.0.1

+ 

+ # CHANNELS define the bundle channels used in the bundle.

+ # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")

+ # To re-generate a bundle for other specific channels without changing the standard setup, you can:

+ # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)

+ # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")

+ ifneq ($(origin CHANNELS), undefined)

+ BUNDLE_CHANNELS := --channels=$(CHANNELS)

+ endif

+ 

+ # DEFAULT_CHANNEL defines the default channel used in the bundle.

+ # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")

+ # To re-generate a bundle for any other default channel without changing the default setup, you can:

+ # - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)

+ # - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")

+ ifneq ($(origin DEFAULT_CHANNEL), undefined)

+ BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)

+ endif

+ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)

+ 

+ # IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.

+ # This variable is used to construct full image tags for bundle and catalog images.

+ #

+ # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both

+ # k8s.apps.fedoraproject.org/operator-bundle:$VERSION and k8s.apps.fedoraproject.org/operator-catalog:$VERSION.

+ IMAGE_TAG_BASE ?= k8s.apps.fedoraproject.org/operator

+ 

+ # BUNDLE_IMG defines the image:tag used for the bundle.

+ # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)

+ BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)

+ 

+ # Image URL to use all building/pushing image targets

+ IMG ?= quay.io/fedora-kube-sig/gating-services-operator:latest

+ 

+ all: docker-build

+ 

+ ##@ General

+ 

+ # The help target prints out all targets with their descriptions organized

+ # beneath their categories. The categories are represented by '##@' and the

+ # target descriptions by '##'. The awk commands is responsible for reading the

+ # entire set of makefiles included in this invocation, looking for lines of the

+ # file as xyz: ## something, and then pretty-format the target and help. Then,

+ # if there's a line with ##@ something, that gets pretty-printed as a category.

+ # More info on the usage of ANSI control characters for terminal formatting:

+ # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters

+ # More info on the awk command:

+ # http://linuxcommand.org/lc3_adv_awk.php

+ 

+ help: ## Display this help.

+ 	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)

+ 

+ ##@ Build

+ 

+ run: ansible-operator ## Run against the configured Kubernetes cluster in ~/.kube/config

+ 	ANSIBLE_ROLES_PATH="$(ANSIBLE_ROLES_PATH):$(shell pwd)/roles" $(ANSIBLE_OPERATOR) run

+ 

+ docker-build: ## Build docker image with the manager.

+ 	docker build -t ${IMG} .

+ 

+ docker-push: ## Push docker image with the manager.

+ 	docker push ${IMG}

+ 

+ ##@ Deployment

+ 

+ install: kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.

+ 	$(KUSTOMIZE) build config/crd | kubectl apply -f -

+ 

+ uninstall: kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.

+ 	$(KUSTOMIZE) build config/crd | kubectl delete -f -

+ 

+ deploy: kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.

+ 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}

+ 	$(KUSTOMIZE) build config/default | kubectl apply -f -

+ 

+ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.

+ 	$(KUSTOMIZE) build config/default | kubectl delete -f -

+ 

+ OS := $(shell uname -s | tr '[:upper:]' '[:lower:]')

+ ARCH := $(shell uname -m | sed 's/x86_64/amd64/')

+ 

+ .PHONY: kustomize

+ KUSTOMIZE = $(shell pwd)/bin/kustomize

+ kustomize: ## Download kustomize locally if necessary.

+ ifeq (,$(wildcard $(KUSTOMIZE)))

+ ifeq (,$(shell which kustomize 2>/dev/null))

+ 	@{ \

+ 	set -e ;\

+ 	mkdir -p $(dir $(KUSTOMIZE)) ;\

+ 	curl -sSLo - https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.8.7/kustomize_v3.8.7_$(OS)_$(ARCH).tar.gz | \

+ 	tar xzf - -C bin/ ;\

+ 	}

+ else

+ KUSTOMIZE = $(shell which kustomize)

+ endif

+ endif

+ 

+ .PHONY: ansible-operator

+ ANSIBLE_OPERATOR = $(shell pwd)/bin/ansible-operator

+ ansible-operator: ## Download ansible-operator locally if necessary, preferring the $(pwd)/bin path over global if both exist.

+ ifeq (,$(wildcard $(ANSIBLE_OPERATOR)))

+ ifeq (,$(shell which ansible-operator 2>/dev/null))

+ 	@{ \

+ 	set -e ;\

+ 	mkdir -p $(dir $(ANSIBLE_OPERATOR)) ;\

+ 	curl -sSLo $(ANSIBLE_OPERATOR) https://github.com/operator-framework/operator-sdk/releases/download/v1.11.0/ansible-operator_$(OS)_$(ARCH) ;\

+ 	chmod +x $(ANSIBLE_OPERATOR) ;\

+ 	}

+ else

+ ANSIBLE_OPERATOR = $(shell which ansible-operator)

+ endif

+ endif

+ 

+ .PHONY: bundle

+ bundle: kustomize ## Generate bundle manifests and metadata, then validate generated files.

+ 	operator-sdk generate kustomize manifests -q

+ 	cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)

+ 	$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)

+ 	operator-sdk bundle validate ./bundle

+ 

+ .PHONY: bundle-build

+ bundle-build: ## Build the bundle image.

+ 	docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .

+ 

+ .PHONY: bundle-push

+ bundle-push: ## Push the bundle image.

+ 	$(MAKE) docker-push IMG=$(BUNDLE_IMG)

+ 

+ .PHONY: opm

+ OPM = ./bin/opm

+ opm: ## Download opm locally if necessary.

+ ifeq (,$(wildcard $(OPM)))

+ ifeq (,$(shell which opm 2>/dev/null))

+ 	@{ \

+ 	set -e ;\

+ 	mkdir -p $(dir $(OPM)) ;\

+ 	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$(OS)-$(ARCH)-opm ;\

+ 	chmod +x $(OPM) ;\

+ 	}

+ else

+ OPM = $(shell which opm)

+ endif

+ endif

+ 

+ # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).

+ # These images MUST exist in a registry and be pull-able.

+ BUNDLE_IMGS ?= $(BUNDLE_IMG)

+ 

+ # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).

+ CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)

+ 

+ # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.

+ ifneq ($(origin CATALOG_BASE_IMG), undefined)

+ FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)

+ endif

+ 

+ # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.

+ # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:

+ # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator

+ .PHONY: catalog-build

+ catalog-build: opm ## Build a catalog image.

+ 	$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)

+ 

+ # Push the catalog image.

+ .PHONY: catalog-push

+ catalog-push: ## Push a catalog image.

+ 	$(MAKE) docker-push IMG=$(CATALOG_IMG)

file added
+16
@@ -0,0 +1,16 @@ 

+ domain: k8s.apps.fedoraproject.org

+ layout:

+ - ansible.sdk.operatorframework.io/v1

+ plugins:

+   manifests.sdk.operatorframework.io/v2: {}

+   scorecard.sdk.operatorframework.io/v2: {}

+ projectName: operator

+ resources:

+ - api:

+     crdVersion: v1

+     namespaced: true

+   domain: k8s.apps.fedoraproject.org

+   group: gating

+   kind: ResultsDB

+   version: v1alpha1

+ version: "3"

@@ -0,0 +1,22 @@ 

+ apiVersion: networking.k8s.io/v1

+ kind: Ingress

+ metadata:

+   name: resultsdb-sample

+   annotations:

+     nginx.ingress.kubernetes.io/secure-backends: "true"

+     nginx.ingress.kubernetes.io/ssl-passthrough: "true"

+     nginx.ingress.kubernetes.io/ssl-redirect: "false"

+     nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"

+ spec:

+   tls:

+   rules:

+   - host: resultsdb.testing

+     http:

+       paths:

+         - path: /

+           pathType: Prefix

+           backend:

+             service:

+               name: resultsdb-sample-http

+               port:

+                 number: 5001

@@ -0,0 +1,12 @@ 

+ apiVersion: v1

+ kind: Secret

+ metadata:

+   name: postgres

+   labels:

+     app: postgres

+ stringData:

+   POSTGRES_HOST: postgres

+   POSTGRES_PORT: "5432"

+   POSTGRES_DB: resultsdb

+   POSTGRES_USER: resultsdb

+   POSTGRES_PASSWORD: resultsdb

@@ -0,0 +1,29 @@ 

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: postgres

+ spec:

+   replicas: 1

+   selector:

+     matchLabels:

+       app: postgres

+   template:

+     metadata:

+       labels:

+         app: postgres

+     spec:

+       volumes:

+         - name: data

+           emptyDir: {}

+       containers:

+         - name: postgres

+           volumeMounts:

+             - mountPath: /var/lib/postgresql

+               name: data

+           image: quay.io/fedora-kube-sig/postgres:10.4-devel

+           imagePullPolicy: Always

+           ports:

+             - containerPort: 5432

+           envFrom:

+             - secretRef:

+                 name: postgres

@@ -0,0 +1,12 @@ 

+ apiVersion: v1

+ kind: Service

+ metadata:

+   name: postgres

+   labels:

+     app: postgres

+ spec:

+   type: NodePort

+   ports:

+     - port: 5432

+   selector:

+     app: postgres

@@ -0,0 +1,44 @@ 

+ ---

+ apiVersion: apiextensions.k8s.io/v1

+ kind: CustomResourceDefinition

+ metadata:

+   name: resultsdbs.gating.k8s.apps.fedoraproject.org

+ spec:

+   group: gating.k8s.apps.fedoraproject.org

+   names:

+     kind: ResultsDB

+     listKind: ResultsDBList

+     plural: resultsdbs

+     singular: resultsdb

+   scope: Namespaced

+   versions:

+   - name: v1alpha1

+     schema:

+       openAPIV3Schema:

+         description: ResultsDB is the Schema for the resultsdbs API

+         properties:

+           apiVersion:

+             description: 'APIVersion defines the versioned schema of this representation

+               of an object. Servers should convert recognized schemas to the latest

+               internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'

+             type: string

+           kind:

+             description: 'Kind is a string value representing the REST resource this

+               object represents. Servers may infer this from the endpoint the client

+               submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'

+             type: string

+           metadata:

+             type: object

+           spec:

+             description: Spec defines the desired state of ResultsDB

+             type: object

+             x-kubernetes-preserve-unknown-fields: true

+           status:

+             description: Status defines the observed state of ResultsDB

+             type: object

+             x-kubernetes-preserve-unknown-fields: true

+         type: object

+     served: true

+     storage: true

+     subresources:

+       status: {}

@@ -0,0 +1,6 @@ 

+ # This kustomization.yaml is not intended to be run by itself,

+ # since it depends on service name and namespace that are out of this kustomize package.

+ # It should be run by config/default

+ resources:

+ - bases/gating.k8s.apps.fedoraproject.org_resultsdbs.yaml

+ #+kubebuilder:scaffold:crdkustomizeresource

@@ -0,0 +1,30 @@ 

+ # Adds namespace to all resources.

+ namespace: operator-system

+ 

+ # Value of this field is prepended to the

+ # names of all resources, e.g. a deployment named

+ # "wordpress" becomes "alices-wordpress".

+ # Note that it should also match with the prefix (text before '-') of the namespace

+ # field above.

+ namePrefix: operator-

+ 

+ # Labels to add to all resources and selectors.

+ #commonLabels:

+ #  someName: someValue

+ 

+ bases:

+ - ../crd

+ - ../rbac

+ - ../manager

+ # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.

+ #- ../prometheus

+ 

+ patchesStrategicMerge:

+ # Protect the /metrics endpoint by putting it behind auth.

+ # If you want your controller-manager to expose the /metrics

+ # endpoint w/o any authn/z, please comment the following line.

+ - manager_auth_proxy_patch.yaml

+ 

+ # Mount the controller config file for loading manager configurations

+ # through a ComponentConfig type

+ #- manager_config_patch.yaml

@@ -0,0 +1,28 @@ 

+ # This patch inject a sidecar container which is a HTTP proxy for the

+ # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+       - name: kube-rbac-proxy

+         image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0

+         args:

+         - "--secure-listen-address=0.0.0.0:8443"

+         - "--upstream=http://127.0.0.1:8080/"

+         - "--logtostderr=true"

+         - "--v=10"

+         ports:

+         - containerPort: 8443

+           protocol: TCP

+           name: https

+       - name: manager

+         args:

+         - "--health-probe-bind-address=:6789"

+         - "--metrics-bind-address=127.0.0.1:8080"

+         - "--leader-elect"

+         - "--leader-election-id=operator"

@@ -0,0 +1,20 @@ 

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+       - name: manager

+         args:

+         - "--config=controller_manager_config.yaml"

+         volumeMounts:

+         - name: manager-config

+           mountPath: /controller_manager_config.yaml

+           subPath: controller_manager_config.yaml

+       volumes:

+       - name: manager-config

+         configMap:

+           name: manager-config

@@ -0,0 +1,10 @@ 

+ apiVersion: controller-runtime.sigs.k8s.io/v1alpha1

+ kind: ControllerManagerConfig

+ health:

+   healthProbeBindAddress: :6789

+ metrics:

+   bindAddress: 127.0.0.1:8080

+ 

+ leaderElection:

+   leaderElect: true

+   resourceName: 811c9dc5.k8s.apps.fedoraproject.org

@@ -0,0 +1,10 @@ 

+ resources:

+ - manager.yaml

+ 

+ generatorOptions:

+   disableNameSuffixHash: true

+ 

+ configMapGenerator:

+ - name: manager-config

+   files:

+   - controller_manager_config.yaml

@@ -0,0 +1,51 @@ 

+ apiVersion: v1

+ kind: Namespace

+ metadata:

+   labels:

+     control-plane: controller-manager

+   name: system

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+   labels:

+     control-plane: controller-manager

+ spec:

+   selector:

+     matchLabels:

+       control-plane: controller-manager

+   replicas: 1

+   template:

+     metadata:

+       labels:

+         control-plane: controller-manager

+     spec:

+       securityContext:

+         runAsNonRoot: true

+       containers:

+       - args:

+         - --leader-elect

+         - --leader-election-id=operator

+         image: controller:latest

+         name: manager

+         env:

+         - name: ANSIBLE_GATHERING

+           value: explicit

+         securityContext:

+           allowPrivilegeEscalation: false

+         livenessProbe:

+           httpGet:

+             path: /healthz

+             port: 6789

+           initialDelaySeconds: 15

+           periodSeconds: 20

+         readinessProbe:

+           httpGet:

+             path: /readyz

+             port: 6789

+           initialDelaySeconds: 5

+           periodSeconds: 10

+       serviceAccountName: controller-manager

+       terminationGracePeriodSeconds: 10

@@ -0,0 +1,7 @@ 

+ # These resources constitute the fully configured set of manifests

+ # used to generate the 'manifests/' directory in a bundle.

+ resources:

+ - bases/operator.clusterserviceversion.yaml

+ - ../default

+ - ../samples

+ - ../scorecard

@@ -0,0 +1,2 @@ 

+ resources:

+ - monitor.yaml

@@ -0,0 +1,20 @@ 

+ 

+ # Prometheus Monitor Service (Metrics)

+ apiVersion: monitoring.coreos.com/v1

+ kind: ServiceMonitor

+ metadata:

+   labels:

+     control-plane: controller-manager

+   name: controller-manager-metrics-monitor

+   namespace: system

+ spec:

+   endpoints:

+     - path: /metrics

+       port: https

+       scheme: https

+       bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token

+       tlsConfig:

+         insecureSkipVerify: true

+   selector:

+     matchLabels:

+       control-plane: controller-manager

@@ -0,0 +1,9 @@ 

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRole

+ metadata:

+   name: metrics-reader

+ rules:

+ - nonResourceURLs:

+   - "/metrics"

+   verbs:

+   - get

@@ -0,0 +1,17 @@ 

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRole

+ metadata:

+   name: proxy-role

+ rules:

+ - apiGroups:

+   - authentication.k8s.io

+   resources:

+   - tokenreviews

+   verbs:

+   - create

+ - apiGroups:

+   - authorization.k8s.io

+   resources:

+   - subjectaccessreviews

+   verbs:

+   - create

@@ -0,0 +1,12 @@ 

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRoleBinding

+ metadata:

+   name: proxy-rolebinding

+ roleRef:

+   apiGroup: rbac.authorization.k8s.io

+   kind: ClusterRole

+   name: proxy-role

+ subjects:

+ - kind: ServiceAccount

+   name: controller-manager

+   namespace: system

@@ -0,0 +1,15 @@ 

+ apiVersion: v1

+ kind: Service

+ metadata:

+   labels:

+     control-plane: controller-manager

+   name: controller-manager-metrics-service

+   namespace: system

+ spec:

+   ports:

+   - name: https

+     port: 8443

+     protocol: TCP

+     targetPort: https

+   selector:

+     control-plane: controller-manager

@@ -0,0 +1,18 @@ 

+ resources:

+ # All RBAC will be applied under this service account in

+ # the deployment namespace. You may comment out this resource

+ # if your manager will use a service account that exists at

+ # runtime. Be sure to update RoleBinding and ClusterRoleBinding

+ # subjects if changing service account names.

+ - service_account.yaml

+ - role.yaml

+ - role_binding.yaml

+ - leader_election_role.yaml

+ - leader_election_role_binding.yaml

+ # Comment the following 4 lines if you want to disable

+ # the auth proxy (https://github.com/brancz/kube-rbac-proxy)

+ # which protects your /metrics endpoint.

+ - auth_proxy_service.yaml

+ - auth_proxy_role.yaml

+ - auth_proxy_role_binding.yaml

+ - auth_proxy_client_clusterrole.yaml

@@ -0,0 +1,37 @@ 

+ # permissions to do leader election.

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: Role

+ metadata:

+   name: leader-election-role

+ rules:

+ - apiGroups:

+   - ""

+   resources:

+   - configmaps

+   verbs:

+   - get

+   - list

+   - watch

+   - create

+   - update

+   - patch

+   - delete

+ - apiGroups:

+   - coordination.k8s.io

+   resources:

+   - leases

+   verbs:

+   - get

+   - list

+   - watch

+   - create

+   - update

+   - patch

+   - delete

+ - apiGroups:

+   - ""

+   resources:

+   - events

+   verbs:

+   - create

+   - patch

@@ -0,0 +1,12 @@ 

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: RoleBinding

+ metadata:

+   name: leader-election-rolebinding

+ roleRef:

+   apiGroup: rbac.authorization.k8s.io

+   kind: Role

+   name: leader-election-role

+ subjects:

+ - kind: ServiceAccount

+   name: controller-manager

+   namespace: system

@@ -0,0 +1,24 @@ 

+ # permissions for end users to edit resultsdbs.

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRole

+ metadata:

+   name: resultsdb-editor-role

+ rules:

+ - apiGroups:

+   - gating.k8s.apps.fedoraproject.org

+   resources:

+   - resultsdbs

+   verbs:

+   - create

+   - delete

+   - get

+   - list

+   - patch

+   - update

+   - watch

+ - apiGroups:

+   - gating.k8s.apps.fedoraproject.org

+   resources:

+   - resultsdbs/status

+   verbs:

+   - get

@@ -0,0 +1,20 @@ 

+ # permissions for end users to view resultsdbs.

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRole

+ metadata:

+   name: resultsdb-viewer-role

+ rules:

+ - apiGroups:

+   - gating.k8s.apps.fedoraproject.org

+   resources:

+   - resultsdbs

+   verbs:

+   - get

+   - list

+   - watch

+ - apiGroups:

+   - gating.k8s.apps.fedoraproject.org

+   resources:

+   - resultsdbs/status

+   verbs:

+   - get

@@ -0,0 +1,59 @@ 

+ ---

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRole

+ metadata:

+   name: manager-role

+ rules:

+   ##

+   ## Base operator rules

+   ##

+   - apiGroups:

+       - ""

+     resources:

+       - services

+       - configmaps

+       - secrets

+       - pods

+       - pods/exec

+       - pods/log

+     verbs:

+       - create

+       - delete

+       - get

+       - list

+       - patch

+       - update

+       - watch

+   - apiGroups:

+       - apps

+     resources:

+       - deployments

+       - daemonsets

+       - replicasets

+       - statefulsets

+     verbs:

+       - create

+       - delete

+       - get

+       - list

+       - patch

+       - update

+       - watch

+   ##

+   ## Rules for gating.k8s.apps.fedoraproject.org/v1alpha1, Kind: ResultsDB

+   ##

+   - apiGroups:

+       - gating.k8s.apps.fedoraproject.org

+     resources:

+       - resultsdbs

+       - resultsdbs/status

+       - resultsdbs/finalizers

+     verbs:

+       - create

+       - delete

+       - get

+       - list

+       - patch

+       - update

+       - watch

+ #+kubebuilder:scaffold:rules

@@ -0,0 +1,12 @@ 

+ apiVersion: rbac.authorization.k8s.io/v1

+ kind: ClusterRoleBinding

+ metadata:

+   name: manager-rolebinding

+ roleRef:

+   apiGroup: rbac.authorization.k8s.io

+   kind: ClusterRole

+   name: manager-role

+ subjects:

+ - kind: ServiceAccount

+   name: controller-manager

+   namespace: system

@@ -0,0 +1,5 @@ 

+ apiVersion: v1

+ kind: ServiceAccount

+ metadata:

+   name: controller-manager

+   namespace: system

@@ -0,0 +1,8 @@ 

+ apiVersion: gating.k8s.apps.fedoraproject.org/v1alpha1

+ kind: ResultsDB

+ metadata:

+   name: sample

+ spec:

+   image: quay.io/fedora-kube-sig/resultsdb:latest

+   psql_secret_ref: postgres

+   replicas: 1

@@ -0,0 +1,4 @@ 

+ ## Append samples you want in your CSV to this file as resources ##

+ resources:

+ - gating_v1alpha1_resultsdb.yaml

+ #+kubebuilder:scaffold:manifestskustomizesamples

@@ -0,0 +1,7 @@ 

+ apiVersion: scorecard.operatorframework.io/v1alpha3

+ kind: Configuration

+ metadata:

+   name: config

+ stages:

+ - parallel: true

+   tests: []

@@ -0,0 +1,16 @@ 

+ resources:

+ - bases/config.yaml

+ patchesJson6902:

+ - path: patches/basic.config.yaml

+   target:

+     group: scorecard.operatorframework.io

+     version: v1alpha3

+     kind: Configuration

+     name: config

+ - path: patches/olm.config.yaml

+   target:

+     group: scorecard.operatorframework.io

+     version: v1alpha3

+     kind: Configuration

+     name: config

+ #+kubebuilder:scaffold:patchesJson6902

@@ -0,0 +1,10 @@ 

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - basic-check-spec

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: basic

+       test: basic-check-spec-test

@@ -0,0 +1,50 @@ 

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - olm-bundle-validation

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: olm

+       test: olm-bundle-validation-test

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - olm-crds-have-validation

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: olm

+       test: olm-crds-have-validation-test

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - olm-crds-have-resources

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: olm

+       test: olm-crds-have-resources-test

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - olm-spec-descriptors

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: olm

+       test: olm-spec-descriptors-test

+ - op: add

+   path: /stages/0/tests/-

+   value:

+     entrypoint:

+     - scorecard-test

+     - olm-status-descriptors

+     image: quay.io/operator-framework/scorecard-test:v1.11.0

+     labels:

+       suite: olm

+       test: olm-status-descriptors-test

@@ -0,0 +1,14 @@ 

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+         - name: manager

+           env:

+           - name: ANSIBLE_DEBUG_LOGS

+             value: "TRUE"

@@ -0,0 +1,25 @@ 

+ # Adds namespace to all resources.

+ namespace: osdk-test

+ 

+ namePrefix: osdk-

+ 

+ # Labels to add to all resources and selectors.

+ #commonLabels:

+ #  someName: someValue

+ 

+ patchesStrategicMerge:

+ - manager_image.yaml

+ - debug_logs_patch.yaml

+ - ../default/manager_auth_proxy_patch.yaml

+ 

+ apiVersion: kustomize.config.k8s.io/v1beta1

+ kind: Kustomization

+ resources:

+ - ../crd

+ - ../rbac

+ - ../manager

+ images:

+ - name: testing

+   newName: testing-operator

+ patches:

+ - path: pull_policy/Never.yaml

@@ -0,0 +1,12 @@ 

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+         - name: manager

+           image: testing

@@ -0,0 +1,12 @@ 

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+         - name: manager

+           imagePullPolicy: Always

@@ -0,0 +1,12 @@ 

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+         - name: manager

+           imagePullPolicy: IfNotPresent

@@ -0,0 +1,12 @@ 

+ ---

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: controller-manager

+   namespace: system

+ spec:

+   template:

+     spec:

+       containers:

+         - name: manager

+           imagePullPolicy: Never

@@ -0,0 +1,29 @@ 

+ ---

+ - name: Converge

+   hosts: localhost

+   connection: local

+   gather_facts: no

+   collections:

+     - community.kubernetes

+ 

+   tasks:

+     - name: Create Namespace

+       k8s:

+         api_version: v1

+         kind: Namespace

+         name: '{{ namespace }}'

+ 

+     - name: Deploy postgres

+       k8s:

+         src: "{{ components_dir }}/psql/{{ item }}"

+         namespace: "{{ namespace }}"

+         state: present

+         wait: true

+       with_items:

+         - 0-secret.yaml

+         - 1-deployment.yaml

+         - 2-service.yaml

+ 

+     - import_tasks: kustomize.yml

+       vars:

+         state: present

@@ -0,0 +1,6 @@ 

+ ---

+ - name: Create

+   hosts: localhost

+   connection: local

+   gather_facts: false

+   tasks: []

@@ -0,0 +1,24 @@ 

+ ---

+ - name: Destroy

+   hosts: localhost

+   connection: local

+   gather_facts: false

+   collections:

+     - community.kubernetes

+ 

+   tasks:

+     - import_tasks: kustomize.yml

+       vars:

+         state: absent

+ 

+     - name: Destroy Namespace

+       k8s:

+         api_version: v1

+         kind: Namespace

+         name: '{{ namespace }}'

+         state: absent

+ 

+     - name: Unset pull policy

+       command: '{{ kustomize }} edit remove patch pull_policy/{{ operator_pull_policy }}.yaml'

+       args:

+         chdir: '{{ config_dir }}/testing'

@@ -0,0 +1,16 @@ 

+ ---

+ - name: Build kustomize testing overlay

+   # --load-restrictor must be set to LoadRestrictionsNone so we can load patch files from the default overlay
 
+   # (kustomize versions before v4 used the old spelling: '{{ kustomize }} build --load_restrictor none .')

+   command: '{{ kustomize }} build --load-restrictor LoadRestrictionsNone .'

+   args:

+     chdir: '{{ config_dir }}/testing'

+   register: resources

+   changed_when: false

+ 

+ - name: Set resources to {{ state }}

+   k8s:

+     definition: '{{ item }}'

+     state: '{{ state }}'

+     wait: yes

+   loop: '{{ resources.stdout | from_yaml_all | list }}'

@@ -0,0 +1,37 @@ 

+ ---

+ dependency:

+   name: galaxy

+ driver:

+   name: delegated

+ lint: |

+   set -e

+   yamllint -d "{extends: relaxed, rules: {line-length: {max: 120}}}" .

+ platforms:

+   - name: cluster

+     groups:

+       - k8s

+ provisioner:

+   name: ansible

+   lint: |

+     set -e

+     ansible-lint

+   inventory:

+     group_vars:

+       all:

+         namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test}

+     host_vars:

+       localhost:

+         ansible_python_interpreter: '{{ ansible_playbook_python }}'

+         config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config

+         samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples

+         components_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/components

+         operator_image: ${OPERATOR_IMAGE:-""}

+         operator_pull_policy: ${OPERATOR_PULL_POLICY:-"Always"}

+         kustomize: ${KUSTOMIZE_PATH:-kustomize}

+   env:

+     K8S_AUTH_KUBECONFIG: ${KUBECONFIG:-"~/.kube/config"}

+ verifier:

+   name: ansible

+   lint: |

+     set -e

+     ansible-lint

@@ -0,0 +1,28 @@ 

+ ---

+ - name: Prepare

+   hosts: localhost

+   connection: local

+   gather_facts: false

+ 

+   tasks:

+     - name: Ensure operator image is set

+       fail:

+         msg: |

+           You must specify the OPERATOR_IMAGE environment variable in order to run the

+           'default' scenario

+       when: not operator_image

+ 

+     - name: Set testing image

+       command: '{{ kustomize }} edit set image testing={{ operator_image }}'

+       args:

+         chdir: '{{ config_dir }}/testing'

+ 

+     - name: Set pull policy

+       command: '{{ kustomize }} edit add patch --path pull_policy/{{ operator_pull_policy }}.yaml'

+       args:

+         chdir: '{{ config_dir }}/testing'

+ 

+     - name: Set testing namespace

+       command: '{{ kustomize }} edit set namespace {{ namespace }}'

+       args:

+         chdir: '{{ config_dir }}/testing'

@@ -0,0 +1,60 @@ 

+ ---

+ - name: Create the gating.k8s.apps.fedoraproject.org/v1alpha1.ResultsDB

+   k8s:

+     state: present

+     namespace: '{{ namespace }}'

+     definition: "{{ lookup('template', '/'.join([samples_dir, cr_file])) | from_yaml }}"

+     wait: yes

+     wait_timeout: 300

+     wait_condition:

+       type: Running

+       reason: Successful

+       status: "True"

+   vars:

+     cr_file: 'gating_v1alpha1_resultsdb.yaml'

+ 

+ - block:

+     - name: Test resultsdb settings secret

+       community.kubernetes.k8s_info:

+         api_version: v1

+         kind: Secret

+         name: resultsdb-sample-config

+         namespace: "{{ namespace }}"

+       register: _k8s_info

+ 

+     - name: Assert resultsdb settings secret

+       assert:

+         that:

+           - _k8s_info.resources | length == 1

+           - "'settings.py' in _k8s_info.resources[0].data"

+         fail_msg: Failed to retrieve resultsdb settings secret

+ 

+ - block:

+     - name: Test resultsdb deployment

+       community.kubernetes.k8s_info:

+         api_version: apps/v1

+         kind: Deployment

+         name: resultsdb-sample

+         namespace: "{{ namespace }}"

+       register: _k8s_info

+ 

+     - name: Assert resultsdb deployment

+       assert:

+         that:

+           - _k8s_info.resources | length == 1

+         fail_msg: Failed to retrieve resultsdb deployment

+ 

+ - block:

+     - name: Test resultsdb service

+       community.kubernetes.k8s_info:

+         api_version: v1

+         kind: Service

+         name: resultsdb-sample-http

+         namespace: "{{ namespace }}"

+       register: _k8s_info

+ 

+     - name: Assert resultsdb service

+       assert:

+         that:

+           - _k8s_info.resources | length == 1

+         fail_msg: Failed to retrieve resultsdb service

@@ -0,0 +1,57 @@ 

+ ---

+ - name: Verify

+   hosts: localhost

+   connection: local

+   gather_facts: no

+   collections:

+     - community.kubernetes

+ 

+   vars:

+     ctrl_label: control-plane=controller-manager

+ 

+   tasks:

+     - block:

+         - name: Import all test files from tasks/

+           include_tasks: '{{ item }}'

+           with_fileglob:

+             - tasks/*_test.yml

+       rescue:

+         - name: Retrieve relevant resources

+           k8s_info:

+             api_version: '{{ item.api_version }}'

+             kind: '{{ item.kind }}'

+             namespace: '{{ namespace }}'

+           loop:

+             - api_version: v1

+               kind: Pod

+             - api_version: apps/v1

+               kind: Deployment

+             - api_version: v1

+               kind: Secret

+             - api_version: v1

+               kind: ConfigMap

+           register: debug_resources

+ 

+         - name: Retrieve Pod logs

+           k8s_log:

+             name: '{{ item.metadata.name }}'

+             namespace: '{{ namespace }}'

+             container: manager

+           loop: "{{ q('k8s', api_version='v1', kind='Pod', namespace=namespace, label_selector=ctrl_label) }}"

+           register: debug_logs

+ 

+         - name: Output gathered resources

+           debug:

+             var: debug_resources

+ 

+         - name: Output gathered logs

+           debug:

+             var: item.log_lines

+           loop: '{{ debug_logs.results }}'

+ 

+         - name: Re-emit failure

+           vars:

+             failed_task:

+               result: '{{ ansible_failed_result }}'

+           fail:

+             msg: '{{ failed_task }}'

@@ -0,0 +1,24 @@ 

+ ---

+ - name: Converge

+   hosts: localhost

+   connection: local

+   gather_facts: no

+ 

+   tasks:

+     - name: Build operator image

+       docker_image:

+         build:

+           path: '{{ project_dir }}'

+           pull: no

+         name: '{{ operator_image }}'

+         tag: latest

+         push: no

+         source: build

+         force_source: yes

+ 

+     - name: Load image into kind cluster

+       command: kind load docker-image --name osdk-test '{{ operator_image }}'

+       register: result

+       changed_when: '"not yet present" in result.stdout'

+ 

+ - import_playbook: ../default/converge.yml

@@ -0,0 +1,8 @@ 

+ ---

+ - name: Create

+   hosts: localhost

+   connection: local

+   gather_facts: false

+   tasks:

+     - name: Create test kind cluster

+       command: kind create cluster --name osdk-test --kubeconfig {{ kubeconfig }}

@@ -0,0 +1,16 @@ 

+ ---

+ - name: Destroy

+   hosts: localhost

+   connection: local

+   gather_facts: false

+   collections:

+     - community.kubernetes

+ 

+   tasks:

+     - name: Destroy test kind cluster

+       command: kind delete cluster --name osdk-test --kubeconfig {{ kubeconfig }}

+ 

+     - name: Unset pull policy

+       command: '{{ kustomize }} edit remove patch pull_policy/{{ operator_pull_policy }}.yaml'

+       args:

+         chdir: '{{ config_dir }}/testing'

@@ -0,0 +1,43 @@ 

+ ---

+ dependency:

+   name: galaxy

+ driver:

+   name: delegated

+ lint: |

+   set -e

+   yamllint -d "{extends: relaxed, rules: {line-length: {max: 120}}}" .

+ platforms:

+   - name: cluster

+     groups:

+       - k8s

+ provisioner:

+   name: ansible

+   playbooks:

+     prepare: ../default/prepare.yml

+     verify: ../default/verify.yml

+   lint: |

+     set -e

+     ansible-lint

+   inventory:

+     group_vars:

+       all:

+         namespace: ${TEST_OPERATOR_NAMESPACE:-osdk-test}

+     host_vars:

+       localhost:

+         ansible_python_interpreter: '{{ ansible_playbook_python }}'

+         config_dir: ${MOLECULE_PROJECT_DIRECTORY}/config

+         samples_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/samples

+         project_dir: ${MOLECULE_PROJECT_DIRECTORY}

+         components_dir: ${MOLECULE_PROJECT_DIRECTORY}/config/components

+         operator_image: testing-operator

+         operator_pull_policy: "Never"

+         kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"

+         kustomize: ${KUSTOMIZE_PATH:-kustomize}

+   env:

+     K8S_AUTH_KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig

+     KUBECONFIG: ${MOLECULE_EPHEMERAL_DIRECTORY}/kubeconfig

+ verifier:

+   name: ansible

+   lint: |

+     set -e

+     ansible-lint

empty or binary file added
@@ -0,0 +1,6 @@ 

+ ---

+ collections:

+   - name: community.kubernetes

+     version: "1.2.1"

+   - name: operator_sdk.util

+     version: "0.2.0"

@@ -0,0 +1,43 @@ 

+ Role Name

+ =========

+ 

+ A brief description of the role goes here.

+ 

+ Requirements

+ ------------

+ 

+ Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance,

+ if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.

+ 

+ Role Variables

+ --------------

+ 

+ A description of the settable variables for this role should go here, including any variables that are in 

+ defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables 

+ that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.

+ 

+ Dependencies

+ ------------

+ 

+ A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set

+ for other roles, or variables that are used from other roles.

+ 

+ Example Playbook

+ ----------------

+ 

+ Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for

+ users too:

+ 

+     - hosts: servers

+       roles:

+          - { role: username.rolename, x: 42 }

+ 

+ License

+ -------

+ 

+ BSD

+ 

+ Author Information

+ ------------------

+ 

+ An optional section for the role authors to include contact information, or a website (HTML is not allowed).

@@ -0,0 +1,6 @@ 

+ ---

+ # defaults file for ResultsDB

+ resultsdb_image: "{{ image | default('quay.io/fedora-kube-sig/resultsdb:latest') }}"

+ resultsdb_psql_secret_ref: "{{ psql_secret_ref | default('resultsdb-psql') }}"

+ resultsdb_replicas: "{{ replicas | default(1) | int }}"

+ resultsdb_secret_key: "{{ secret_key | default('12345') }}"

empty or binary file added
@@ -0,0 +1,2 @@ 

+ ---

+ # handlers file for ResultsDB

@@ -0,0 +1,64 @@ 

+ ---

+ galaxy_info:

+   author: your name

+   description: your description

+   company: your company (optional)

+ 

+   # If the issue tracker for your role is not on github, uncomment the

+   # next line and provide a value

+   # issue_tracker_url: http://example.com/issue/tracker

+ 

+   # Some suggested licenses:

+   # - BSD (default)

+   # - MIT

+   # - GPLv2

+   # - GPLv3

+   # - Apache

+   # - CC-BY

+   license: license (GPLv2, CC-BY, etc)

+ 

+   min_ansible_version: 2.9

+ 

+   # If this a Container Enabled role, provide the minimum Ansible Container version.

+   # min_ansible_container_version:

+ 

+   # Optionally specify the branch Galaxy will use when accessing the GitHub

+   # repo for this role. During role install, if no tags are available,

+   # Galaxy will use this branch. During import Galaxy will access files on

+   # this branch. If Travis integration is configured, only notifications for this

+   # branch will be accepted. Otherwise, in all cases, the repo's default branch

+   # (usually master) will be used.

+   #github_branch:

+ 

+   #

+   # Provide a list of supported platforms, and for each platform a list of versions.

+   # If you don't wish to enumerate all versions for a particular platform, use 'all'.

+   # To view available platforms and versions (or releases), visit:

+   # https://galaxy.ansible.com/api/v1/platforms/

+   #

+   # platforms:

+   # - name: Fedora

+   #   versions:

+   #   - all

+   #   - 25

+   # - name: SomePlatform

+   #   versions:

+   #   - all

+   #   - 1.0

+   #   - 7

+   #   - 99.99

+ 

+   galaxy_tags: []

+     # List tags for your role here, one per line. A tag is a keyword that describes

+     # and categorizes the role. Users find roles by searching for tags. Be sure to

+     # remove the '[]' above, if you add tags to this list.

+     #

+     # NOTE: A tag is limited to a single word comprised of alphanumeric characters.

+     #       Maximum 20 tags per role.

+ 

+ dependencies: []

+   # List your role dependencies here, one per line. Be sure to remove the '[]' above,

+   # if you add dependencies to this list.

+ collections:

+ - operator_sdk.util

+ - community.kubernetes

@@ -0,0 +1,38 @@ 

+ ---

+ # tasks file for ResultsDB

+ - name: Check if psql secret exists

+   community.kubernetes.k8s_info:

+     api_version: v1

+     kind: Secret

+     name: "{{ resultsdb_psql_secret_ref }}"

+     namespace: "{{ ansible_operator_meta.namespace }}"

+   register: _k8s_info_psql

+   failed_when: "_k8s_info_psql.resources | length == 0"

+ 

+ - name: Set psql secret variable

+   set_fact:

+     resultsdb_psql_info: "{{ _k8s_info_psql.resources[0] }}"

+ 

+ - name: Apply resultsdb app secret

+   community.kubernetes.k8s:

+     state: present

+     wait: true

+     definition: "{{ lookup('template', 'secret-settings.yaml') }}"

+   vars:

+     resultsdb_psql_user: "{{ resultsdb_psql_info.data['POSTGRES_USER'] | b64decode }}"

+     resultsdb_psql_password: "{{ resultsdb_psql_info.data['POSTGRES_PASSWORD'] | b64decode }}"

+     resultsdb_psql_host: "{{ resultsdb_psql_info.data['POSTGRES_HOST'] | b64decode }}"

+     resultsdb_psql_port: "{{ resultsdb_psql_info.data['POSTGRES_PORT'] | b64decode }}"

+     resultsdb_psql_db: "{{ resultsdb_psql_info.data['POSTGRES_DB'] | b64decode  }}"

+ 

+ - name: Apply resultsdb deployment

+   community.kubernetes.k8s:

+     state: present

+     wait: true

+     definition: "{{ lookup('template', 'deployment.yaml') }}"

+ 

+ - name: Apply resultsdb service

+   community.kubernetes.k8s:

+     state: present

+     wait: true

+     definition: "{{ lookup('template', 'service.yaml') }}"

@@ -0,0 +1,29 @@ 

+ apiVersion: v1

+ kind: ConfigMap

+ metadata:

+   name: "resultsdb-{{ ansible_operator_meta.name }}-httpd-config"

+   namespace: "{{ ansible_operator_meta.namespace }}"

+   labels:

+     app: "resultsdb-{{ ansible_operator_meta.name }}"

+ data:

+   resultsdb.conf: |-

+     <IfModule !auth_basic_module>

+       LoadModule auth_basic_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_basic.so'

+     </IfModule>

+     <IfModule !authn_file_module>

+       LoadModule authn_file_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authn_file.so'

+     </IfModule>

+     <IfModule !authz_user_module>

+       LoadModule authz_user_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_user.so'

+     </IfModule>

+ 

+     <Location "/">

+       AuthType Basic

+       AuthName "Authentication Required"

+       AuthBasicProvider file

+       AuthUserFile "/etc/resultsdb/.htpasswd"

+       <LimitExcept GET>

+         Require valid-user

+       </LimitExcept>

+     </Location>

+ 

@@ -0,0 +1,64 @@ 

+ apiVersion: apps/v1

+ kind: Deployment

+ metadata:

+   name: "resultsdb-{{ ansible_operator_meta.name }}"

+   namespace: "{{ ansible_operator_meta.namespace }}"

+   labels:

+     app: "resultsdb-{{ ansible_operator_meta.name }}"

+ spec:

+   replicas: {{ resultsdb_replicas }}

+   selector:

+     matchLabels:

+       app: "resultsdb-{{ ansible_operator_meta.name }}"

+   template:

+     metadata:

+       labels:

+         app: "resultsdb-{{ ansible_operator_meta.name }}"

+     spec:

+       initContainers:

+         - name: resultsdb-initdb

+           image: "{{ resultsdb_image }}"

+           command:

+             - /bin/sh

+             - -i

+             - -c

+             - |

+               # try for 10 minutes (600 seconds)

+               e=$(( $(date +%s) + 600 ))

+               i=0

+               while [ $(date +%s) -lt $e ]; do

+                 echo 'TRY #'$((++i))

+                 if resultsdb init_db ; then

+                   exit 0

+                 fi

+               done

+               exit 1

+           volumeMounts:

+             - name: config-volume

+               mountPath: /etc/resultsdb

+               readOnly: true

+       containers:

+         - name: resultsdb

+           image: "{{ resultsdb_image }}"

+           ports:

+             - containerPort: 5001

+           readinessProbe:

+             timeoutSeconds: 1

+             initialDelaySeconds: 5

+             httpGet:

+               path: /api/v2.0/

+               port: 5001

+           livenessProbe:

+             timeoutSeconds: 1

+             initialDelaySeconds: 30

+             httpGet:

+               path: /api/v2.0/

+               port: 5001

+           volumeMounts:

+             - name: config-volume

+               mountPath: /etc/resultsdb

+               readOnly: true

+       volumes:

+         - name: config-volume

+           secret:

+             secretName: resultsdb-{{ ansible_operator_meta.name }}-config

@@ -0,0 +1,25 @@ 

+ apiVersion: v1

+ kind: Secret

+ metadata:

+   name: "resultsdb-{{ ansible_operator_meta.name }}-config"

+   namespace: "{{ ansible_operator_meta.namespace }}"

+   labels:

+     app: "resultsdb-{{ ansible_operator_meta.name }}"

+ stringData:

+   settings.py: |-

+     SECRET_KEY = '{{ resultsdb_secret_key }}'

+     SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{{ resultsdb_psql_user }}:{{ resultsdb_psql_password }}@{{ resultsdb_psql_host }}:{{ resultsdb_psql_port }}/{{ resultsdb_psql_db }}'

+ 

+     FILE_LOGGING = False

+     LOGFILE = '/var/log/resultsdb/resultsdb.log'

+     SYSLOG_LOGGING = False

+     STREAM_LOGGING = True

+     RUN_HOST = '0.0.0.0'

+     RUN_PORT = 5001

+ 

+     MESSAGE_BUS_PUBLISH = False

+     MESSAGE_BUS_PUBLISH_TASKOTRON = False

+     MESSAGE_BUS_PLUGIN = 'fedmsg'

+ 

+     MESSAGE_BUS_KWARGS = {'modname': 'resultsdb'}

+     ADDITIONAL_RESULT_OUTCOMES = ['CRASHED', 'QUEUED', 'RUNNING']

@@ -0,0 +1,15 @@ 

+ apiVersion: v1

+ kind: Service

+ metadata:

+   name: "resultsdb-{{ ansible_operator_meta.name }}-http"

+   namespace: "{{ ansible_operator_meta.namespace }}"

+   labels:

+     app: "resultsdb-{{ ansible_operator_meta.name }}"

+ spec:

+   selector:

+     app: "resultsdb-{{ ansible_operator_meta.name }}"

+   type: NodePort

+   ports:

+     - name: http

+       port: 5001

+       targetPort: 5001

@@ -0,0 +1,2 @@ 

+ ---

+ # vars file for ResultsDB

@@ -0,0 +1,7 @@ 

+ ---

+ # Use the 'create api' subcommand to add watches to this file.

+ - version: v1alpha1

+   group: gating.k8s.apps.fedoraproject.org

+   kind: ResultsDB

+   role: resultsdb

+ #+kubebuilder:scaffold:watch

fixes #5

Changes

  • Adds extra component files in config/components
  • Adds a ResultsDB CR and API
  • Molecule tests for ResultsDB CR

Verification

  • Run molecule test -s kind

Pull-Request has been merged by lrossett

2 years ago
Metadata
Changes Summary 76
+1
file added
.gitignore
-32
file removed
images/resultsdb-listener/Dockerfile
-20
file removed
images/resultsdb-listener/Makefile
-14
file removed
images/resultsdb-listener/files/entrypoint
-3
file removed
images/resultsdb-listener/files/listener.cfg
+2 -2
file changed
images/resultsdb/Dockerfile
+2 -2
file changed
images/resultsdb/Makefile
+14
file added
operator/.gitignore
+9
file added
operator/Dockerfile
+173
file added
operator/Makefile
+16
file added
operator/PROJECT
+22
file added
operator/config/components/ingress/ingress.yaml
+12
file added
operator/config/components/psql/0-secret.yaml
+29
file added
operator/config/components/psql/1-deployment.yaml
+12
file added
operator/config/components/psql/2-service.yaml
+44
file added
operator/config/crd/bases/gating.k8s.apps.fedoraproject.org_resultsdbs.yaml
+6
file added
operator/config/crd/kustomization.yaml
+30
file added
operator/config/default/kustomization.yaml
+28
file added
operator/config/default/manager_auth_proxy_patch.yaml
+20
file added
operator/config/default/manager_config_patch.yaml
+10
file added
operator/config/manager/controller_manager_config.yaml
+10
file added
operator/config/manager/kustomization.yaml
+51
file added
operator/config/manager/manager.yaml
+7
file added
operator/config/manifests/kustomization.yaml
+2
file added
operator/config/prometheus/kustomization.yaml
+20
file added
operator/config/prometheus/monitor.yaml
+9
file added
operator/config/rbac/auth_proxy_client_clusterrole.yaml
+17
file added
operator/config/rbac/auth_proxy_role.yaml
+12
file added
operator/config/rbac/auth_proxy_role_binding.yaml
+15
file added
operator/config/rbac/auth_proxy_service.yaml
+18
file added
operator/config/rbac/kustomization.yaml
+37
file added
operator/config/rbac/leader_election_role.yaml
+12
file added
operator/config/rbac/leader_election_role_binding.yaml
+24
file added
operator/config/rbac/resultsdb_editor_role.yaml
+20
file added
operator/config/rbac/resultsdb_viewer_role.yaml
+59
file added
operator/config/rbac/role.yaml
+12
file added
operator/config/rbac/role_binding.yaml
+5
file added
operator/config/rbac/service_account.yaml
+8
file added
operator/config/samples/gating_v1alpha1_resultsdb.yaml
+4
file added
operator/config/samples/kustomization.yaml
+7
file added
operator/config/scorecard/bases/config.yaml
+16
file added
operator/config/scorecard/kustomization.yaml
+10
file added
operator/config/scorecard/patches/basic.config.yaml
+50
file added
operator/config/scorecard/patches/olm.config.yaml
+14
file added
operator/config/testing/debug_logs_patch.yaml
+25
file added
operator/config/testing/kustomization.yaml
+12
file added
operator/config/testing/manager_image.yaml
+12
file added
operator/config/testing/pull_policy/Always.yaml
+12
file added
operator/config/testing/pull_policy/IfNotPresent.yaml
+12
file added
operator/config/testing/pull_policy/Never.yaml
+29
file added
operator/molecule/default/converge.yml
+6
file added
operator/molecule/default/create.yml
+24
file added
operator/molecule/default/destroy.yml
+16
file added
operator/molecule/default/kustomize.yml
+37
file added
operator/molecule/default/molecule.yml
+28
file added
operator/molecule/default/prepare.yml
+60
file added
operator/molecule/default/tasks/resultsdb_test.yml
+57
file added
operator/molecule/default/verify.yml
+24
file added
operator/molecule/kind/converge.yml
+8
file added
operator/molecule/kind/create.yml
+16
file added
operator/molecule/kind/destroy.yml
+43
file added
operator/molecule/kind/molecule.yml
+0
file added
operator/playbooks/.placeholder
+6
file added
operator/requirements.yml
+43
file added
operator/roles/resultsdb/README.md
+6
file added
operator/roles/resultsdb/defaults/main.yml
+0
file added
operator/roles/resultsdb/files/.placeholder
+2
file added
operator/roles/resultsdb/handlers/main.yml
+64
file added
operator/roles/resultsdb/meta/main.yml
+38
file added
operator/roles/resultsdb/tasks/main.yml
+29
file added
operator/roles/resultsdb/templates/configmap-httpd.yaml
+64
file added
operator/roles/resultsdb/templates/deployment.yaml
+25
file added
operator/roles/resultsdb/templates/secret-settings.yaml
+15
file added
operator/roles/resultsdb/templates/service.yaml
+2
file added
operator/roles/resultsdb/vars/main.yml
+7
file added
operator/watches.yaml