#338 CI/CD: inject full integration test into promotion workflows
Merged a year ago by rayson. Opened a year ago by rayson.

@@ -1,3 +1,5 @@ 

  NAME=waiverdb-prod-integration-test

  IMAGE=quay.io/factory2/waiverdb:prod

  ENVIRONMENT=prod

+ BACKEND_INTEGRATION_TEST_JOB=factory2-stage-integration-test

+ BACKEND_INTEGRATION_TEST_JOB_NAMESPACE=c3i

@@ -1,1 +1,1 @@ 

- waiverdb-integration-test-template.yaml

+ waiverdb-full-integration-test-template.yaml

@@ -1,3 +1,5 @@ 

  NAME=waiverdb-stage-integration-test

  IMAGE=quay.io/factory2/waiverdb:stage

  ENVIRONMENT=stage

+ BACKEND_INTEGRATION_TEST_JOB=factory2-stage-integration-test

+ BACKEND_INTEGRATION_TEST_JOB_NAMESPACE=c3i

@@ -1,1 +1,1 @@ 

- waiverdb-integration-test-template.yaml

+ waiverdb-full-integration-test-template.yaml

@@ -0,0 +1,117 @@ 

+ # Template to produce a new OpenShift pipeline for running integration tests

+ #

+ ---

+ apiVersion: v1

+ kind: Template

+ metadata:

+   name: waiverdb-full-integration-test

+ labels:

Aren't labels usually part of metadata?

+   template: waiverdb-full-integration-test

+ parameters:

+ - name: NAME

+   displayName: Short unique identifier for the templated instances

+   description: This field is used to deploy multiple pipelines to one OpenShift project from this template.

+   required: true

+   value: waiverdb-full-integration-test

+ - name: IMAGE

+   displayName: The container image to be tested

+   description: This field must be in repo:tag or repo@sha256 format

+   value: quay.io/factory2/waiverdb:latest

+ - name: WAIVERDB_GIT_REPO

+   displayName: WaiverDB Git repo URL

+   description: Default WaiverDB Git repo URL against which to run functional tests

+   required: true

+   value: "https://pagure.io/waiverdb.git"

+ - name: WAIVERDB_GIT_REF

+   displayName: WaiverDB Git repo ref

+   description: Default WaiverDB Git repo ref against which to run functional tests

+   required: true

+   value: master

+ - name: JENKINS_AGENT_IMAGE

+   displayName: Container image for Jenkins slave pods

+   required: true

+   value: docker-registry.engineering.redhat.com/factory2/waiverdb-jenkins-slave:latest

+ - name: CONTAINER_REGISTRY_CREDENTIALS

+   displayName: Secret name of container registries used for pulling and pushing images

+   value: factory2-pipeline-registry-credentials

+   required: false

+ - name: JENKINS_AGENT_CLOUD_NAME

+   displayName: Name of OpenShift cloud in Jenkins master configuration

+   required: true

+   value: openshift

+ - name: ENVIRONMENT

+   displayName: environment name (dev/stage/prod)

+   required: true

+   value: stage

+ - name: MESSAGING_PROVIDER

+   displayName: Name of the JMS messaging provider

+   value: Red Hat UMB

+ - name: BACKEND_INTEGRATION_TEST_JOB

+   displayName: backend integration test job to trigger

+   required: true

+ - name: BACKEND_INTEGRATION_TEST_JOB_NAMESPACE

+   displayName: namespace of the backend integration test job to trigger

+   required: false

+   value: c3i

+ objects:

+ - kind: ServiceAccount

+   apiVersion: v1

+   metadata:

+     name: "${NAME}-jenkins-slave"

+     labels:

+       app: "${NAME}"

+ - kind: RoleBinding

+   apiVersion: v1

+   metadata:

+     name: "${NAME}-jenkins-slave_edit"

+     labels:

+       app: "${NAME}"

+   subjects:

+   - kind: ServiceAccount

+     name: "${NAME}-jenkins-slave"

+   roleRef:

+     name: edit

+ - kind: "BuildConfig"

+   apiVersion: "v1"

+   metadata:

+     name: "${NAME}"

+     labels:

+       app: "${NAME}"

+   spec:

+     runPolicy: "Serial" # FIXME: Parallel is supported, but we have limited quota in UpShift.

+     completionDeadlineSeconds: 1800

+     source:

+       git:

+         uri: "${WAIVERDB_GIT_REPO}"

+         ref: "${WAIVERDB_GIT_REF}"

+     strategy:

+       type: JenkinsPipeline

+       jenkinsPipelineStrategy:

+         env:

+         - name: "WAIVERDB_GIT_REPO"

+           value: "${WAIVERDB_GIT_REPO}"

+         - name: "WAIVERDB_GIT_REF"

+           value: "${WAIVERDB_GIT_REF}"

+         - name: "IMAGE"

+           value: "${IMAGE}"

+         - name: IMAGE_IS_SCRATCH

+           value: "true"

+         - name: "CONTAINER_REGISTRY_CREDENTIALS"

+           value: "${CONTAINER_REGISTRY_CREDENTIALS}"

+         - name: "TEST_ID"

+           value: ""

+         - name: JENKINS_AGENT_IMAGE

+           value: "${JENKINS_AGENT_IMAGE}"

+         - name: JENKINS_AGENT_CLOUD_NAME

+           value: "${JENKINS_AGENT_CLOUD_NAME}"

+         - name: JENKINS_AGENT_SERVICE_ACCOUNT

+           value: "${NAME}-jenkins-slave"

+         - name: ENVIRONMENT

+           value: "${ENVIRONMENT}"

+         - name: MESSAGING_PROVIDER

+           value: "${MESSAGING_PROVIDER}"

+         - name: BACKEND_INTEGRATION_TEST_JOB

+           value: "${BACKEND_INTEGRATION_TEST_JOB}"

+         - name: BACKEND_INTEGRATION_TEST_JOB_NAMESPACE

+           value: "${BACKEND_INTEGRATION_TEST_JOB_NAMESPACE}"

+         jenkinsfilePath: openshift/pipelines/templates/waiverdb-full-integration-test.Jenkinsfile

@@ -0,0 +1,122 @@ 

+ pipeline {

+   agent {

+     kubernetes {

+       cloud "${params.JENKINS_AGENT_CLOUD_NAME}"

+       label "jenkins-slave-${UUID.randomUUID().toString()}"

+       serviceAccount "${params.JENKINS_AGENT_SERVICE_ACCOUNT}"

+       defaultContainer 'jnlp'

+       yaml """

+       apiVersion: v1

+       kind: Pod

+       metadata:

+         labels:

+           app: "${env.JOB_BASE_NAME}"

+           factory2-pipeline-kind: "waiverdb-integration-test-pipeline"

+           factory2-pipeline-build-number: "${env.BUILD_NUMBER}"

+       spec:

+         containers:

+         - name: jnlp

+           image: "${params.JENKINS_AGENT_IMAGE}"

+           imagePullPolicy: Always

+           resources:

+             requests:

+               memory: 512Mi

+               cpu: 200m

+             limits:

+               memory: 768Mi

+               cpu: 300m

+       """

+     }

+   }

+   options {

+     timestamps()

+     timeout(time: 30, unit: 'MINUTES')

+   }

+   stages {

+     stage('Run integration test') {

+       steps {

+         script {

+           env.IMAGE_DIGEST = getImageDigest(env.IMAGE)

+           if (!env.IMAGE_DIGEST) {

+             env.IMAGE_URI = env.IMAGE

+              if (!env.IMAGE_URI.startsWith('atomic:') && !env.IMAGE_URI.startsWith('docker://')) {

+                   env.IMAGE_URI = 'docker://' + env.IMAGE_URI

+               }

+             echo "Image URI ${env.IMAGE_URI} doesn't contain the image digest. Fetching from the registry..."

+             def metadataText = sh(returnStdout: true, script: 'skopeo inspect ${IMAGE_URI}').trim()

+             def metadata = readJSON text: metadataText

+             env.IMAGE_DIGEST = metadata.Digest

+           }

+           if (!env.IMAGE_DIGEST) {

+             error "Couldn't get digest of image '${env.IMAGE}'"

+           }

+           echo "Digest of image '${env.IMAGE}': ${env.IMAGE_DIGEST}"

+ 

+           // TODO: in the short term, we don't run integration tests here

+           // but trigger the integration test job in the c3i project.

+           openshift.withCluster() {

+             openshift.withProject(params.BACKEND_INTEGRATION_TEST_JOB_NAMESPACE) {

+               def testBcSelector = openshift.selector('bc', params.BACKEND_INTEGRATION_TEST_JOB)

+               def buildSelector = testBcSelector.startBuild(

+                 '-e', "WAIVERDB_IMAGE=${env.IMAGE}",

+                 '-e', "TARGET_IMAGE_REPO=factory2/waiverdb",

+                 '-e', "TARGET_IMAGE_DIGEST=${env.IMAGE_DIGEST}",

+                 '-e', "TARGET_IMAGE_IS_SCRATCH=${env.IMAGE_IS_SCRATCH}",

+                 '-e', "TARGET_IMAGE_VERREL=${env.BUILD_TAG}",

+                 '-e', "TESTCASE_CATEGORY=${env.ENVIRONMENT}",

+                 )

+               waitForBuild(buildSelector)

+               echo "Integration test passed."

+             }

+           }

+         }

+       }

+     }

+   }

+ }

+ 

+ // Extract digest from the image URI

+ // e.g. factory2/waiverdb@sha256:35201c572fc8a137862b7a256476add8d7465fa5043d53d117f4132402f8ef6b

+ //   -> sha256:35201c572fc8a137862b7a256476add8d7465fa5043d53d117f4132402f8ef6b

+ @NonCPS

+ def getImageDigest(String image) {

+   def matcher = (env.IMAGE =~ /@(sha256:\w+)$/)

+   return matcher ? matcher[0][1] : ''

+ }

+ 

+ // Wait for a build to complete.

+ // Taken from https://pagure.io/c3i-library/blob/master/f/src/com/redhat/c3i/util/Builder.groovy

+ // Note: We can't use `c3i.wait()` here because that function switches to the default project.

+ //  Filed a PR for this issue: https://pagure.io/c3i-library/pull-request/19

+ def waitForBuild(build) {

+   echo "Waiting for ${build.name()} to start..."

+   timeout(5) {

+     build.watch {

+       return !(it.object().status.phase in ["New", "Pending", "Unknown"])

+     }

+   }

+   def buildobj = build.object()

+   def buildurl = buildobj.metadata.annotations['openshift.io/jenkins-build-uri']

+   if (buildurl) {

+     echo "Details: ${buildurl}"

+   }

+   if (buildobj.spec.strategy.type == "JenkinsPipeline") {

+     echo "Waiting for ${build.name()} to complete..."

+     build.logs("--tail=1")

+     timeout(60) {

+       build.watch {

+         it.object().status.phase != "Running"

+       }

+     }

+   } else {

+     echo "Following build logs..."

+     while (build.object().status.phase == "Running") {

+       build.logs("--tail=1", "--timestamps=true", "-f")

+     }

+   }

+   buildobj = build.object()

+   if (buildobj.status.phase != "Complete") {

+     error "Build ${buildobj.metadata.name} ${buildobj.status.phase}"

+   }

+   return build.name()

+ }

This change will run the full integration test in the dev->stage and stage->prod promotion workflows. The old functional test will still be used for gating the dev image.

In the short term, we are running the Factory 2.0 integration test in the PSI c3i project rather than the waiverdb-test project.

@lholecek @gnaponie @mprahl @csomh Could you do a review?

Aren't labels usually part of metadata?

The top-level labels field is a shortcut to apply labels to all objects processed from the template: https://docs.openshift.com/container-platform/3.11/dev_guide/templates.html#writing-labels

rebased onto 94b605f48764cd26c38c774ae916dda488bfb6f8

a year ago

Can you use following command to get the digest?

podman inspect --format '{{.Digest}}' ${IMAGE_URI}

Why not use the library?

Here we are waiting for an OpenShift build in another project, however the exposed c3i.wait() function calls openshift.withCluster(), which will change to the project that runs Jenkins master. I've filed a PR for this issue: https://pagure.io/c3i-library/pull-request/19.

Is TTY really needed?

Not needed. I will remove it.

Can you use following command to get the digest?
podman inspect --format '{{.Digest}}' ${IMAGE_URI}

I tried this command in a container, but it complains about a failure to create an overlayfs on top of an overlayfs. It seems to me we have to use skopeo here.

rebased onto 0d36d6c

a year ago

+1 Nice, thanks for the explanations and being patient with me. :)

@lholecek Thanks. I will merge it and see if everything works fine

Pull-Request has been merged by rayson

a year ago