#315 Check decision_context and others in gating.yaml files
Merged 5 years ago by gnaponie. Opened 5 years ago by lholecek.
lholecek/greenwave remote-decision-context  into  master

@@ -76,8 +76,6 @@ 

          rules:

            - !PassingTestCaseRule {test_case_name: dist.depcheck}

  

- the decision_context it is not really important at the very moment.

- 

  *NB*. It is not possible to insert a RemoteRule inside a gating.yaml file.

  This will provoke an error.

  

file modified
+4 -2
@@ -345,7 +345,8 @@ 

      waivers = [w for w in waivers if w['id'] not in ignore_waivers]

  

      for policy in subject_policies:

-         answers.extend(policy.check(subject_identifier, results_retriever, waivers))

+         answers.extend(

+             policy.check(product_version, subject_identifier, results_retriever, waivers))

  

      if build_policies:

          build_nvrs = retrieve_builds_in_update(subject_identifier)
@@ -361,7 +362,8 @@ 

              ]

  

              for policy in build_policies:

-                 answers.extend(policy.check(nvr, results_retriever, nvr_waivers))

+                 answers.extend(

+                     policy.check(product_version, nvr, results_retriever, nvr_waivers))

      else:

          build_nvrs = []

  

file modified
+34 -27
@@ -266,13 +266,13 @@ 

  

      This base class is not used directly.

      """

-     def check(self, subject_type, subject_identifier, results_retriever, waivers):

+     def check(self, policy, product_version, subject_identifier, results_retriever, waivers):

          """

          Evaluate this policy rule for the given item.

  

          Args:

-             subject_type (str): Type of thing we are making a decision about

-                 (for example, 'koji_build', 'bodhi_update', ...)

+             policy (Policy): Parent policy of the rule

+             product_version (str): Product version we are making a decision about

              subject_identifier (str): Item we are making a decision about (for

                  example, Koji build NVR, Bodhi update id, ...)

              results_retriever (ResultsRetriever): Object for retrieving data
@@ -295,8 +295,8 @@ 

      yaml_tag = '!RemoteRule'

      safe_yaml_attributes = {}

  

-     def check(self, subject_type, subject_identifier, results_retriever, waivers):

-         if subject_type != 'koji_build':

+     def check(self, policy, product_version, subject_identifier, results_retriever, waivers):

+         if policy.subject_type != 'koji_build':

              return []

  

          pkg_namespace, pkg_name, rev = greenwave.resources.retrieve_scm_from_koji(
@@ -315,21 +315,26 @@ 

          try:

              policies = RemotePolicy.safe_load_all(response)

          except SafeYAMLError as e:

-             if any(waives_invalid_gating_yaml(waiver, subject_type, subject_identifier)

+             if any(waives_invalid_gating_yaml(waiver, policy.subject_type, subject_identifier)

                      for waiver in waivers):

                  return []

              return [

                  InvalidGatingYaml(

-                     subject_type, subject_identifier, 'invalid-gating-yaml', str(e))

+                     policy.subject_type, subject_identifier, 'invalid-gating-yaml', str(e))

              ]

  

          answers = []

-         for policy in policies:

-             response = policy.check(subject_identifier, results_retriever, waivers)

-             if isinstance(response, list):

-                 answers.extend(response)

-             else:

-                 answers.append(response)

+         for remote_policy in policies:

+             if remote_policy.applies_to(

+                     policy.decision_context, product_version, policy.subject_type):

+                 response = remote_policy.check(

+                     product_version, subject_identifier, results_retriever, waivers)

+ 

+                 if isinstance(response, list):

+                     answers.extend(response)

+                 else:

+                     answers.append(response)

+ 

          return answers

  

      def to_json(self):
@@ -350,9 +355,9 @@ 

          'scenario': SafeYAMLString(optional=True),

      }

  

-     def check(self, subject_type, subject_identifier, results_retriever, waivers):

+     def check(self, policy, product_version, subject_identifier, results_retriever, waivers):

          matching_results = results_retriever.retrieve(

-             subject_type, subject_identifier, self.test_case_name)

+             policy.subject_type, subject_identifier, self.test_case_name)

          matching_waivers = [

              w for w in waivers if (w['testcase'] == self.test_case_name and w['waived'] is True)]

  
@@ -365,13 +370,13 @@ 

          latest_result = next(matching_results, None)

          if not latest_result:

              if not matching_waivers:

-                 return TestResultMissing(subject_type, subject_identifier, self.test_case_name,

-                                          self.scenario)

+                 return TestResultMissing(

+                     policy.subject_type, subject_identifier, self.test_case_name, self.scenario)

              return TestResultMissingWaived(

-                 subject_type, subject_identifier, self.test_case_name, self.scenario)

+                 policy.subject_type, subject_identifier, self.test_case_name, self.scenario)

  

          # For compose make decisions based on all architectures and variants.

-         if subject_type == 'compose':

+         if policy.subject_type == 'compose':

              matching_results = itertools.chain([latest_result], matching_results)

              visited_arch_variants = set()

              answers = []
@@ -387,8 +392,9 @@ 

  

                  if arch_variant not in visited_arch_variants:

                      visited_arch_variants.add(arch_variant)

-                     answers.append(

-                         self._answer_for_result(result, waivers, subject_type, subject_identifier))

+                     answer = self._answer_for_result(

+                         result, waivers, policy.subject_type, subject_identifier)

+                     answers.append(answer)

  

              return answers

  
@@ -396,7 +402,7 @@ 

          # will be the latest chronologically, because ResultsDB always returns

          # results ordered by `submit_time` descending.

          return self._answer_for_result(

-             latest_result, waivers, subject_type, subject_identifier)

+             latest_result, waivers, policy.subject_type, subject_identifier)

  

      def to_json(self):

          return {
@@ -439,7 +445,7 @@ 

          'repos': SafeYAMLList(str),

      }

  

-     def check(self, subject_type, subject_identifier, results_retriever, waivers):

+     def check(self, policy, product_version, subject_identifier, results_retriever, waivers):

          """ Check that the subject passes testcase for the given results, but

          only if the subject is a build of a package name configured for

          this rule, specified by "repos".  Any of the repos may be a glob.
@@ -453,7 +459,7 @@ 

          satisfied (ignored).

          """

  

-         if subject_type != 'koji_build':

+         if policy.subject_type != 'koji_build':

              return []

  

          pkg_name = subject_identifier.rsplit('-', 2)[0]
@@ -463,7 +469,7 @@ 

          rule = PassingTestCaseRule()

          # pylint: disable=attribute-defined-outside-init

          rule.test_case_name = self.test_case_name

-         return rule.check(subject_type, subject_identifier, results_retriever, waivers)

+         return rule.check(policy, product_version, subject_identifier, results_retriever, waivers)

  

      def to_json(self):

          return {
@@ -502,7 +508,7 @@ 

                  self.applies_to_product_version(product_version) and

                  subject_type == self.subject_type)

  

-     def check(self, subject_identifier, results_retriever, waivers):

+     def check(self, product_version, subject_identifier, results_retriever, waivers):

          # If an item is about a package and it is in the blacklist, return RuleSatisfied()

          if self.subject_type == 'koji_build':

              name = subject_identifier.rsplit('-', 2)[0]
@@ -510,7 +516,8 @@ 

                  return [BlacklistedInPolicy(subject_identifier) for rule in self.rules]

          answers = []

          for rule in self.rules:

-             response = rule.check(self.subject_type, subject_identifier, results_retriever, waivers)

+             response = rule.check(

+                 self, product_version, subject_identifier, results_retriever, waivers)

              if isinstance(response, list):

                  answers.extend(response)

              else:

@@ -89,13 +89,13 @@ 

  

      # Ensure that absence of a result is failure.

      item, results, waivers = {}, DummyResultsRetriever(), []

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultMissing)

  

      # But also that waiving the absence works.

      waivers = [{'testcase': 'sometest', 'waived': True}]

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

  
@@ -112,7 +112,7 @@ 

  --- !Policy

  id: some_id

  product_versions:

- - irrelevant

+ - fedora-rawhide

  decision_context: test

  subject_type: koji_build

  rules:
@@ -130,17 +130,17 @@ 

      }

  

      item, waivers = 'some_nevr', [waiver]

-     decision = policy.check(item, results, [])

+     decision = policy.check('fedora-rawhide', item, results, [])

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

  

      # Also, be sure that negative waivers work.

      waivers[0]['waived'] = False

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  
@@ -157,7 +157,7 @@ 

  --- !Policy

  id: some_id

  product_versions:

- - irrelevant

+ - fedora-rawhide

  decision_context: test

  subject_type: bodhi_update

  rules:
@@ -175,17 +175,17 @@ 

          u'waived': True,

      }]

  

-     decision = policy.check(item, results, [])

+     decision = policy.check('fedora-rawhide', item, results, [])

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

  

      # Also, be sure that negative waivers work.

      waivers[0]['waived'] = False

-     decision = policy.check(item, results, waivers)

+     decision = policy.check('fedora-rawhide', item, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  
@@ -207,47 +207,47 @@ 

  

      # Ensure that we fail with no results

      results, waivers = DummyResultsRetriever(), []

-     decision = policy.check('nethack-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'nethack-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultMissing)

  

      # That a matching, failing result can fail

      results = DummyResultsRetriever('nethack-1.2.3-1.el9000', 'sometest', 'FAILED')

-     decision = policy.check('nethack-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'nethack-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  

      # That a matching, passing result can pass

      results = DummyResultsRetriever('nethack-1.2.3-1.el9000', 'sometest')

-     decision = policy.check('nethack-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'nethack-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

  

      # That a non-matching passing result is ignored.

      results = DummyResultsRetriever('foobar-1.2.3-1.el9000', 'sometest')

-     decision = policy.check('foobar-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'foobar-1.2.3-1.el9000', results, waivers)

      assert decision == []

  

      # That a non-matching failing result is ignored.

      results = DummyResultsRetriever('foobar-1.2.3-1.el9000', 'sometest', 'FAILED')

-     decision = policy.check('foobar-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'foobar-1.2.3-1.el9000', results, waivers)

      assert decision == []

  

      # Ensure that fnmatch globs work in absence

      results, waivers = DummyResultsRetriever(), []

-     decision = policy.check('python-foobar-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'python-foobar-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultMissing)

  

      # Ensure that fnmatch globs work in the negative.

      results = DummyResultsRetriever('python-foobar-1.2.3-1.el9000', 'sometest', 'FAILED')

-     decision = policy.check('python-foobar-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'python-foobar-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], TestResultFailed)

  

      # Ensure that fnmatch globs work in the positive.

      results = DummyResultsRetriever('python-foobar-1.2.3-1.el9000', 'sometest')

-     decision = policy.check('python-foobar-1.2.3-1.el9000', results, waivers)

+     decision = policy.check('rhel-9000', 'python-foobar-1.2.3-1.el9000', results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

  
@@ -377,19 +377,19 @@ 

  

                  # Ensure that presence of a result is success.

                  results = DummyResultsRetriever(nvr, 'dist.upgradepath')

-                 decision = policy.check(nvr, results, waivers)

+                 decision = policy.check('fedora-26', nvr, results, waivers)

                  assert len(decision) == 1

                  assert isinstance(decision[0], RuleSatisfied)

  

                  # Ensure that absence of a result is failure.

                  results = DummyResultsRetriever()

-                 decision = policy.check(nvr, results, waivers)

+                 decision = policy.check('fedora-26', nvr, results, waivers)

                  assert len(decision) == 1

                  assert isinstance(decision[0], TestResultMissing)

  

                  # And that a result with a failure, is a failure.

                  results = DummyResultsRetriever(nvr, 'dist.upgradepath', 'FAILED')

-                 decision = policy.check(nvr, results, waivers)

+                 decision = policy.check('fedora-26', nvr, results, waivers)

                  assert len(decision) == 1

                  assert isinstance(decision[0], TestResultFailed)

  
@@ -428,7 +428,7 @@ 

  

                  results, waivers = [], []

                  expected_details = "Policy 'untitled': Attribute 'product_versions' is required"

-                 decision = policy.check(nvr, results, waivers)

+                 decision = policy.check('fedora-26', nvr, results, waivers)

                  assert len(decision) == 1

                  assert isinstance(decision[0], InvalidGatingYaml)

                  assert decision[0].is_satisfied is False
@@ -483,7 +483,7 @@ 

                      policy = policies[0]

  

                      results, waivers = [], []

-                     decision = policy.check(nvr, results, waivers)

+                     decision = policy.check('fedora-26', nvr, results, waivers)

                      assert len(decision) == 1

                      assert isinstance(decision[0], InvalidGatingYaml)

                      assert decision[0].is_satisfied is False
@@ -546,7 +546,7 @@ 

                          'product_version': 'fedora-26',

                          'comment': 'Waiving the invalig gating.yaml file'

                      }]

-                     decision = policy.check(nvr, results, waivers)

+                     decision = policy.check('fedora-26', nvr, results, waivers)

                      assert len(decision) == 0

  

  
@@ -789,6 +789,6 @@ 

      results = DummyResultsRetriever(nv, 'test_for_new_type', 'PASSED',

                                      'component-version')

      waivers = []

-     decision = policy.check(nv, results, waivers)

+     decision = policy.check('fedora-29', nv, results, waivers)

      assert len(decision) == 1

      assert isinstance(decision[0], RuleSatisfied)

Use policies from remote gating.yaml files only if they match
decision_context, product_version and subject_type for current
decision (as it's done for internal policies).

Fixes #282

Signed-off-by: Lukas Holecek hluk@email.cz

+1
But we should also update the doc. In the "Tutorial - How to configure the RemoteRule" there is written that it's not really important how you configure the decision context.

But we should also update the doc. In the "Tutorial - How to configure the RemoteRule" there is written that it's not really important how you configure the decision context.

Oh, forgot about that. Updating...

rebased onto 8ac429c690c714bd81d16f8fd2efcb732315a0ea

5 years ago

I've removed the following line from the docs.

the decision_context it is not really important at the very moment.

It already mentions that the context must match the one in the decision request. But it also mentions that product_version and subject_type must match. Should I check for all of these values instead of just the decision_context? I would need to refactor the code a bit more.

Yeah, so I guess we should make the code consistent with the doc.

rebased onto 24ce15982c74c69ffaa1da124b19a9fdd992fa9e

5 years ago

rebased onto 8384b6f5887b8c9b5e88e650fba004b25580482b

5 years ago

rebased onto 34318fb350509ca60f69f07bce3ecf158e77cdde

5 years ago

rebased onto 373a583

5 years ago

Commit a366324 fixes this pull-request

Pull-Request has been merged by gnaponie

5 years ago

Pull-Request has been merged by gnaponie

5 years ago