#108 Add scenario values to the API response.
Merged 6 years ago by ralph. Opened 6 years ago by ralph.

@@ -70,7 +70,8 @@
                      'item': nvr,
                      'type': 'koji_build'
                  },
-                 'type': 'test-result-missing'
+                 'type': 'test-result-missing',
+                 'scenario': None,
              },
              {
                  'testcase': 'dist.upgradepath',
@@ -78,7 +79,8 @@
                      'item': nvr,
                      'type': 'koji_build'
                  },
-                 'type': 'test-result-missing'
+                 'type': 'test-result-missing',
+                 'scenario': None,
              }
          ],
          'summary': '2 of 3 required tests not found',

file modified  +39 -25
@@ -83,9 +83,6 @@
      'dist.upgradepath',
  ]
  
- OPENQA_TASKS = [
-     'compose.install_no_user',
- ]
  OPENQA_SCENARIOS = [
      'scenario1',
      'scenario2',
@@ -257,13 +254,15 @@
              'item': {'item': nvr, 'type': 'koji_build'},
              'result_id': result['id'],
              'testcase': 'dist.rpmdiff.comparison.xml_validity',
+             'scenario': None,
              'type': 'test-result-failed'
          },
      ] + [
          {
              'item': {'item': nvr, 'type': 'koji_build'},
              'testcase': name,
-             'type': 'test-result-missing'
+             'type': 'test-result-missing',
+             'scenario': None,
          } for name in all_rpmdiff_testcase_names if name != 'dist.rpmdiff.comparison.xml_validity'
      ]
      assert sorted(res_data['unsatisfied_requirements']) == sorted(expected_unsatisfied_requirements)
@@ -289,7 +288,8 @@
          {
              'item': {'item': nvr, 'type': 'koji_build'},
              'testcase': name,
-             'type': 'test-result-missing'
+             'type': 'test-result-missing',
+             'scenario': None,
          } for name in all_rpmdiff_testcase_names
      ]
      assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
@@ -379,7 +379,8 @@
              'item': {'item': nvr, 'type': 'koji_build'},
              'result_id': result['id'],
              'testcase': 'dist.abicheck',
-             'type': 'test-result-failed'
+             'type': 'test-result-failed',
+             'scenario': None,
          },
      ]
      assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
@@ -420,7 +421,8 @@
          {
              'item': {'item': nvr, 'type': 'koji_build'},
              'testcase': TASKTRON_RELEASE_CRITICAL_TASKS[0],
-             'type': 'test-result-missing'
+             'type': 'test-result-missing',
+             'scenario': None,
          },
      ]
      assert r.status_code == 200
@@ -435,12 +437,13 @@
      If we require two scenarios to pass, and both pass, then we pass.
      """
      compose_id = testdatabuilder.unique_compose_id()
-     for testcase_name in OPENQA_TASKS:
-         for scenario in OPENQA_SCENARIOS:
-             testdatabuilder.create_result(item=compose_id,
-                                           testcase_name=testcase_name,
-                                           scenario=scenario,
-                                           outcome='PASSED')
+     testcase_name = 'compose.install_no_user'
+     for scenario in OPENQA_SCENARIOS:
+         testdatabuilder.create_result(
+             item=compose_id,
+             testcase_name=testcase_name,
+             scenario=scenario,
+             outcome='PASSED')
      data = {
          'decision_context': 'rawhide_compose_sync_to_mirrors',
          'product_version': 'fedora-rawhide',
@@ -464,17 +467,19 @@
      """
  
      compose_id = testdatabuilder.unique_compose_id()
-     for testcase_name in OPENQA_TASKS:
-         # Scenario 1 passes..
-         testdatabuilder.create_result(item=compose_id,
-                                       testcase_name=testcase_name,
-                                       scenario='scenario1',
-                                       outcome='PASSED')
-         # But scenario 2 fails!
-         testdatabuilder.create_result(item=compose_id,
-                                       testcase_name=testcase_name,
-                                       scenario='scenario2',
-                                       outcome='FAILED')
+     testcase_name = 'compose.install_no_user'
+     # Scenario 1 passes..
+     testdatabuilder.create_result(
+         item=compose_id,
+         testcase_name=testcase_name,
+         scenario='scenario1',
+         outcome='PASSED')
+     # But scenario 2 fails!
+     result = testdatabuilder.create_result(
+         item=compose_id,
+         testcase_name=testcase_name,
+         scenario='scenario2',
+         outcome='FAILED')
      data = {
          'decision_context': 'rawhide_compose_sync_to_mirrors',
          'product_version': 'fedora-rawhide',
@@ -489,6 +494,14 @@
      assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide']
      expected_summary = '1 of 2 required tests failed'
      assert res_data['summary'] == expected_summary
+     expected_unsatisfied_requirements = [{
+         u'item': {u'item': compose_id},
+         u'result_id': result['id'],
+         u'testcase': testcase_name,
+         u'type': u'test-result-failed',
+         u'scenario': u'scenario2',
+     }]
+     assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
  
  
  def test_ignore_waiver(requests_session, greenwave_server, testdatabuilder):
@@ -530,7 +543,8 @@
              'item': {'item': nvr, 'type': 'koji_build'},
              'result_id': result['id'],
              'testcase': all_rpmdiff_testcase_names[0],
-             'type': 'test-result-failed'
+             'type': 'test-result-failed',
+             'scenario': None,
          },
      ]
      assert res_data['policies_satisfied'] is False

file modified  +8 -4
@@ -47,15 +47,17 @@
      ResultsDB with a matching item and test case name).
      """
  
-     def __init__(self, item, test_case_name):
+     def __init__(self, item, test_case_name, scenario):
          self.item = item
          self.test_case_name = test_case_name
+         self.scenario = scenario
  
      def to_json(self):
          return {
              'type': 'test-result-missing',
              'item': self.item,
              'testcase': self.test_case_name,
+             'scenario': self.scenario,
          }
  
  
@@ -65,9 +67,10 @@
      not ``PASSED`` or ``INFO``) and no corresponding waiver was found.
      """
  
-     def __init__(self, item, test_case_name, result_id):
+     def __init__(self, item, test_case_name, scenario, result_id):
          self.item = item
          self.test_case_name = test_case_name
+         self.scenario = scenario
          self.result_id = result_id
  
      def to_json(self):
@@ -75,6 +78,7 @@
              'type': 'test-result-failed',
              'item': self.item,
              'testcase': self.test_case_name,
+             'scenario': self.scenario,
              'result_id': self.result_id,
          }
  
@@ -154,7 +158,7 @@
                                  r['data'].get('scenario', [])]
  
          if not matching_results:
-             return TestResultMissing(item, self.test_case_name)
+             return TestResultMissing(item, self.test_case_name, self._scenario)
          # If we find multiple matching results, we always use the first one which
          # will be the latest chronologically, because ResultsDB always returns
          # results ordered by `submit_time` descending.
@@ -164,7 +168,7 @@
          # XXX limit who is allowed to waive
          if any(w['result_id'] == matching_result['id'] and w['waived'] for w in waivers):
              return RuleSatisfied()
-         return TestResultFailed(item, self.test_case_name, matching_result['id'])
+         return TestResultFailed(item, self.test_case_name, self._scenario, matching_result['id'])
  
      @property
      def _scenario(self):
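
For reference, here is a condensed, runnable sketch of the two answer classes as changed above. The class names, signatures, and field names are taken from the diff; the values in the assertions at the bottom are made up purely for illustration:

    class TestResultMissing(object):
        """A required test case with no matching result found in ResultsDB."""

        def __init__(self, item, test_case_name, scenario):
            self.item = item
            self.test_case_name = test_case_name
            self.scenario = scenario

        def to_json(self):
            return {
                'type': 'test-result-missing',
                'item': self.item,
                'testcase': self.test_case_name,
                'scenario': self.scenario,
            }

    class TestResultFailed(object):
        """A required test case whose latest result failed and was not waived."""

        def __init__(self, item, test_case_name, scenario, result_id):
            self.item = item
            self.test_case_name = test_case_name
            self.scenario = scenario
            self.result_id = result_id

        def to_json(self):
            return {
                'type': 'test-result-failed',
                'item': self.item,
                'testcase': self.test_case_name,
                'scenario': self.scenario,
                'result_id': self.result_id,
            }

    # Illustrative values only: a scenario-based (openQA) requirement keeps its
    # scenario, while a Koji build requirement serializes it as None.
    assert TestResultFailed({'item': 'some-compose-id'}, 'compose.install_no_user',
                            'scenario2', 123).to_json()['scenario'] == 'scenario2'
    assert TestResultMissing({'item': 'some-nvr', 'type': 'koji_build'},
                             'dist.abicheck', None).to_json()['scenario'] is None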

@@ -13,14 +13,14 @@
  def test_summarize_answers():
      assert summarize_answers([RuleSatisfied()]) == \
          'all required tests passed'
-     assert summarize_answers([TestResultFailed('item', 'test', 'id'), RuleSatisfied()]) == \
+     assert summarize_answers([TestResultFailed('item', 'test', None, 'id'), RuleSatisfied()]) == \
          '1 of 2 required tests failed'
-     assert summarize_answers([TestResultMissing('item', 'test')]) == \
+     assert summarize_answers([TestResultMissing('item', 'test', None)]) == \
          'no test results found'
-     assert summarize_answers([TestResultMissing('item', 'test'),
-                               TestResultFailed('item', 'test', 'id')]) == \
+     assert summarize_answers([TestResultMissing('item', 'test', None),
+                               TestResultFailed('item', 'test', None, 'id')]) == \
          '1 of 2 required tests failed'
-     assert summarize_answers([TestResultMissing('item', 'test'), RuleSatisfied()]) == \
+     assert summarize_answers([TestResultMissing('item', 'test', None), RuleSatisfied()]) == \
          '1 of 2 required tests not found'

  

  

Added at the request of @adamwill. Without this, it would be hard to understand responses about failed Rawhide composes.
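
Concretely, an unsatisfied requirement in the decision response now carries the scenario it applies to. The entry below mirrors the expectation added in the tests above; compose_id and result['id'] are whatever the test data builder created:

    {
        'item': {'item': compose_id},
        'result_id': result['id'],
        'testcase': 'compose.install_no_user',
        'type': 'test-result-failed',
        'scenario': 'scenario2',
    }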

How come it's just None in all the test cases?

I guess this is because the policies we are using in the test suite don't have any scenarios set, even though we must be using that in the prod policy... Do we need to update the test suite policies?

> I guess this is because the policies we are using in the test suite don't have any scenarios set, even though we must be using that in the prod policy...

Mostly -- I did add a scenario'd test case requirement for compose.install_no_user in conf/policies/fedora.yaml.

1 new commit added

  • Ensure at least one test returns a non-None scenario in the response.
6 years ago

@dcallagh use of scenarios is fairly new (as in, the last few days) - @ralph added it after I pointed out it's necessary to distinguish between openQA results (where we quite often run the same test case on multiple media).
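
To illustrate that point, here is a sketch with the ResultsDB payloads trimmed to just the fields the rule inspects (r['data'].get('scenario', []) in the hunk above); the scenario names are the placeholders used by the test suite, not real openQA scenarios:

    # Two results for the same openQA test case, run against different media.
    # Without the scenario in the response, the two entries would be
    # indistinguishable to the caller.
    results = [
        {'testcase': 'compose.install_no_user', 'outcome': 'PASSED',
         'data': {'scenario': ['scenario1']}},
        {'testcase': 'compose.install_no_user', 'outcome': 'FAILED',
         'data': {'scenario': ['scenario2']}},
    ]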

Pull-Request has been merged by ralph

6 years ago