| |
@@ -83,9 +83,6 @@
|
| |
'dist.upgradepath',
|
| |
]
|
| |
|
| |
- OPENQA_TASKS = [
|
| |
- 'compose.install_no_user',
|
| |
- ]
|
| |
OPENQA_SCENARIOS = [
|
| |
'scenario1',
|
| |
'scenario2',
|
| |
@@ -257,13 +254,15 @@
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'result_id': result['id'],
|
| |
'testcase': 'dist.rpmdiff.comparison.xml_validity',
|
| |
+ 'scenario': None,
|
| |
'type': 'test-result-failed'
|
| |
},
|
| |
] + [
|
| |
{
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'testcase': name,
|
| |
- 'type': 'test-result-missing'
|
| |
+ 'type': 'test-result-missing',
|
| |
+ 'scenario': None,
|
| |
} for name in all_rpmdiff_testcase_names if name != 'dist.rpmdiff.comparison.xml_validity'
|
| |
]
|
| |
assert sorted(res_data['unsatisfied_requirements']) == sorted(expected_unsatisfied_requirements)
|
| |
@@ -289,7 +288,8 @@
|
| |
{
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'testcase': name,
|
| |
- 'type': 'test-result-missing'
|
| |
+ 'type': 'test-result-missing',
|
| |
+ 'scenario': None,
|
| |
} for name in all_rpmdiff_testcase_names
|
| |
]
|
| |
assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
|
| |
@@ -379,7 +379,8 @@
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'result_id': result['id'],
|
| |
'testcase': 'dist.abicheck',
|
| |
- 'type': 'test-result-failed'
|
| |
+ 'type': 'test-result-failed',
|
| |
+ 'scenario': None,
|
| |
},
|
| |
]
|
| |
assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
|
| |
@@ -420,7 +421,8 @@
|
| |
{
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'testcase': TASKTRON_RELEASE_CRITICAL_TASKS[0],
|
| |
- 'type': 'test-result-missing'
|
| |
+ 'type': 'test-result-missing',
|
| |
+ 'scenario': None,
|
| |
},
|
| |
]
|
| |
assert r.status_code == 200
|
| |
@@ -435,12 +437,13 @@
|
| |
If we require two scenarios to pass, and both pass, then we pass.
|
| |
"""
|
| |
compose_id = testdatabuilder.unique_compose_id()
|
| |
- for testcase_name in OPENQA_TASKS:
|
| |
- for scenario in OPENQA_SCENARIOS:
|
| |
- testdatabuilder.create_result(item=compose_id,
|
| |
- testcase_name=testcase_name,
|
| |
- scenario=scenario,
|
| |
- outcome='PASSED')
|
| |
+ testcase_name = 'compose.install_no_user'
|
| |
+ for scenario in OPENQA_SCENARIOS:
|
| |
+ testdatabuilder.create_result(
|
| |
+ item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario=scenario,
|
| |
+ outcome='PASSED')
|
| |
data = {
|
| |
'decision_context': 'rawhide_compose_sync_to_mirrors',
|
| |
'product_version': 'fedora-rawhide',
|
| |
@@ -464,17 +467,19 @@
|
| |
"""
|
| |
|
| |
compose_id = testdatabuilder.unique_compose_id()
|
| |
- for testcase_name in OPENQA_TASKS:
|
| |
- # Scenario 1 passes..
|
| |
- testdatabuilder.create_result(item=compose_id,
|
| |
- testcase_name=testcase_name,
|
| |
- scenario='scenario1',
|
| |
- outcome='PASSED')
|
| |
- # But scenario 2 fails!
|
| |
- testdatabuilder.create_result(item=compose_id,
|
| |
- testcase_name=testcase_name,
|
| |
- scenario='scenario2',
|
| |
- outcome='FAILED')
|
| |
+ testcase_name = 'compose.install_no_user'
|
| |
+ # Scenario 1 passes..
|
| |
+ testdatabuilder.create_result(
|
| |
+ item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario='scenario1',
|
| |
+ outcome='PASSED')
|
| |
+ # But scenario 2 fails!
|
| |
+ result = testdatabuilder.create_result(
|
| |
+ item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario='scenario2',
|
| |
+ outcome='FAILED')
|
| |
data = {
|
| |
'decision_context': 'rawhide_compose_sync_to_mirrors',
|
| |
'product_version': 'fedora-rawhide',
|
| |
@@ -489,6 +494,14 @@
|
| |
assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide']
|
| |
expected_summary = '1 of 2 required tests failed'
|
| |
assert res_data['summary'] == expected_summary
|
| |
+ expected_unsatisfied_requirements = [{
|
| |
+ u'item': {u'item': compose_id},
|
| |
+ u'result_id': result['id'],
|
| |
+ u'testcase': testcase_name,
|
| |
+ u'type': u'test-result-failed',
|
| |
+ u'scenario': u'scenario2',
|
| |
+ }]
|
| |
+ assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
|
| |
|
| |
|
| |
def test_ignore_waiver(requests_session, greenwave_server, testdatabuilder):
|
| |
@@ -530,7 +543,8 @@
|
| |
'item': {'item': nvr, 'type': 'koji_build'},
|
| |
'result_id': result['id'],
|
| |
'testcase': all_rpmdiff_testcase_names[0],
|
| |
- 'type': 'test-result-failed'
|
| |
+ 'type': 'test-result-failed',
|
| |
+ 'scenario': None,
|
| |
},
|
| |
]
|
| |
assert res_data['policies_satisfied'] is False
|
| |
Add the failing scenario to unsatisfied-requirement responses, at the
request of @adamwill. Without this, it would be hard to understand
responses about failed rawhide composes.