| |
@@ -83,6 +83,14 @@
|
| |
'dist.upgradepath',
|
| |
]
|
| |
|
| |
+ OPENQA_TASKS = [
|
| |
+ 'compose.install_no_user',
|
| |
+ ]
|
| |
+ OPENQA_SCENARIOS = [
|
| |
+ 'scenario1',
|
| |
+ 'scenario2',
|
| |
+ ]
|
| |
+
|
| |
|
| |
def test_inspect_policies(requests_session, greenwave_server):
|
| |
r = requests_session.get(greenwave_server.url + 'api/v1.0/policies',
|
| |
@@ -90,20 +98,23 @@
|
| |
assert r.status_code == 200
|
| |
body = r.json()
|
| |
policies = body['policies']
|
| |
- assert len(policies) == 4
|
| |
+ assert len(policies) == 5
|
| |
assert any(p['id'] == 'taskotron_release_critical_tasks' for p in policies)
|
| |
assert any(p['decision_context'] == 'bodhi_update_push_stable' for p in policies)
|
| |
assert any(p['product_versions'] == ['fedora-26'] for p in policies)
|
| |
expected_rules = [
|
| |
{'rule': 'PassingTestCaseRule',
|
| |
- 'test_case_name': 'dist.abicheck'},
|
| |
+ 'test_case_name': 'dist.abicheck',
|
| |
+ 'scenario': None},
|
| |
]
|
| |
assert any(p['rules'] == expected_rules for p in policies)
|
| |
expected_rules = [
|
| |
{'rule': 'PassingTestCaseRule',
|
| |
- 'test_case_name': 'dist.rpmdeplint'},
|
| |
+ 'test_case_name': 'dist.rpmdeplint',
|
| |
+ 'scenario': None},
|
| |
{'rule': 'PassingTestCaseRule',
|
| |
- 'test_case_name': 'dist.upgradepath'}]
|
| |
+ 'test_case_name': 'dist.upgradepath',
|
| |
+ 'scenario': None}]
|
| |
assert any(p['rules'] == expected_rules for p in policies)
|
| |
|
| |
|
| |
@@ -418,6 +429,66 @@
|
| |
assert res_data['unsatisfied_requirements'] == expected_unsatisfied_requirements
|
| |
|
| |
|
| |
+ def test_make_a_decision_on_passed_result_with_scenario(requests_session, greenwave_server, testdatabuilder):
|
| |
+ """
|
| |
+ If we require two scenarios to pass, and both pass, then we pass.
|
| |
+ """
|
| |
+ compose_id = testdatabuilder.unique_compose_id()
|
| |
+ for testcase_name in OPENQA_TASKS:
|
| |
+ for scenario in OPENQA_SCENARIOS:
|
| |
+ testdatabuilder.create_result(item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario=scenario,
|
| |
+ outcome='PASSED')
|
| |
+ data = {
|
| |
+ 'decision_context': 'rawhide_compose_sync_to_mirrors',
|
| |
+ 'product_version': 'fedora-rawhide',
|
| |
+ 'subject': [{'item': compose_id}],
|
| |
+ }
|
| |
+ r = requests_session.post(greenwave_server.url + 'api/v1.0/decision',
|
| |
+ headers={'Content-Type': 'application/json'},
|
| |
+ data=json.dumps(data))
|
| |
+ assert r.status_code == 200
|
| |
+ res_data = r.json()
|
| |
+ assert res_data['policies_satisified'] is True
|
| |
+ assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide']
|
| |
+ expected_summary = 'all required tests passed'
|
| |
+ assert res_data['summary'] == expected_summary
|
| |
+
|
| |
+
|
| |
+ def test_make_a_decision_on_failing_result_with_scenario(requests_session, greenwave_server, testdatabuilder):
|
| |
+ """
|
| |
+ If we require two scenarios to pass, and one is failing, then we fail.
|
| |
+ """
|
| |
+
|
| |
+ compose_id = testdatabuilder.unique_compose_id()
|
| |
+ for testcase_name in OPENQA_TASKS:
|
| |
+ # Scenario 1 passes..
|
| |
+ testdatabuilder.create_result(item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario='scenario1',
|
| |
+ outcome='PASSED')
|
| |
+ # But scenario 2 fails!
|
| |
+ testdatabuilder.create_result(item=compose_id,
|
| |
+ testcase_name=testcase_name,
|
| |
+ scenario='scenario2',
|
| |
+ outcome='FAILED')
|
| |
+ data = {
|
| |
+ 'decision_context': 'rawhide_compose_sync_to_mirrors',
|
| |
+ 'product_version': 'fedora-rawhide',
|
| |
+ 'subject': [{'item': compose_id}],
|
| |
+ }
|
| |
+ r = requests_session.post(greenwave_server.url + 'api/v1.0/decision',
|
| |
+ headers={'Content-Type': 'application/json'},
|
| |
+ data=json.dumps(data))
|
| |
+ assert r.status_code == 200
|
| |
+ res_data = r.json()
|
| |
+ assert res_data['policies_satisified'] is False
|
| |
+ assert res_data['applicable_policies'] == ['openqa_important_stuff_for_rawhide']
|
| |
+ expected_summary = '1 of 2 required tests failed'
|
| |
+ assert res_data['summary'] == expected_summary
|
| |
+
|
| |
+
|
| |
def test_ignore_waiver(requests_session, greenwave_server, testdatabuilder):
|
| |
"""
|
| |
This tests that a waiver can be ignored when making the decision.
|
| |
I spent some time this afternoon with @mohanboddu and @adamwill
investigating use of greenwave for gating rawhide composes.
https://infrastructure.fedoraproject.org/cgit/ansible.git/commit/?id=f693b6de0d6566881adc189404176edb0bef6121
One thing we ran into was that the requirements for a rawhide compose
can't be expressed only in terms of testcase names. They must be
specified in terms of both the testcase name and the scenario.
This patch gives greenwave the ability to do that. A passing
test case rule can now optionally specify a required scenario
for that test case.
I don't yet have a full list of testcase X scenario combinations, but
when I have one, I'll commit it to ansible.