From 86fd1b1831d34612d62e999004f3217c607377f1 Mon Sep 17 00:00:00 2001 From: František Zatloukal Date: Mar 21 2019 14:50:28 +0000 Subject: Merge branch 'develop' --- diff --git a/APIDOCS.apiary b/APIDOCS.apiary index a70894b..c9bda5c 100644 --- a/APIDOCS.apiary +++ b/APIDOCS.apiary @@ -63,8 +63,8 @@ Should you need to store additional data use the `data` keyval store. The stored For example in Taskotron, `item` and `type` are used to represent "what was tested", where `type` could be `koji_build`, `bodhi_update`, `compose`, `docker_image` ... and `item` is then the (reasonable) identifier of said "item under test". -We advise agaings using `_expand`, `_auth`, `_fields` and `_sort` as key names - although ResultsDB will store and return the data, these keys might not be -query-able for searching the `Results` collection, as these are reserved for the future API functionalities. (FIXME: reword to make more sense?) +We advise against using `_expand`, `_auth`, `_fields`, `_sort` and `_distinct_on` as key names - although ResultsDB will store and return the data, these keys might not be +query-able for searching the `Results` collection, as these are reserved for the future API functionalities. Through all the `Result` instances, there is a `href` attribute, that represents a link to self. @@ -179,7 +179,7 @@ Examples are provided in the Parameters section of the documentation. } -## Get a list of current Results for a specified filter _FIXME: reword to make more sense_ [GET /results/latest{?keyval,testcase,groups,since}] +## Get a list of latest Results for a specified filter [GET /results/latest{?keyval,testcases,groups,since,_distinct_on}] Especially with automation in mind, a simpe query to get the latest `Results` of all the `Testcases` based on a filter makes a lot of sense. 
For example Koji could be interested in data like "All current results for the `koji_build` `koschei-1.7.2-1.fc24`", without @@ -189,21 +189,26 @@ This endpoint does just that - takes filter parameters, and returns the most rec Only `Testcases` with at least one `Result` that meet the filter are present - e.g. if ResultsDB contained `dist.rpmlint` and `dist.rpmgrill` `Testcases`, but there was only a `dist.rpmlint` `Result` for the `koschei-1.7.2-1.fc24` `koji_build`, just `dist.rpmlint`'s `Result` would be returned. +An additional available parameter is `_distinct_on`, if specified allows the user to group by additional fields (example: `scenario`). + + Parameters + keyval (string) - Any key-value pair in `Result.data`. Replace `keyval` with the key's name: `...&item=koschei-1.7.2-1.fc24` - Multiple values can be provided, separate by commas to get `or` filter based on all the values provided: `...&arch=x86_64,noarch` - `like` filter with `*` as wildcards: `...&item:like=koschei*fc24*` - Multiple key-value pairs provide `and` filter, e.g. to search for all `Results` with `item` like `koschei*fc24*` and `arch` being either `noarch` or `x86_64`: `...&item:like=koschei*fc24*&arch=noarch` - + testcase (string, optional) + + testcases (string, optional) - Use to narrow down `Testcases` of interest. 
By default, all `Testcases` are searched for `Results` - - Multiple values can be provided, separate by coma to get `or` filter based on all the values provided: `...&testcase=dist.rpmlint,dist.depcheck` - - `like` filter with `*` as wildcards: `...&testcase:like=dist.*` + - Multiple values can be provided, separate by comma to get `or` filter based on all the values provided: `...&testcases=dist.rpmlint,dist.depcheck` + - `like` filter with `*` as wildcards: `...&testcases:like=dist.*` + groups: `27f94e36-62ec-11e6-83fd-525400d7d6a4` (string, optional) - - Multiple values can be provided, separate by commas to get `or` filter based on all the values provided: `...&group=uuid1,uuid2` + - Multiple values can be provided, separate by commas to get `or` filter based on all the values provided: `...&groups=uuid1,uuid2` + since: `2016-08-15T13:00:00` (string) Date (or datetime) in ISO8601 format. To specify range, separate start and end date(time) by comma: `...&since=2016-08-14,2016-08-15T13:42:57` + + _distinct_on: `scenario` (string, optional) + - The value can be any `key` in `Result.data`. Example: `...&_distinct_on=scenario` + - Multiple values can be provided, separate by comma. 
Example: `...&_distinct_on=scenario,item` + Request `.../results/latest?item=koschei-1.7.2-1.fc24&type=koji_build` + Parameters diff --git a/Makefile b/Makefile index 28fde3b..2ec4ed8 100644 --- a/Makefile +++ b/Makefile @@ -55,7 +55,7 @@ update-makefile: test: $(VENV) set -e source $(VENV)/bin/activate; - TEST='true' py.test --cov-report=term-missing --cov $(MODULENAME); + TEST='true' NO_CAN_HAS_POSTGRES='sadly' py.test --cov-report=term-missing --cov $(MODULENAME); deactivate .PHONY: test-ci @@ -63,7 +63,7 @@ test: $(VENV) test-ci: $(VENV) set -e source $(VENV)/bin/activate - TEST='true' py.test --cov-report=xml --cov $(MODULENAME) + TEST='true' NO_CAN_HAS_POSTGRES='sadly' py.test --cov-report=xml --cov $(MODULENAME) deactivate .PHONY: pylint diff --git a/conf/fedora-messaging-example.toml b/conf/fedora-messaging-example.toml new file mode 100644 index 0000000..5396b25 --- /dev/null +++ b/conf/fedora-messaging-example.toml @@ -0,0 +1,18 @@ +# A sample configuration for fedora-messaging. This file is in the TOML format. +# For complete details on all configuration options, see the documentation. + +amqp_url = "amqp://" + +publish_exchange = "amq.topic" + +# The topic_prefix configuration value will add a prefix to the topics of every sent message. +# This is used for migrating from fedmsg, and should not be used afterwards. +topic_prefix = "" + +[tls] +ca_cert = "/etc/pki/tls/certs/ca-bundle.crt" +keyfile = "/my/client/key.pem" +certfile = "/my/client/cert.pem" + +[client_properties] +app = "ResultsDB" diff --git a/conf/settings.py.example b/conf/settings.py.example index 9651374..f633a7d 100644 --- a/conf/settings.py.example +++ b/conf/settings.py.example @@ -31,7 +31,7 @@ REQUIRED_DATA = { } # Extend the list of allowed outcomes. -ADDITIONAL_RESULT_OUTCOMES = () +ADDITIONAL_RESULT_OUTCOMES = [] # Fedmenu configuration FEDMENU_URL = 'https://apps.fedoraproject.org/fedmenu' @@ -58,11 +58,6 @@ MESSAGE_BUS_PUBLISH = False # default, but you could create your own. 
# Supported values: 'dummy', 'stomp', 'fedmsg' MESSAGE_BUS_PLUGIN = 'fedmsg' -# You can pass extra arguments to your message bus plugin here. For instance, -# the fedmsg plugin expects an extra `modname` argument that can be used to -# configure the topic, like this: -# ... -# e.g. org.fedoraproject.prod.taskotron.result.new MESSAGE_BUS_KWARGS = {'modname': 'resultsdb'} ## Alternatively, you could use the 'stomp' messaging plugin. @@ -83,5 +78,3 @@ MESSAGE_BUS_KWARGS = {'modname': 'resultsdb'} # Publish Taskotron-compatible fedmsgs on the 'taskotron' topic MESSAGE_BUS_PUBLISH_TASKOTRON = False - - diff --git a/requirements.txt b/requirements.txt index 9f94996..3e80d51 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ # A note for maintainers: Please keep this list in sync and in the same order # as the spec file. -fedmsg >= 0.16.2 +fedora-messaging alembic >= 0.8.3 Flask >= 0.10.1 # FIXME: Flask-RESTful 0.3.6 breaks tests, see https://phab.qa.fedoraproject.org/T961 @@ -21,3 +21,4 @@ SQLAlchemy >= 0.9.8 # Test suite requirements pytest >= 2.4.2 pytest-cov >= 1.6 +psycopg2 diff --git a/resultsdb.spec b/resultsdb.spec index 6aa4d8c..7fcceb2 100644 --- a/resultsdb.spec +++ b/resultsdb.spec @@ -1,6 +1,6 @@ Name: resultsdb # NOTE: if you update version, *make sure* to also update `resultsdb/__init__.py` -Version: 2.1.2 +Version: 2.2.0 Release: 1%{?dist} Summary: Results store for automated tasks @@ -10,9 +10,7 @@ Source0: https://qa.fedoraproject.org/releases/%{name}/%{name}-%{version} BuildArch: noarch -%if 0%{?fedora} -Requires: fedmsg -Requires: python3-fedmsg +Requires: python3-fedora-messaging >= 1.5.0 Requires: python3-alembic Requires: python3-flask Requires: python3-flask-restful @@ -20,20 +18,8 @@ Requires: python3-flask-sqlalchemy Requires: python3-iso8601 Requires: python3-six Requires: python3-sqlalchemy -%else -Requires: fedmsg >= 0.16.2 -Requires: python-alembic >= 0.8.3 -Requires: python-flask >= 0.10.1 -Requires: python-flask-restful >= 
0.2.11 -Requires: python-flask-sqlalchemy >= 2.0 -Requires: python2-iso8601 >= 0.1.10 -Requires: python2-six >= 1.9.0 -Requires: python-sqlalchemy >= 0.9.8 -%endif - -%if 0%{?fedora} -BuildRequires: fedmsg -BuildRequires: python3-fedmsg + +BuildRequires: python3-fedora-messaging >= 1.5.0 BuildRequires: python3-alembic BuildRequires: python3-flask BuildRequires: python3-flask-restful @@ -43,18 +29,6 @@ BuildRequires: python3-pytest BuildRequires: python3-pytest-cov BuildRequires: python3-devel BuildRequires: python3-setuptools -%else -BuildRequires: fedmsg >= 0.16.2 -BuildRequires: python-alembic >= 0.8.3 -BuildRequires: python-flask >= 0.10.1 -BuildRequires: python-flask-restful >= 0.2.11 -BuildRequires: python-flask-sqlalchemy >= 2.0 -BuildRequires: python2-iso8601 >= 0.1.10 -BuildRequires: python2-pytest -BuildRequires: python2-pytest-cov -BuildRequires: python2-devel -BuildRequires: python2-setuptools -%endif %description ResultsDB is a results store engine for, but not limited to, Fedora QA tools. @@ -63,28 +37,17 @@ ResultsDB is a results store engine for, but not limited to, Fedora QA tools. 
%setup -q %check -%if 0%{?fedora} -PYTHONPATH=%{buildroot}%{python3_sitelib}/ pytest-3 -%else -PYTHONPATH=%{buildroot}%{python2_sitelib}/ py.test -%endif +NO_CAN_HAS_POSTGRES='sadly' PYTHONPATH=%{buildroot}%{python3_sitelib}/ pytest-3 + # This seems to be the only place where we can remove pyco files, see: # https://fedoraproject.org/wiki/Packaging:Python#Byte_compiling rm -f %{buildroot}%{_sysconfdir}/resultsdb/*.py{c,o} %build -%if 0%{?fedora} %{__python3} setup.py build -%else -%{__python2} setup.py build -%endif %install -%if 0%{?fedora} %{__python3} setup.py install --skip-build --root %{buildroot} -%else -%{__python2} setup.py install --skip-build --root %{buildroot} -%endif # apache and wsgi settings install -d %{buildroot}%{_datadir}/resultsdb/conf @@ -99,13 +62,8 @@ install -p -m 0644 conf/settings.py.example %{buildroot}%{_sysconfdir}/resultsdb %doc README.md conf/* %license LICENSE -%if 0%{?fedora} %{python3_sitelib}/resultsdb %{python3_sitelib}/*.egg-info -%else -%{python2_sitelib}/resultsdb -%{python2_sitelib}/*.egg-info -%endif %attr(755,root,root) %{_bindir}/resultsdb %dir %{_sysconfdir}/resultsdb @@ -115,6 +73,12 @@ install -p -m 0644 conf/settings.py.example %{buildroot}%{_sysconfdir}/resultsdb %{_datadir}/resultsdb/* %changelog +* Thu Mar 21 2019 Frantisek Zatloukal - 2.2.0-1 +- Latest endpoint: group by additional fields +- specfile: Drop python 2 support +- Drop support for fedmsg and replace by fedora-messaging +- settings.py: use lists instead of tuples + * Tue Nov 20 2018 Frantisek Zatloukal - 2.1.2-1 - Support Python 3, use it on Fedora - Fix ImmutableMultiDict handling for python 3.7 diff --git a/resultsdb/__init__.py b/resultsdb/__init__.py index 5d762f8..7313e1f 100644 --- a/resultsdb/__init__.py +++ b/resultsdb/__init__.py @@ -30,7 +30,7 @@ import os # the version as used in setup.py -__version__ = "2.1.2" +__version__ = "2.2.0" try: basestring diff --git a/resultsdb/controllers/api_v2.py b/resultsdb/controllers/api_v2.py index 
ffce47c..69c6b59 100644 --- a/resultsdb/controllers/api_v2.py +++ b/resultsdb/controllers/api_v2.py @@ -20,6 +20,8 @@ import re import uuid +import string +import random from functools import partial from flask import Blueprint, jsonify, request, url_for @@ -319,21 +321,22 @@ def create_group(): # ============================================================================= # RESULTS # ============================================================================= - def select_results(since_start=None, since_end=None, outcomes=None, groups=None, testcases=None, testcases_like=None, result_data=None, _sort=None): # Checks if the sort parameter specified in the request is valid before querying. # Sorts by submit_time in a descending order if the sort parameter is absent or invalid. + q = db.session.query(Result) query_sorted = False if _sort: sort_match = re.match(r'^(?Pasc|desc):(?P.+)$', _sort) - if sort_match: - if sort_match.group('column') == 'submit_time': - sort_order = {'asc': db.asc, 'desc': db.desc}[sort_match.group('order')] - sort_column = getattr(Result, sort_match.group('column')) - q = db.session.query(Result).order_by(sort_order(sort_column)) - query_sorted = True + if sort_match and sort_match.group('column') == 'submit_time': + sort_order = {'asc': db.asc, 'desc': db.desc}[sort_match.group('order')] + sort_column = getattr(Result, sort_match.group('column')) + q = q.order_by(sort_order(sort_column)) + query_sorted = True + if _sort and _sort == 'disable_sorting': + query_sorted = True if not query_sorted: - q = db.session.query(Result).order_by(db.desc(Result.submit_time)) + q = q.order_by(db.desc(Result.submit_time)) # Time constraints if since_start: @@ -417,6 +420,7 @@ def __get_results_parse_args(): args['testcases'] = [tc.strip() for tc in args['testcases'].split(',') if tc.strip()] args['testcases:like'] = [tc.strip() for tc in args['testcases:like'].split(',') if tc.strip()] args['groups'] = [group.strip() for group in args['groups'].split(',') 
if group.strip()] + args['_distinct_on'] = [_distinct_on.strip() for _distinct_on in args['_distinct_on'].split(',') if _distinct_on.strip()] retval['args'] = args # find results_data with the query parameters @@ -437,60 +441,6 @@ def __get_results_parse_args(): return retval -RP['get_results_latest'] = reqparse.RequestParser() -RP['get_results_latest'].add_argument('since', location='args') -RP['get_results_latest'].add_argument('groups', default="", location='args') -# TODO - can this be done any better? -RP['get_results_latest'].add_argument('_sort', default="", location='args') -RP['get_results_latest'].add_argument('testcases', default="", location='args') -RP['get_results_latest'].add_argument('testcases:like', default="", location='args') -# These two are ignored. They're present so reqparse isn't confused by JSONP. -RP['get_results_latest'].add_argument('callback', location='args') -RP['get_results_latest'].add_argument('_', location='args') - - -@api.route('/results/latest', methods=['GET']) -def get_results_latest(): - p = __get_results_parse_args() - if p['error'] is not None: - return p['error'] - - args = p['args'] - - q = select_results( - since_start=args['since']['start'], - since_end=args['since']['end'], - groups=args['groups'], - testcases=args['testcases'], - testcases_like=args['testcases:like'], - result_data=p['result_data'], - _sort=args['_sort'], - ) - - # Produce a subquery with the same filter criteria as above *except* - # test case name, which we group by and join on. 
- sq = select_results( - since_start=args['since']['start'], - since_end=args['since']['end'], - groups=args['groups'], - result_data=p['result_data'], - )\ - .order_by(None)\ - .with_entities( - Result.testcase_name.label('testcase_name'), - db.func.max(Result.submit_time).label('max_submit_time'))\ - .group_by(Result.testcase_name)\ - .subquery() - q = q.join(sq, db.and_(Result.testcase_name == sq.c.testcase_name, - Result.submit_time == sq.c.max_submit_time)) - - results = q.all() - - return jsonify(dict( - data=[SERIALIZE(o) for o in results], - )) - - RP['get_results'] = reqparse.RequestParser() RP['get_results'].add_argument('page', default=0, type=int, location='args') RP['get_results'].add_argument('limit', default=QUERY_LIMIT, type=int, location='args') @@ -498,6 +448,7 @@ RP['get_results'].add_argument('since', location='args') RP['get_results'].add_argument('outcome', location='args') RP['get_results'].add_argument('groups', default="", location='args') RP['get_results'].add_argument('_sort', default="", location='args') +RP['get_results'].add_argument('_distinct_on', default="", location='args') # TODO - can this be done any better? 
RP['get_results'].add_argument('testcases', default="", location='args') RP['get_results'].add_argument('testcases:like', default="", location='args') @@ -539,6 +490,81 @@ def get_results(group_ids=None, testcase_names=None): )) +@api.route('/results/latest', methods=['GET']) +def get_results_latest(): + p = __get_results_parse_args() + if p['error'] is not None: + return p['error'] + + args = p['args'] + since_start = args['since'].get('start', None) + since_end = args['since'].get('end', None) + groups = args.get('groups', None) + testcases = args.get('testcases', None) + testcases_like = args.get('testcases:like', None) + distinct_on = args.get('_distinct_on', None) + + if not distinct_on: + q = select_results( + since_start=since_start, + since_end=since_end, + groups=groups, + testcases=testcases, + testcases_like=testcases_like, + result_data=p['result_data'], + ) + + # Produce a subquery with the same filter criteria as above *except* + # test case name, which we group by and join on. 
+ sq = select_results( + since_start=since_start, + since_end=since_end, + groups=groups, + result_data=p['result_data'], + )\ + .order_by(None)\ + .with_entities( + Result.testcase_name.label('testcase_name'), + db.func.max(Result.submit_time).label('max_submit_time'))\ + .group_by(Result.testcase_name)\ + .subquery() + q = q.join(sq, db.and_(Result.testcase_name == sq.c.testcase_name, + Result.submit_time == sq.c.max_submit_time)) + + results = q.all() + + return jsonify(dict( + data=[SERIALIZE(o) for o in results], + )) + + + if not any([testcases, testcases_like, since_start, since_end, groups, p['result_data']]): + return jsonify({'message': ("Please, provide at least one " + "filter beside '_distinct_on'")}), 400 + + q = db.session.query(Result) + q = select_results(since_start=since_start, since_end=since_end, + groups=groups, testcases=testcases, + testcases_like=testcases_like, result_data=p['result_data'], _sort="disable_sorting") + + values_distinct_on = [Result.testcase_name] + for i, key in enumerate(distinct_on): + name = 'result_data_%s_%s' % (i, key) + alias = db.aliased(db.session.query(ResultData).filter(ResultData.key == key).subquery(), name=name) + q = q.outerjoin(alias) + values_distinct_on.append(db.text('{}.value'.format(name))) + + q = q.distinct(*values_distinct_on) + q = q.order_by(*values_distinct_on).order_by(db.desc(Result.submit_time)) + + results = q.all() + results = dict( + data=[SERIALIZE(o) for o in results], + ) + results['data'] = sorted(results['data'], key=lambda x: x['submit_time'], reverse=True) + return jsonify(results) + + @api.route('/groups//results', methods=['GET']) @api.route('/testcases//results', methods=['GET']) def get_results_by_group_testcase(group_id=None, testcase_name=None): diff --git a/resultsdb/messaging.py b/resultsdb/messaging.py index fc9127c..4b8d8c2 100644 --- a/resultsdb/messaging.py +++ b/resultsdb/messaging.py @@ -22,7 +22,8 @@ import json import pkg_resources -import fedmsg +from 
fedora_messaging.api import Message, publish +from fedora_messaging.exceptions import PublishReturned, ConnectionException from resultsdb import db from resultsdb.models.results import Result, ResultData @@ -83,7 +84,7 @@ def publish_taskotron_message(result, include_job_url=False): if datum.key in ('item', 'type',) ) task['name'] = result.testcase.name - msg = { + body = { 'task': task, 'result': { 'id': result.id, @@ -95,9 +96,18 @@ def publish_taskotron_message(result, include_job_url=False): } if include_job_url: # only in the v1 API - msg['result']['job_url'] = result.groups[0].ref_url if result.groups else None + body['result']['job_url'] = result.groups[0].ref_url if result.groups else None - fedmsg.publish(topic='result.new', modname='taskotron', msg=msg) + try: + msg = Message ( + topic='taskotron.result.new', + body=body + ) + publish(msg) + except PublishReturned as e: + log.error('Fedora Messaging broker rejected message {}: {}'.format(msg.id, e)) + except ConnectionException as e: + log.error('Error sending message {}: {}'.format(msg.id, e)) def create_message(result): @@ -138,7 +148,18 @@ class FedmsgPlugin(MessagingPlugin): """ A fedmsg plugin, used to publish to the fedmsg bus. 
""" def publish(self, message): - fedmsg.publish(topic='result.new', modname=self.modname, msg=message) + + try: + msg = Message( + topic='{}.result.new'.format(self.modname), + body=message + ) + publish(msg) + except PublishReturned as e: + log.error('Fedora Messaging broker rejected message {}: {}'.format(msg.id, e)) + except ConnectionException as e: + log.error('Error sending message {}: {}'.format(msg.id, e)) + class StompPlugin(MessagingPlugin): diff --git a/testing/functest_api_v20.py b/testing/functest_api_v20.py index 6977cdf..1e5cba9 100644 --- a/testing/functest_api_v20.py +++ b/testing/functest_api_v20.py @@ -49,6 +49,7 @@ class TestFuncApiV20(): cls.dbfile = tempfile.NamedTemporaryFile(delete=False) cls.dbfile.close() resultsdb.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///%s' % cls.dbfile.name + #resultsdb.app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://resultsdb:resultsdb@localhost:5432/resultsdb' resultsdb.app.config['MESSAGE_BUS_PUBLISH'] = True resultsdb.app.config['MESSAGE_BUS_PLUGIN'] = 'dummy' @@ -857,6 +858,127 @@ class TestFuncApiV20(): assert data['data'][1]['testcase']['name'] == self.ref_testcase_name assert data['data'][1]['outcome'] == "FAILED" + def test_get_results_latest_distinct_on(self): + print("=============== HINT ===============\nThis test requires PostgreSQL, because DISTINCT ON does work differently in SQLite") + if os.getenv('NO_CAN_HAS_POSTGRES', None): + return + + self.helper_create_testcase() + + self.helper_create_result(outcome="PASSED", data={'scenario': 'scenario1'}, testcase=self.ref_testcase_name) + self.helper_create_result(outcome="FAILED", data={'scenario': 'scenario2'}, testcase=self.ref_testcase_name) + + r = self.app.get('/api/v2.0/results/latest?testcases=' + self.ref_testcase_name + '&_distinct_on=scenario') + data = json.loads(r.data) + assert len(data['data']) == 2 + assert data['data'][0]['data']['scenario'][0] == 'scenario2' + assert data['data'][1]['data']['scenario'][0] == 
'scenario1' + + r = self.app.get('/api/v2.0/results/latest?testcases=' + self.ref_testcase_name) + data = json.loads(r.data) + assert len(data['data']) == 1 + assert data['data'][0]['data']['scenario'][0] == 'scenario2' + + def test_get_results_latest_distinct_on_more_specific_cases_1(self): + print("=============== HINT ===============\nThis test requires PostgreSQL, because DISTINCT ON does work differently in SQLite") + if os.getenv('NO_CAN_HAS_POSTGRES', None): + return + + ''' + | id | testcase | scenario | + |----|----------|----------| + | 1 | tc_1 | s_1 | + | 2 | tc_2 | s_1 | + | 3 | tc_2 | s_2 | + | 4 | tc_3 | | + ''' + self.helper_create_result(outcome="PASSED", testcase='tc_1', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_2'}) + self.helper_create_result(outcome="PASSED", testcase='tc_3', data={'item': 'grub'}) + + r = self.app.get('/api/v2.0/results/latest?item=grub&_distinct_on=scenario') + data = json.loads(r.data) + + assert len(data['data']) == 4 + + def test_get_results_latest_distinct_on_more_specific_cases_2(self): + print("=============== HINT ===============\nThis test requires PostgreSQL, because DISTINCT ON does work differently in SQLite") + if os.getenv('NO_CAN_HAS_POSTGRES', None): + return + + ''' + | id | testcase | scenario | + |----|----------|----------| + | 1 | tc_1 | s_1 | + | 2 | tc_2 | s_1 | + | 3 | tc_2 | s_2 | + | 4 | tc_3 | | + | 5 | tc_1 | | + ''' + self.helper_create_result(outcome="PASSED", testcase='tc_1', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_2'}) + self.helper_create_result(outcome="PASSED", testcase='tc_3', 
data={'item': 'grub'}) + self.helper_create_result(outcome="FAILED", testcase='tc_1', data={'item': 'grub'}) + + r = self.app.get('/api/v2.0/results/latest?item=grub&_distinct_on=scenario') + data = json.loads(r.data) + + assert len(data['data']) == 5 + + def test_get_results_latest_distinct_on_more_specific_cases_3(self): + print("=============== HINT ===============\nThis test requires PostgreSQL, because DISTINCT ON does work differently in SQLite") + if os.getenv('NO_CAN_HAS_POSTGRES', None): + return + + ''' + | id | testcase | scenario | + |----|----------|----------| + | 1 | tc_1 | s_1 | + | 2 | tc_2 | s_1 | + | 3 | tc_2 | s_2 | + | 4 | tc_3 | | + | 5 | tc_1 | | + | 6 | tc_1 | s_1 | + ''' + self.helper_create_result(outcome="PASSED", testcase='tc_1', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_1'}) + self.helper_create_result(outcome="PASSED", testcase='tc_2', data={'item': 'grub', 'scenario': 's_2'}) + self.helper_create_result(outcome="PASSED", testcase='tc_3', data={'item': 'grub'}) + self.helper_create_result(outcome="FAILED", testcase='tc_1', data={'item': 'grub'}) + self.helper_create_result(outcome="INFO", testcase='tc_1', data={'item': 'grub', 'scenario': 's_1'}) + + r = self.app.get('/api/v2.0/results/latest?item=grub&_distinct_on=scenario') + data = json.loads(r.data) + + assert len(data['data']) == 5 + tc_1s = [r for r in data['data'] if r['testcase']['name'] == 'tc_1'] + assert len(tc_1s) == 2 + assert tc_1s[0]['outcome'] == 'INFO' + assert tc_1s[1]['outcome'] == 'FAILED' + + def test_get_results_latest_distinct_on_with_scenario_not_defined(self): + print("=============== HINT ===============\nThis test requires PostgreSQL, because DISTINCT ON does work differently in SQLite") + if os.getenv('NO_CAN_HAS_POSTGRES', None): + return + + self.helper_create_testcase() + self.helper_create_result(outcome="PASSED", testcase=self.ref_testcase_name) + 
self.helper_create_result(outcome="FAILED", testcase=self.ref_testcase_name) + + r = self.app.get('/api/v2.0/results/latest?testcases=' + self.ref_testcase_name + '&_distinct_on=scenario') + data = json.loads(r.data) + + assert len(data['data']) == 1 + assert data['data'][0]['outcome'] == 'FAILED' + + def test_get_results_latest_distinct_on_wrong_params(self): + r = self.app.get('/api/v2.0/results/latest?_distinct_on=scenario') + data = json.loads(r.data) + assert r.status_code == 400 + assert data['message'] == "Please, provide at least one filter beside '_distinct_on'" + def test_message_publication(self): self.helper_create_result() plugin = resultsdb.messaging.DummyPlugin diff --git a/tox.ini b/tox.ini index 0c8fe38..93ee516 100644 --- a/tox.ini +++ b/tox.ini @@ -14,7 +14,7 @@ max-line-length=99 minversion=2.0 python_functions=test should python_files=test_* functest_* -addopts=--functional testing/ --cov resultsdb --cov-report=term-missing +addopts=--functional -p no:warnings testing/ --cov resultsdb --cov-report=term-missing [tox] envlist = py27,py36,py37