#110 Make resultsdb Python3 compatible
Merged 5 years ago by frantisekz. Opened 5 years ago by jskladan.

file modified
+8 -3
@@ -32,6 +32,11 @@ 

  # the version as used in setup.py

  __version__ = "2.1.1"

  

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

+ 

  

  # Flask App

  app = Flask(__name__)
@@ -97,7 +102,7 @@ 

      root_logger.setLevel(logging.DEBUG)

  

      if app.config['STREAM_LOGGING']:

-         print "doing stream logging"

+         print("doing stream logging")

          stream_handler = logging.StreamHandler()

          stream_handler.setLevel(loglevel)

          stream_handler.setFormatter(formatter)
@@ -105,7 +110,7 @@ 

          app.logger.addHandler(stream_handler)

  

      if app.config['SYSLOG_LOGGING']:

-         print "doing syslog logging"

+         print("doing syslog logging")

          syslog_handler = logging.handlers.SysLogHandler(address='/dev/log',

                                                          facility=logging.handlers.SysLogHandler.LOG_LOCAL4)

          syslog_handler.setLevel(loglevel)
@@ -114,7 +119,7 @@ 

          app.logger.addHandler(syslog_handler)

  

      if app.config['FILE_LOGGING'] and app.config['LOGFILE']:

-         print "doing file logging to %s" % app.config['LOGFILE']

+         print("doing file logging to %s" % app.config['LOGFILE'])

          file_handler = logging.handlers.RotatingFileHandler(

              app.config['LOGFILE'], maxBytes=500000, backupCount=5)

          file_handler.setLevel(loglevel)

file modified
+17 -18
@@ -39,7 +39,7 @@ 

  

  

  def upgrade_db(*args):

-     print "Upgrading Database to Latest Revision"

+     print("Upgrading Database to Latest Revision")

      alembic_cfg = get_alembic_config()

      al_command.upgrade(alembic_cfg, "head")

  
@@ -53,20 +53,20 @@ 

      current_rev = context.get_current_revision()

  

      if not current_rev:

-         print "Initializing alembic"

-         print " - Setting the current version to the first revision"

+         print("Initializing alembic")

+         print(" - Setting the current version to the first revision")

          al_command.stamp(alembic_cfg, "15f5eeb9f635")

      else:

-         print "Alembic already initialized"

+         print("Alembic already initialized")

  

  

  def initialize_db(destructive):

      alembic_cfg = get_alembic_config()

  

-     print "Initializing database"

+     print("Initializing database")

  

      if destructive:

-         print " - Dropping all tables"

+         print(" - Dropping all tables")

          db.drop_all()

  

      # check whether the table 'group' exists
@@ -74,9 +74,9 @@ 

      insp = reflection.Inspector.from_engine(db.engine)

      table_names = insp.get_table_names()

      if 'testcase' not in table_names and 'Testcase' not in table_names:

-         print " - Creating tables"

+         print(" - Creating tables")

          db.create_all()

-         print " - Stamping alembic's current version to 'head'"

+         print(" - Stamping alembic's current version to 'head'")

          al_command.stamp(alembic_cfg, "head")

  

      # check to see if the db has already been initialized by checking for an
@@ -84,18 +84,18 @@ 

      context = MigrationContext.configure(db.engine.connect())

      current_rev = context.get_current_revision()

      if current_rev:

-         print " - Database is currently at rev %s" % current_rev

+         print(" - Database is currently at rev %s" % current_rev)

          upgrade_db(destructive)

      else:

-         print "WARN: You need to have your db stamped with an alembic revision"

-         print "      Run 'init_alembic' sub-command first."

+         print("WARN: You need to have your db stamped with an alembic revision")

+         print("      Run 'init_alembic' sub-command first.")

  

  

  def mock_data(destructive):

-     print "Populating tables with mock-data"

+     print("Populating tables with mock-data")

  

      if destructive or not db.session.query(Testcase).count():

-         print " - Testcase, Job, Result, ResultData"

+         print(" - Testcase, Job, Result, ResultData")

          tc1 = Testcase(ref_url="http://example.com/depcheck", name="depcheck")

          tc2 = Testcase(ref_url="http://example.com/rpmlint", name="rpmlint")

  
@@ -130,7 +130,7 @@ 

  

          db.session.commit()

      else:

-         print " - skipped Testcase, Job, Result, ResultData"

+         print(" - skipped Testcase, Job, Result, ResultData")

  

  

  def main():
@@ -146,9 +146,9 @@ 

      (options, args) = parser.parse_args()

  

      if len(args) != 1 or args[0] not in possible_commands:

-         print usage

+         print(usage)

          print()

-         print 'Please use one of the following commands: %s' % str(possible_commands)

+         print('Please use one of the following commands: %s' % str(possible_commands))

          sys.exit(1)

  

      command = {
@@ -159,8 +159,7 @@ 

      }[args[0]]

  

      if not options.destructive:

-         print "Proceeding in non-destructive mode. To perform destructive "\

-               "steps use -d option."

+         print("Proceeding in non-destructive mode. To perform destructive steps use -d option.")

  

      command(options.destructive)

  

@@ -44,9 +44,13 @@ 

  

  api = Blueprint('api_v1', __name__)

  

- # TODO: find out why error handler works for 404 but not fot 400

+ try:

+     unicode

+ except NameError:

+     unicode = str

  

  

  # TODO: find out why error handler works for 404 but not for 400

  @app.errorhandler(400)

  def bad_request(error):

      return jsonify({"message": "Bad request"}), 400
@@ -203,7 +207,7 @@ 

  

      # Filter by result_data

      if result_data is not None:

-         for key, values in result_data.iteritems():

+         for key, values in result_data.items():

              try:

                  key, modifier = key.split(':')

              except ValueError:  # no : in key
@@ -408,7 +412,7 @@ 

      #

      req_args = dict(request.args)  # this is important, do not delete ;)

      extra_data = {k: req_args[k] for k in req_args if k not in args}

-     for k, v in extra_data.iteritems():

+     for k, v in extra_data.items():

          for i, s in enumerate(v):

              extra_data[k][i] = s.split(',')

          # flatten the list

@@ -42,9 +42,17 @@ 

  

  api = Blueprint('api_v2', __name__)

  

- # TODO: find out why error handler works for 404 but not for 400

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

  

+ try:

+     unicode

+ except NameError:

+     unicode = str

  

+ # TODO: find out why error handler works for 404 but not for 400

  @app.errorhandler(400)

  def bad_request(error):

      return jsonify({"message": "Bad request"}), 400
@@ -98,7 +106,7 @@ 

      the value required. Or if the value is not yet in the request-parser (which now

      realistically only applies to the `data.` values in result) it is added.

      """

-     for key, values in app.config.get('REQUIRED_DATA', {}).iteritems():

+     for key, values in app.config.get('REQUIRED_DATA', {}).items():

          if key not in RP:

              app.logger.error("Error in config: REQUIRED_DATA contains unknown endpoint %r.", key)

              continue
@@ -354,7 +362,7 @@ 

  

      # Filter by result_data

      if result_data is not None:

-         for key, values in result_data.iteritems():

+         for key, values in result_data.items():

              try:

                  key, modifier = key.split(':')

              except ValueError:  # no : in key
@@ -417,8 +425,8 @@ 

  

      # req_args is a dict of lists, where keys are param names and values are param values

      #  the value is a list even if only one param value was specified

-     results_data = {key: req_args[key] for key in req_args.iterkeys() if key not in args}

-     for param, values in results_data.iteritems():

+     results_data = {key: req_args[key] for key in req_args.keys() if key not in args}

+     for param, values in results_data.items():

          for i, value in enumerate(values):

              results_data[param][i] = value.split(',')

          # flatten the list
@@ -584,7 +592,7 @@ 

          return jsonify({'message': "outcome must be one of %r" % (RESULT_OUTCOME,)}), 400

  

      if args['data']:

-         invalid_keys = [key for key in args['data'].iterkeys() if ':' in key]

+         invalid_keys = [key for key in args['data'].keys() if ':' in key]

          if invalid_keys:

              app.logger.warning("Colon not allowed in key name: %s", invalid_keys)

              return jsonify({'message': "Colon not allowed in key name: %r" % invalid_keys}), 400

@@ -1,5 +1,11 @@ 

  import numbers

  

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

+ 

+ 

  

  def non_empty(typ, value, *args, **kwargs):

      if args or kwargs:

file modified
+9 -9
@@ -40,10 +40,10 @@ 

      Find previous result with the same testcase, item, type, and arch.

      Return None if no result is found.

  

-     Note that this logic is Taskotron-specific: it does not consider the 

-     possibility that a result may be distinguished by other keys in the data 

-     (for example 'scenario' which is used in OpenQA results). But this is only 

-     used for publishing Taskotron compatibility messages, thus we keep this 

+     Note that this logic is Taskotron-specific: it does not consider the

+     possibility that a result may be distinguished by other keys in the data

+     (for example 'scenario' which is used in OpenQA results). But this is only

+     used for publishing Taskotron compatibility messages, thus we keep this

      logic as is.

      """

      q = db.session.query(Result).filter(Result.id != result.id)
@@ -63,16 +63,16 @@ 

      """

      Publish a fedmsg on the taskotron topic with Taskotron-compatible structure.

  

-     These messages are deprecated, consumers should consume from the resultsdb 

+     These messages are deprecated, consumers should consume from the resultsdb

      topic instead.

      """

      prev_result = get_prev_result(result)

      if prev_result is not None and prev_result.outcome == result.outcome:

-         # If the previous result had the same outcome, skip publishing 

+         # If the previous result had the same outcome, skip publishing

          # a message for this new result.

-         # This was intended as a workaround to avoid spammy messages from the 

-         # dist.depcheck task, which tends to produce a very large number of 

-         # identical results for any given build, because of the way that it is 

+         # This was intended as a workaround to avoid spammy messages from the

+         # dist.depcheck task, which tends to produce a very large number of

+         # identical results for any given build, because of the way that it is

          # designed.

          log.debug("Skipping Taskotron message for result %d, outcome has not changed", result.id)

          return

@@ -19,6 +19,10 @@ 

  

  from datetime import date, datetime

  

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

  

  class DBSerialize(object):

      pass
@@ -38,10 +42,14 @@ 

  

          if isinstance(value, dict):

              ret = {}

-             for k, v in value.iteritems():

+             for k, v in value.items():

                  ret[k] = self.serialize(v, **kwargs)

              return ret

  

+         #in py3 string-like types have __iter__ causing endless loops

+         if isinstance(value, basestring):

+             return value

+ 

          # convert iterables to list of serialized stuff

          if hasattr(value, '__iter__'):

              ret = []

@@ -42,7 +42,7 @@ 

          if job_load_results:

              rv['results'] = o.results

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_Testcase(self, o, **kwargs):

          rv = dict(
@@ -51,7 +51,7 @@ 

              href=self.get_uri(o)

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_Result(self, o, **kwargs):

          result_data = {}
@@ -73,7 +73,7 @@ 

              href=self.get_uri(o),

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_ResultData(self, o, **kwargs):

          rv = dict(
@@ -81,4 +81,4 @@ 

              value=o.value,

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

@@ -33,7 +33,7 @@ 

              href=url_for('api_v2.get_group', group_id=o.uuid, _external=True),

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_Testcase(self, o, **kwargs):

          rv = dict(
@@ -42,7 +42,7 @@ 

              href=url_for('api_v2.get_testcase', testcase_name=o.name, _external=True),

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_Result(self, o, **kwargs):

          result_data = {}
@@ -64,7 +64,7 @@ 

              href=url_for('api_v2.get_result', result_id=o.id, _external=True),

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

  

      def _serialize_ResultData(self, o, **kwargs):

          rv = dict(
@@ -72,4 +72,4 @@ 

              value=o.value,

          )

  

-         return {key: self.serialize(value) for key, value in rv.iteritems()}

+         return {key: self.serialize(value) for key, value in rv.items()}

file modified
+6 -1
@@ -28,6 +28,11 @@ 

  import resultsdb.messaging

  import resultsdb.controllers.api_v2 as api_v2

  

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

+ 

  

  class AboutTime(object):

  
@@ -100,7 +105,7 @@ 

              'outcome': self.ref_result_outcome,

              'note': self.ref_result_note,

              'ref_url': self.ref_result_ref_url,

-             'data': dict(((key, [value] if isinstance(value, basestring) else value) for key, value in self.ref_result_data.iteritems())),

+             'data': dict(((key, [value] if isinstance(value, basestring) else value) for key, value in self.ref_result_data.items())),

              'href': self.ref_url_prefix + '/results/1',

          }

  

file modified
+12 -8
@@ -6,6 +6,10 @@ 

  import resultsdb.lib.helpers as helpers

  import resultsdb.messaging as messaging

  

+ try:

+     basestring

+ except NameError:

+     basestring = (str, bytes)

  

  class MyRequest(object):

  
@@ -102,7 +106,7 @@ 

          self.rq.url = 'URL'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev is None

          assert next == 'URL?page=1'
@@ -111,7 +115,7 @@ 

          self.rq.url = 'URL?stuff=some'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev is None

          assert next == 'URL?stuff=some&page=1'
@@ -120,7 +124,7 @@ 

          self.rq.url = 'URL?page=1&limit=1'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev == 'URL?page=0&limit=1'

          assert next == 'URL?page=2&limit=1'
@@ -128,7 +132,7 @@ 

          self.rq.url = 'URL?limit=1&page=1'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev == 'URL?limit=1&page=0'

          assert next == 'URL?limit=1&page=2'
@@ -136,7 +140,7 @@ 

          self.rq.url = 'URL&page=1&limit=1'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev == 'URL&page=0&limit=1'

          assert next == 'URL&page=2&limit=1'
@@ -144,7 +148,7 @@ 

          self.rq.url = 'URL&limit=1&page=1'

          monkeypatch.setattr(apiv2, 'request', self.rq)

  

-         data, prev, next = apiv2.prev_next_urls(range(10), 1)

+         data, prev, next = apiv2.prev_next_urls(list(range(10)), 1)

          assert data == [0]

          assert prev == 'URL&limit=1&page=0'

          assert next == 'URL&limit=1&page=2'
@@ -184,7 +188,7 @@ 

              plugin = messaging.load_messaging_plugin('fedmsg', {})

          except KeyError as err:

              if "not found" in str(err):

-                 print """=============== HINT ===============

+                 print("""=============== HINT ===============

  This exception can be caused by the fact, that you did not run

  `python setup.py develop` before executing the testsuite.

  
@@ -194,7 +198,7 @@ 

  

  If you ran `python setup.py develop` and are still seeing this error, then:

   - you might be missing the 'fedmsg' entrypoint in setup.py

-  - there can be an error in the plugin loading code"""

+  - there can be an error in the plugin loading code""")

              raise

          assert isinstance(plugin, messaging.FedmsgPlugin), "check whether `fedmsg` entrypoint in setup.py points to resultsdb.messaging:FedmsgPlugin"

  

file modified
+17 -1
@@ -1,7 +1,10 @@ 

  # This is a common file where different test suites/linters can be configured.

- # Phabricator uses this file when running `arc unit` or `arc lint`.

  

  [flake8]

+ # If you want to ignore a specific source code line, use '# noqa' comment. If

+ # you want to ignore the whole file, add '# flake8: noqa' comment. Read more

+ # documentation about flake8 at:

+ # https://flake8.readthedocs.org/

  max-line-length=99

  

  [pep8]
@@ -12,3 +15,16 @@ 

  python_functions=test should

  python_files=test_* functest_*

  addopts=--functional testing/ --cov resultsdb --cov-report=term-missing

+ 

+ [tox]

+ envlist = py27,py36

+ 

+ [testenv]

+ deps = -rrequirements.txt

+ commands = python -m pytest {posargs}

+ # setup.py has from utils import...

+ setenv = PYTHONPATH = {toxinidir}

+ # needs hawkey, koji

+ sitepackages = False

+ # tests read HOME

+ passenv = HOME