#16 Update Execdb to work with new buildbot
Closed 7 years ago by tflink. Opened 7 years ago by tflink.
taskotron/ tflink/execdb feature/new_buildbot  into  master

file modified
+80 -40
@@ -1,35 +1,70 @@ 

- #

- # Copyright 2013, Red Hat, Inc.

- #

- # This program is free software; you can redistribute it and/or modify

- # it under the terms of the GNU General Public License as published by

- # the Free Software Foundation; either version 2 of the License, or

- # (at your option) any later version.

- #

- # This program is distributed in the hope that it will be useful,

- # but WITHOUT ANY WARRANTY; without even the implied warranty of

- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

- # GNU General Public License for more details.

- #

- # You should have received a copy of the GNU General Public License along

- # with this program; if not, write to the Free Software Foundation, Inc.,

- # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

- #

- 

- # general variables

- VENV=test_env

- SRC=execdb

+ # Copyright 2018, Red Hat, Inc.

+ # License: GPL-2.0+ <http://spdx.org/licenses/GPL-2.0+>

+ # See the LICENSE file for more details on Licensing

+ 

+ #######################################################################

+ #      _____            _        _ _           _   _                  #

+ #     / ____|          | |      (_) |         | | (_)                 #

+ #    | |     ___  _ __ | |_ _ __ _| |__  _   _| |_ _ _ __   __ _      #

+ #    | |    / _ \| '_ \| __| '__| | '_ \| | | | __| | '_ \ / _` |     #

+ #    | |___| (_) | | | | |_| |  | | |_) | |_| | |_| | | | | (_| |     #

+ #     \_____\___/|_| |_|\__|_|  |_|_.__/ \__,_|\__|_|_| |_|\__, |     #

+ #                                                           __/ |     #

+ #     If you want to add/fix anything here, please create  |___/      #

+ #     PR at qa-make https://pagure.io/fedora-qa/qa-make               #

+ #                                                                     #

+ #######################################################################

+ 

+ # Allows to print variables, eg. make print-SRC

+ print-%  : ; @echo $* = $($*)

+ 

+ # Get variables from Makefile.cfg

+ SRC=$(shell grep -s SRC Makefile.cfg | sed 's/SRC=//')

+ VENV=$(shell grep -s VENV Makefile.cfg | sed 's/VENV=//')

+ MODULENAME=$(shell grep -s MODULENAME Makefile.cfg | sed 's/MODULENAME=//')

+ 

+ # Try to detect SRC in case we didn't find Makefile.cfg

+ ifeq ($(SRC),)

+ SRC=$(shell rpmspec -q --queryformat="%{NAME}\n" *.spec | head -1)

+ SPECNUM=$(shell ls -1 *.spec | wc -l)

+ ifneq ($(SPECNUM),1)

+ $(error Make sure you have either one spec file in the directory or configure it in Makefile.cfg)

+ endif

+ endif

  

  # Variables used for packaging

  SPECFILE=$(SRC).spec

  BASEARCH:=$(shell uname -i)

  DIST:=$(shell rpm --eval '%{dist}')

- VERSION:=$(shell rpmspec -q --queryformat="%{VERSION}\n" $(SPECFILE) | uniq)

- RELEASE:=$(subst $(DIST),,$(shell rpmspec -q --queryformat="%{RELEASE}\n" $(SPECFILE) | uniq))

+ TARGETVER:=$(shell lsb_release -r |grep -o '[0-9]*')

+ TARGETDIST:=fc$(TARGETVER)

+ VERSION:=$(shell rpmspec -q --queryformat="%{VERSION}\n" $(SPECFILE) | head -1)

+ RELEASE:=$(shell rpmspec -q --queryformat="%{RELEASE}\n" $(SPECFILE) | head -1 | sed 's/$(DIST)/\.$(TARGETDIST)/g')

  NVR:=$(SRC)-$(VERSION)-$(RELEASE)

  GITBRANCH:=$(shell git rev-parse --abbrev-ref HEAD)

- TARGETDIST:=fc25

- BUILDTARGET=fedora-25-x86_64

+ BUILDTARGET:=fedora-$(TARGETVER)-x86_64

+ KOJITARGET:=$(shell echo $(TARGETDIST) | sed 's/c//' | sed 's/el/epel-/')

+ 

+ .PHONY: update-makefile

+ update-makefile:

+ 	curl --fail https://pagure.io/fedora-qa/qa-make/raw/master/f/Makefile -o Makefile.new

+ 	if ! cmp Makefile Makefile.new ; then mv Makefile.new Makefile ; fi

+ 

+ .PHONY: test

+ .ONESHELL: test

+ test: $(VENV)

+ 	set -e

+ 	source $(VENV)/bin/activate;

+ 	TEST='true' py.test --cov-report=term-missing --cov $(MODULENAME);

+ 	deactivate

+ 

+ .PHONY: test-ci

+ .ONESHELL: test-ci

+ test-ci: $(VENV)

+ 	set -e

+ 	source $(VENV)/bin/activate

+ 	TEST='true' py.test --cov-report=xml --cov $(MODULENAME)

+ 	deactivate

  

  .PHONY: pylint

  pylint:
@@ -49,7 +84,7 @@ 

  .PHONY: clean

  clean:

  	rm -rf dist

- 	rm -rf execdb.egg-info

+ 	rm -rf $(SRC).egg-info

  	rm -rf build

  	rm -f pep8.out

  	rm -f pylint.out
@@ -60,20 +95,22 @@ 

  .PHONY: $(SRC)-$(VERSION).tar.gz

  $(SRC)-$(VERSION).tar.gz:

  	git archive $(GITBRANCH) --prefix=$(SRC)-$(VERSION)/ | gzip -c9 > $@

+ 	mkdir -p build/$(VERSION)-$(RELEASE)

+ 	mv $(SRC)-$(VERSION).tar.gz build/$(VERSION)-$(RELEASE)/

  

- .PHONY: mocksrpm

- mocksrpm: archive

- 	mock -r $(BUILDTARGET) --buildsrpm --spec $(SPECFILE) --sources .

- 	cp /var/lib/mock/$(BUILDTARGET)/result/$(NVR).$(TARGETDIST).src.rpm .

+ .PHONY: srpm

+ srpm: archive

+ 	mock -r $(BUILDTARGET) --buildsrpm --spec $(SPECFILE) --sources build/$(VERSION)-$(RELEASE)/

+ 	cp /var/lib/mock/$(BUILDTARGET)/result/$(NVR).src.rpm build/$(VERSION)-$(RELEASE)/

  

- .PHONY: mockbuild

- mockbuild: mocksrpm

- 	mock -r $(BUILDTARGET) --no-clean --rebuild $(NVR).$(TARGETDIST).src.rpm

- 	cp /var/lib/mock/$(BUILDTARGET)/result/$(NVR).$(TARGETDIST).noarch.rpm .

+ .PHONY: build

+ build: srpm

+ 	mock -r $(BUILDTARGET) --no-clean --rebuild build/$(VERSION)-$(RELEASE)/$(NVR).src.rpm

+ 	cp /var/lib/mock/$(BUILDTARGET)/result/*.rpm build/$(VERSION)-$(RELEASE)/

  

- #.PHONY: kojibuild

- #kojibuild: mocksrpm

- #	koji build --scratch dist-6E-epel-testing-candidate $(NVR).$(TARGETDIST).src.rpm

+ .PHONY: scratch

+ scratch: srpm

+ 	koji build --scratch $(KOJITARGET) build/$(VERSION)-$(RELEASE)/$(NVR).src.rpm

  

  .PHONY: nvr

  nvr:
@@ -87,7 +124,10 @@ 

  virtualenv: $(VENV)

  

  .PHONY: $(VENV)

+ .ONESHELL: $(VENV)

  $(VENV):

- 	virtualenv $(VENV)

- 	sh -c "set -e; . $(VENV)/bin/activate; pip install -r requirements.txt; \

- 	       deactivate"

+ 	virtualenv --system-site-packages $(VENV)

+ 	set -e

+ 	source $(VENV)/bin/activate

+ 	pip install -r requirements.txt

+ 	deactivate

file modified
+24 -24
@@ -1,6 +1,6 @@ 

  Name:           execdb

  # NOTE: if you update version, *make sure* to also update `execdb/__init__.py`

- Version:        0.0.10

+ Version:        0.0.11

  Release:        1%{?dist}

  Summary:        Execution status database for Taskotron

  
@@ -10,24 +10,18 @@ 

  

  BuildArch:      noarch

  

- %if 0%{?fedora} <= 27

- Requires:       python-alembic

- Requires:       python-flask

- Requires:       python-flask-sqlalchemy

- Requires:       python-flask-wtf

- Requires:       python-flask-login

- %else

- Requires:       python2-alembic

- Requires:       python2-flask

- Requires:       python2-flask-sqlalchemy

- Requires:       python2-flask-wtf

- Requires:       python2-flask-login

- %endif

- Requires:       python2-flask-restful

- Requires:       python2-six

- 

- BuildRequires:  python2-devel

- BuildRequires:  python2-setuptools

+ Requires:       python3-alembic

+ Requires:       python3-flask

+ Requires:       python3-flask-sqlalchemy

+ Requires:       python3-flask-wtf

+ Requires:       python3-flask-login

+ Requires:       python3-flask-restful

+ Requires:       python3-psycopg2

+ Requires:       python3-wtforms

+ Requires:       python3-six

+ 

+ BuildRequires:  python3-devel

+ BuildRequires:  python3-setuptools

  

  %description

  ExecDB is a database that stores the execution status of jobs running
@@ -43,10 +37,10 @@ 

  rm -f %{buildroot}%{_sysconfdir}/execdb/*.py{c,o}

  

  %build

- %py2_build

+ %py3_build

  

  %install

- %py2_install

+ %py3_install

  

  # apache and wsgi settings

  install -d %{buildroot}%{_datadir}/execdb/conf
@@ -64,16 +58,22 @@ 

  %files

  %doc README.md

  %license LICENSE

- %{python2_sitelib}/execdb

- %{python2_sitelib}/*.egg-info

+ %{python3_sitelib}/execdb

+ %{python3_sitelib}/*.egg-info

+ 

+ %{_bindir}/execdb

  

- %attr(755,root,root) %{_bindir}/execdb

  %dir %{_sysconfdir}/execdb

  %config(noreplace) %{_sysconfdir}/execdb/settings.py

+ 

  %dir %{_datadir}/execdb

  %{_datadir}/execdb/*

  

  %changelog

+ * Wed Nov 28 2018 Frantisek Zatloukal <fzatlouk@redhat.com> - 0.0.11-1

+ - Switch to Python 3

+ - Drop Fedora 27

+ 

  * Fri Apr 27 2018 Frantisek Zatloukal <fzatlouk@redhat.com> - 0.0.10-1

  - API

  - Fix show_job template to use right path for api

file modified
+6 -6
@@ -18,15 +18,15 @@ 

  #    Josef Skladanka <jskladan@redhat.com>

  

  from flask import Flask, render_template

- from flask.ext.login import LoginManager

- from flask.ext.sqlalchemy import SQLAlchemy

+ from flask_login import LoginManager

+ from flask_sqlalchemy import SQLAlchemy

  

  import logging

  import os

  

  

  # the version as used in setup.py

- __version__ = "0.0.10"

+ __version__ = "0.0.11"

  

  

  # Flask App
@@ -69,7 +69,7 @@ 

      root_logger.setLevel(logging.DEBUG)

  

      if app.config['STREAM_LOGGING']:

-         print "doing stream logging"

+         print("doing stream logging")

          stream_handler = logging.StreamHandler()

          stream_handler.setLevel(loglevel)

          stream_handler.setFormatter(formatter)
@@ -77,7 +77,7 @@ 

          app.logger.addHandler(stream_handler)

  

      if app.config['SYSLOG_LOGGING']:

-         print "doing syslog logging"

+         print("doing syslog logging")

          syslog_handler = logging.handlers.SysLogHandler(

              address='/dev/log',

              facility=logging.handlers.SysLogHandler.LOG_LOCAL4)
@@ -87,7 +87,7 @@ 

          app.logger.addHandler(syslog_handler)

  

      if app.config['FILE_LOGGING'] and app.config['LOGFILE']:

-         print "doing file logging to %s" % app.config['LOGFILE']

+         print("doing file logging to %s" % app.config['LOGFILE'])

          file_handler = logging.handlers.RotatingFileHandler(

              app.config['LOGFILE'],

              maxBytes=500000,

file modified
+19 -19
@@ -50,7 +50,7 @@ 

  

  

  def upgrade_db(*args):

-     print "Upgrading Database to Latest Revision"

+     print("Upgrading Database to Latest Revision")

      alembic_cfg = get_alembic_config()

      al_command.upgrade(alembic_cfg, "head")

  
@@ -64,20 +64,20 @@ 

      current_rev = context.get_current_revision()

  

      if not current_rev:

-         print "Initializing alembic"

-         print " - Setting the current version to the first revision"

+         print("Initializing alembic")

+         print(" - Setting the current version to the first revision")

          al_command.stamp(alembic_cfg, "1cefaba53e0")

      else:

-         print "Alembic already initialized"

+         print("Alembic already initialized")

  

  

  def initialize_db(destructive):

      alembic_cfg = get_alembic_config()

  

-     print "Initializing database"

+     print("Initializing database")

  

      if destructive:

-         print " - Dropping all tables"

+         print(" - Dropping all tables")

          db.drop_all()

  

      # check whether the table 'job' exists
@@ -85,9 +85,9 @@ 

      insp = reflection.Inspector.from_engine(db.engine)

      table_names = insp.get_table_names()

      if 'job' not in table_names and 'Job' not in table_names:

-         print " - Creating tables"

+         print(" - Creating tables")

          db.create_all()

-         print " - Stamping alembic's current version to 'head'"

+         print(" - Stamping alembic's current version to 'head'")

          al_command.stamp(alembic_cfg, "head")

  

      # check to see if the db has already been initialized by checking for an
@@ -95,18 +95,18 @@ 

      context = MigrationContext.configure(db.engine.connect())

      current_rev = context.get_current_revision()

      if current_rev:

-         print " - Database is currently at rev %s" % current_rev

+         print(" - Database is currently at rev %s" % current_rev)

          upgrade_db(destructive)

      else:

-         print "WARN: You need to have your db stamped with an alembic revision"

-         print "      Run 'init_alembic' sub-command first."

+         print("WARN: You need to have your db stamped with an alembic revision")

+         print("      Run 'init_alembic' sub-command first.")

  

  

  def mock_data(destructive):

-     print "Populating tables with mock-data"

+     print("Populating tables with mock-data")

  

      if destructive or not db.session.query(User).count():

-         print " - User"

+         print(" - User")

          data_users = [('admin', 'admin'), ('user', 'user')]

  

          for d in data_users:
@@ -115,7 +115,7 @@ 

  

          db.session.commit()

      else:

-         print " - skipped User"

+         print(" - skipped User")

  

  def mock_data_live(destructive):

      import time
@@ -168,9 +168,9 @@ 

      (options, args) = parser.parse_args()

  

      if len(args) != 1 or args[0] not in possible_commands:

-         print usage

-         print

-         print 'Please use one of the following commands: %s' % str(possible_commands)

+         print(usage)

+         print("\n")

+         print('Please use one of the following commands: %s' % str(possible_commands))

          sys.exit(1)

  

      command = {
@@ -181,8 +181,8 @@ 

          'init_alembic': init_alembic,

      }[args[0]]

      if not options.destructive:

-         print "Proceeding in non-destructive mode. To perform destructive "\

-               "steps use -d option."

+         print("Proceeding in non-destructive mode. To perform destructive "\

+               "steps use -d option.")

  

      command(options.destructive)

  

file modified
+1 -1
@@ -18,7 +18,7 @@ 

  #    Josef Skladanka <jskladan@redhat.com>

  

  from flask import Blueprint, render_template, flash, url_for

- from flask.ext.login import login_required

+ from flask_login import login_required

  

  

  admin = Blueprint('admin', __name__)

@@ -18,10 +18,13 @@ 

  #    Josef Skladanka <jskladan@redhat.com>

  

  from flask import Blueprint, render_template, redirect, flash, url_for, request

- from flask.ext.wtf import Form

+ try:

+     from flask_wtf import FlaskForm as Form

+ except ImportError:

+     from flask_wtf import Form

  from wtforms import TextField, PasswordField, HiddenField, RadioField

  from wtforms.validators import Required

- from flask.ext.login import login_user, logout_user, login_required, current_user, AnonymousUserMixin

+ from flask_login import login_user, logout_user, login_required, current_user, AnonymousUserMixin

  

  

  from execdb import app, login_manager

file modified
+45 -106
@@ -21,7 +21,7 @@ 

  import werkzeug.exceptions

  from sqlalchemy.orm import exc as orm_exc

  

- from flask.ext.restful import reqparse

+ from flask_restful import reqparse

  from werkzeug.exceptions import HTTPException

  from werkzeug.exceptions import BadRequest as JSONBadRequest

  
@@ -32,6 +32,7 @@ 

  

  import json

  import re

+ from datetime import datetime

  

  from pprint import pformat

  
@@ -121,6 +122,7 @@ 

          job = db.session.query(Job).filter(Job.uuid == uuid).one()

      except orm_exc.NoResultFound:

          return 'UUID not found', 404

+ 

      job.t_triggered = str(job.t_triggered).split('.')[0]

      return render_template('show_job.html',

                             job=job,
@@ -173,7 +175,6 @@ 

      return jsonify(steps)

  

  

- 

  @main.route('/jobs', methods=['POST'])

  def create_job():

      job = Job()
@@ -199,139 +200,77 @@ 

      return jsonify(retval), 201

  

  

- def process_event(data):

- 

-     def bb_convert_properties(prop):

-         """Converts list of lists to dict"""

-         return dict([(key, value) for key, value, _ in prop])

- 

-     # at the moment, we act just on these events

-     event = data['event']

-     known_events = ['changeAdded', 'buildStarted', 'stepStarted',

-                     'stepFinished', 'buildFinished']

+ def process_bb_status(status_data):

+     # grab uuid, build state, properties

+     build_properties = status_data['properties']

+     uuid = build_properties['uuid'][0]

+     build_complete = status_data['complete']

  

-     if event not in known_events:

-         # FIXME remove

-         if 'uuid' in json.dumps(data):

-             app.logger.debug("UUID found in %s", event)

- 

-         return 'Skipping event', 204

- 

-     # grab the 'properties' field

-     if event == 'changeAdded':

-         properties = bb_convert_properties(data['payload']['change']['properties'])

-     elif event in ['buildStarted', 'buildFinished']:

-         properties = bb_convert_properties(data['payload']['build']['properties'])

-     elif event in ['stepStarted', 'stepFinished']:

-         properties = bb_convert_properties(data['payload']['properties'])

- 

-     # abort if uuid is not provided

-     try:

-         uuid = properties['uuid']

-     except KeyError:

-         return 'Missing `uuid` field in properties', 400

- 

-     if uuid is None:

-         return 'UUID set to None', 400

+     app.logger.info("Processing data for job {} (complete: {})".format(uuid, build_complete))

  

+     # find job in db

      try:

          job = db.session.query(Job).filter(Job.uuid == uuid).one()

      except orm_exc.NoResultFound:

+         app.logger.info("UUID {} not found".format(uuid))

          return 'UUID not found', 400

  

-     if event == 'changeAdded':

-         # FIXME ?

-         pass

+     # if 'complete' is false, create job

+     if not build_complete:

+         app.logger.debug("%s -- adding job %s"% (uuid, status_data['number']))

  

-     elif event == 'buildStarted' and job.current_state == 'Triggered':

-         job.start()

+         job.t_build_started = datetime.fromtimestamp(status_data['started_at'])

  

-         job.taskname = properties['taskname']

-         job.item = properties['item']

-         job.item_type = properties['item_type']

-         job.arch = properties['arch']

-         job.slavename = properties['slavename']

+         job.taskname = build_properties['taskname'][0]

+         job.item = build_properties['item'][0]

+         job.item_type = build_properties['item_type'][0]

+         job.arch = build_properties['arch'][0]

+         job.slavename = build_properties['slavename'][0]

          job.link_build_log = '/builders/%s/builds/%s' % (

-             data['payload']['build']['builderName'],

-             properties['buildnumber'])

+             status_data['buildrequest']['builderid'],

+             status_data['number'])

  

          db.session.add(job)

  

-         # add 'empty' steps for the build (since we know them already)

- #        app.logger.debug("%s: %s" % (uuid, data['payload']['build']['steps']))

- #        app.logger.debug("%s - Build Started" % uuid)

-         for step_info in data['payload']['build']['steps']:

-             #            app.logger.debug("%s -- adding step %s"% (uuid, step_info['name']))

-             step = BuildStep(name=step_info['name'])

-             step.job = job

-             db.session.add(step)

- 

          db.session.commit()

  

-     elif event == 'stepStarted' and job.current_state == 'Running':

-         step_info = data['payload']['step']

- #        app.logger.debug("%s - Step Started -  %s"% (uuid, step_info['name']))

-         try:

-             step = job.get_build_step(step_info['name'])

-         except KeyError:

-             app.logger.debug("Job %s had missing step %s", job.uuid, step_info)

-             step = BuildStep(name=step_info['name'])

-             step.job = job

- 

-         step.start()

-         step.status = 'INPROGRESS'

-         step.data = json.dumps(data['payload'])  # FIXME - store sensible subset of data

-         db.session.add(step)

-         db.session.commit()

- #        app.logger.debug("%s - Step Started -  %s - written to db"% (uuid, step_info['name']))

+     # if 'complete' is true, fill in buildsteps, finish job

+     else:

+         # add the completed time and state

  

-     elif event == 'stepFinished' and job.current_state == 'Running':

-         step_info = data['payload']['step']

- #        app.logger.debug("%s - Step Finished -  %s"% (uuid, step_info['name']))

-         try:

-             step = job.get_build_step(step_info['name'])

-         except KeyError:

-             return 'StepFinished received for non-existing step: %r' % step_info['name'], 400

+         job.t_build_ended = datetime.fromtimestamp(status_data['complete_at'])

+         # add the build steps

+         for step_info in status_data['steps']:

  

-         step.finish()

+             app.logger.debug("%s -- adding step %s"% (uuid, step_info['name']))

+             app.logger.debug("%s -- adding step %s"% (uuid, step_info['name']))

+             step = BuildStep(name=step_info['name'])

+             step.job = job

+             step.started_at = datetime.fromtimestamp(step_info['started_at'])

+             step.finished_at = datetime.fromtimestamp(step_info['complete_at'])

+             step.data = step_info['state_string']

  

-         step.status = 'OK'

-         # results key is only present for non-ok results

-         if 'results' in step_info.keys():

-             step.status = 'NOT OK'

-         step.data = json.dumps(data['payload'])  # FIXME - store sensible subset of data

+             # there doesn't seem to be a really reasonable way to tell if a step has failed but

+             # this should work well enough for now

+             if 'failed' in step_info['state_string']:

+                 step.status = 'NOT OK'

+             else:

+                 step.status = 'OK'

  

-         db.session.add(step)

-         db.session.commit()

- #        app.logger.debug("%s - Step Finished -  %s - written to db" % (uuid, step_info['name']))

+             db.session.add(step)

  

-     elif event == 'buildFinished' and job.current_state == 'Running':

-         job.finish()

-         db.session.add(job)

          db.session.commit()

- #        app.logger.debug("%s - Build Finished " % uuid)

  

  

- @main.route('/buildbottest', methods=['POST'])

+ @main.route('/buildbot', methods=['POST'])

  def bb_push():

      """

      Receives the post-push notifications from buildbot and fills in

      the steps for the job.

      """

-     # data are embedded in form field 'packets'

-     data = request.form

-     try:

-         data = request.form['packets']

-     except werkzeug.exceptions.BadRequestKeyError:

-         return 'Field `packets` missing in request form.', 400

-     data = json.loads(data)

- 

-     # app.logger.debug(pformat(data))

  

-     # multiple messages may be present in one 'packet'

-     for entry in data:

-         process_event(entry)

- #        app.logger.debug("%s %s, %s", entry['id'], entry['event'], process_event(entry))

+     data = request.get_json()

+     process_bb_status(data)

  

      # plain 200 code needs to be returned - otherwise buildbot is

      # endlessly trying to re-send the message.

file modified
+1 -1
@@ -18,7 +18,7 @@ 

  #    Josef Skladanka <jskladan@redhat.com>

  

  from execdb import db

- from flask.ext.login import UserMixin

+ from flask_login import UserMixin

  from werkzeug.security import generate_password_hash, check_password_hash

  

  

file modified
+2 -2
@@ -8,7 +8,7 @@ 

      <th>Item</th>

      <th>State</th>

      <th>Build Steps</th>

-     <th>Moar</th>

+     <th></th>

    </thead>

    <tbody>

  {% for job in jobs -%}
@@ -39,7 +39,7 @@ 

      </a>

    </td>

    <td>

-     <a href="{{ url_for('main.show_job', uuid=job.uuid)}}">Detail</a>

+     <a href="{{ url_for('main.show_job', uuid=job.uuid)}}">Details</a>

    </td>

  </tr>

  {% endfor -%}

file modified
+2 -2
@@ -2,7 +2,7 @@ 

  # this is a simple script to aid in the setup of a new db

  

  # init db

- python run_cli.py init_db ${@}

+ python3 run_cli.py init_db ${@}

  

  # insert mock data

- python run_cli.py mock_data ${@}

+ python3 run_cli.py mock_data ${@}

file modified
+1 -1
@@ -1,4 +1,4 @@ 

- #!/usr/bin/python

+ #!/usr/bin/python3

  #

  # Copyright 2014, Red Hat, Inc

  #

file modified
+1 -1
@@ -1,4 +1,4 @@ 

- #!/usr/bin/python

+ #!/usr/bin/python3

  #

  # runapp.py - script to facilitate running the execdb app from the CLI

  #

The way that buildbot sends status has changed with newer versions. This is the minimal change to execdb needed so that it works with the new status-push code.

Not that it's necessarily an issue, but why did you decide to rewrite t_triggered? I'd rather keep the actual "this is the time the job was triggered" value, or remove t_triggered altogether if the information is not relevant/interesting/important.

Other than the t_triggered nitpick, this looks OK to my eyes.

1 new commit added

  • removing extra reset of t_triggered
7 years ago

1 new commit added

  • removing correct extra reset of t_triggered
7 years ago

Bah, I set up this PR against the wrong branch and I'm not seeing any way to change this. I'll close this and open a new PR

Pull-Request has been closed by tflink

7 years ago