#1318 Action priorities
Merged 4 years ago by praiskup. Opened 4 years ago by frostyx.
copr/ frostyx/copr action-priorities  into  master

file modified
+55 -41
@@ -21,13 +21,15 @@ 

  

  from copr_common.rpm import splitFilename

  from copr_common.enums import ActionResult

+ 

+ from copr_backend.worker_manager import WorkerManager, QueueTask

+ 

  from .sign import create_user_keys, CoprKeygenRequestError

  from .exceptions import CreateRepoError, CoprSignError, FrontendClientException

  from .helpers import (get_redis_logger, silent_remove, ensure_dir_exists,

                        get_chroot_arch, cmd_debug, format_filename,

                        uses_devel_repo, call_copr_repo, build_chroot_log_name)

  from .sign import sign_rpms_in_dir, unsign_rpms_in_dir, get_pubkey

- from copr_backend.worker_manager import WorkerManager

  

  from .vm_manage.manager import VmManager

  
@@ -68,6 +70,7 @@ 

              ActionType.FORK: Fork,

              ActionType.BUILD_MODULE: BuildModule,

              ActionType.CANCEL_BUILD: CancelBuild,

+             ActionType.DELETE: Delete,

          }.get(action_type, None)

  

          if action_type == ActionType.DELETE:
@@ -80,7 +83,7 @@ 

              }.get(object_type, action_class)

  

          if not action_class:

-             raise ValueError("Unexpected action type")

+             raise ValueError("Unexpected action type: {}".format(action))

          return action_class

  

      # TODO: get more form opts, decrease number of parameters
@@ -227,7 +230,42 @@ 

          return result

  

  

- class DeleteProject(Action):

+ class Delete(Action):

+     """

+     Abstract class for all other Delete* classes.

+     """

+     # pylint: disable=abstract-method

+     def _handle_delete_builds(self, ownername, projectname, project_dirname,

+                               chroot_builddirs, build_ids):

+         """ call /bin/copr-repo --delete """

+         devel = uses_devel_repo(self.front_url, ownername, projectname)

+         result = ActionResult.SUCCESS

+         for chroot, subdirs in chroot_builddirs.items():

+             chroot_path = os.path.join(self.destdir, ownername, project_dirname,

+                                        chroot)

+             if not os.path.exists(chroot_path):

+                 self.log.error("%s chroot path doesn't exist", chroot_path)

+                 result = ActionResult.FAILURE

+                 continue

+ 

+             if not call_copr_repo(chroot_path, delete=subdirs, devel=devel):

+                 result = ActionResult.FAILURE

+ 

+             for build_id in build_ids or []:

+                 log_paths = [

+                     os.path.join(chroot_path, build_chroot_log_name(build_id)),

+                     # we used to create those before

+                     os.path.join(chroot_path, 'build-{}.rsync.log'.format(build_id)),

+                     os.path.join(chroot_path, 'build-{}.log'.format(build_id))]

+                 for log_path in log_paths:

+                     try:

+                         os.unlink(log_path)

+                     except OSError:

+                         self.log.debug("can't remove %s", log_path)

+         return result

+ 

+ 

+ class DeleteProject(Delete):

      def run(self):

          self.log.debug("Action delete copr")

          result = ActionResult.SUCCESS
@@ -285,7 +323,7 @@ 

          return result

  

  

- class DeleteMultipleBuilds(Action):

+ class DeleteMultipleBuilds(Delete):

      def run(self):

          self.log.debug("Action delete multiple builds.")

  
@@ -313,38 +351,8 @@ 

                  result = ActionResult.FAILURE

          return result

  

-     def _handle_delete_builds(self, ownername, projectname, project_dirname,

-                               chroot_builddirs, build_ids):

-         """ call /bin/copr-repo --delete """

-         devel = uses_devel_repo(self.front_url, ownername, projectname)

-         result = ActionResult.SUCCESS

-         for chroot, subdirs in chroot_builddirs.items():

-             chroot_path = os.path.join(self.destdir, ownername, project_dirname,

-                                        chroot)

-             if not os.path.exists(chroot_path):

-                 self.log.error("%s chroot path doesn't exist", chroot_path)

-                 result = ActionResult.FAILURE

-                 continue

  

-             if not call_copr_repo(chroot_path, delete=subdirs, devel=devel):

-                 result = ActionResult.FAILURE

- 

-             for build_id in build_ids or []:

-                 log_paths = [

-                     os.path.join(chroot_path, build_chroot_log_name(build_id)),

-                     # we used to create those before

-                     os.path.join(chroot_path, 'build-{}.rsync.log'.format(build_id)),

-                     os.path.join(chroot_path, 'build-{}.log'.format(build_id))]

-                 for log_path in log_paths:

-                     try:

-                         os.unlink(log_path)

-                     except OSError:

-                         self.log.debug("can't remove %s", log_path)

- 

-         return result

- 

- 

- class DeleteBuild(DeleteMultipleBuilds):

+ class DeleteBuild(Delete):

      def run(self):

          self.log.info("Action delete build.")

  
@@ -372,7 +380,7 @@ 

                                            build_ids)

  

  

- class DeleteChroot(Action):

+ class DeleteChroot(Delete):

      def run(self):

          self.log.info("Action delete project chroot.")

  
@@ -553,11 +561,17 @@ 

      CANCEL_BUILD = 10

  

  

- class ActionQueueTask():

-     def __init__(self, id):

-         self.id = id

-     def __repr__(self):

-         return str(self.id)

+ class ActionQueueTask(QueueTask):

+     def __init__(self, task):

+         self.task = task

+ 

+     @property

+     def id(self):

+         return self.task.data["id"]

+ 

+     @property

+     def frontend_priority(self):

+         return self.task.data.get("priority", 0)

  

  

  class ActionWorkerManager(WorkerManager):

@@ -7,7 +7,7 @@ 

  from copr_backend.frontend import FrontendClient

  from copr_backend.exceptions import FrontendClientException

  

- from ..actions import ActionWorkerManager, ActionQueueTask

+ from ..actions import ActionWorkerManager, ActionQueueTask, Action

  from ..helpers import get_redis_logger, get_redis_connection

  

  
@@ -44,7 +44,8 @@ 

                  error)

              return []

  

-         return [ActionQueueTask(action['id']) for action in raw_actions]

+         return [ActionQueueTask(Action(self.opts, action, log=self.log))

+                 for action in raw_actions]

  

  

      def run(self):

@@ -41,6 +41,27 @@ 

          raise KeyError('pop from an empty priority queue')

  

  

+ class QueueTask:

+     def __repr__(self):

+         return str(self.id)

+ 

+     @property

+     def id(self):

+         raise NotImplementedError

+ 

+     @property

+     def priority(self):

+         return sum([self.frontend_priority, self.backend_priority])

+ 

+     @property

+     def frontend_priority(self):

+         return 0

+ 

+     @property

+     def backend_priority(self):

+         return 0

+ 

+ 

  class WorkerManager():

      """

      Automatically process 'self.tasks' priority queue, and start background jobs
@@ -148,7 +169,7 @@ 

              return

  

          self.log.info("Adding task %s to queue", task_id)

-         self.tasks.add_task(task)

+         self.tasks.add_task(task, task.priority)

  

      def worker_ids(self):

          """

@@ -7,13 +7,16 @@ 

  import logging

  import subprocess

  from unittest.mock import MagicMock, patch

- from munch import Munch

  

- WORKDIR = os.path.dirname(__file__)

+ import pytest

+ from munch import Munch

+ from copr_common.enums import DefaultActionPriorityEnum

  

  from copr_backend.helpers import get_redis_connection

- from copr_backend.actions import ActionWorkerManager, ActionQueueTask

- from copr_backend.worker_manager import JobQueue

+ from copr_backend.actions import ActionWorkerManager, ActionQueueTask, Action

+ from copr_backend.worker_manager import JobQueue, WorkerManager, QueueTask

+ 

+ WORKDIR = os.path.dirname(__file__)

  

  REDIS_OPTS = Munch(

      redis_db=9,
@@ -24,7 +27,8 @@ 

  log.setLevel(logging.DEBUG)

  

  

- class ToyWorkerManager(ActionWorkerManager):

+ class ToyWorkerManager(WorkerManager):

+     # pylint: disable=abstract-method

      process_counter = 0

      task_sleep = 0

  
@@ -48,6 +52,19 @@ 

          subprocess.check_call(list(map(str, cmd)), env=environ)

  

  

+ class ToyActionWorkerManager(ToyWorkerManager, ActionWorkerManager):

+     pass

+ 

+ 

+ class ToyQueueTask(QueueTask):

+     def __init__(self, _id):

+         self._id = _id

+ 

+     @property

+     def id(self):

+         return self._id

+ 

+ 

  class TestPrioQueue(object):

      def setup_method(self, method):

          raw_actions = [0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9]
@@ -87,30 +104,28 @@ 

          assert self.get_tasks() == [6, 7, 9, 0, 1, 2, 3, 4, 5, 8]

  

  

- class TestWorkerManager(object):

+ class BaseTestWorkerManager:

      redis = None

      worker_manager = None

  

      def setup_method(self, method):

+         self.setup_redis()

+         self.setup_worker_manager()

+         self.setup_tasks()

+ 

+     def setup_redis(self):

          self.redis = get_redis_connection(REDIS_OPTS)

          self.redis.flushall()

  

+     def setup_worker_manager(self):

          self.worker_manager = ToyWorkerManager(

              redis_connection=self.redis,

              max_workers=5,

              log=log)

  

-         prefix = 'toy:' + str(time.time())

-         self.worker_manager.worker_prefix = prefix

-         prefix += ':'

-         self.wprefix = prefix

-         self.w0 = prefix + '0'

-         self.w1 = prefix + '1'

- 

-         self.worker_manager.frontend_client = MagicMock()

- 

+     def setup_tasks(self):

          raw_actions = [0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9]

-         actions = [ActionQueueTask(action) for action in raw_actions]

+         actions = [ToyQueueTask(action) for action in raw_actions]

          for action in actions:

              self.worker_manager.add_task(action)

  
@@ -127,6 +142,8 @@ 

                  break

          return count

  

+ 

+ class TestWorkerManager(BaseTestWorkerManager):

      def test_worker_starts(self):

          task = self.worker_manager.tasks.pop_task()

          assert task.id == 0
@@ -137,6 +154,29 @@ 

      def test_number_of_tasks(self):

          assert self.remaining_tasks() == 10

  

+ 

+ class TestActionWorkerManager(BaseTestWorkerManager):

+     # pylint: disable=attribute-defined-outside-init

+     def setup_worker_manager(self):

+         self.worker_manager = ToyActionWorkerManager(

+             redis_connection=self.redis,

+             max_workers=5,

+             log=log)

+ 

+         prefix = 'toy:' + str(time.time())

+         self.worker_manager.worker_prefix = prefix

+         prefix += ':'

+         self.wprefix = prefix

+         self.w0 = prefix + '0'

+         self.w1 = prefix + '1'

+         self.worker_manager.frontend_client = MagicMock()

+ 

+     def setup_tasks(self):

+         raw_actions = [0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9]

+         for action in raw_actions:

+             action = ToyQueueTask(action)

+             self.worker_manager.add_task(action)

+ 

      def test_run_starts_the_workers(self):

          self.worker_manager.run(timeout=0.0001)

          workers = self.workers()
@@ -211,7 +251,7 @@ 

          self.worker_manager.run(timeout=0.0001)

  

          queue = copy.deepcopy(self.worker_manager.tasks)

-         self.worker_manager.add_task(ActionQueueTask(0))

+         self.worker_manager.add_task(ToyQueueTask(0))

          assert len(queue.prio_queue) == len(self.worker_manager.tasks.prio_queue)

          assert ('root', logging.WARNING, "Task 0 has worker, skipped") in caplog.record_tuples

  
@@ -222,7 +262,7 @@ 

  

          # only one task, but it will take some time.

          self.worker_manager.task_sleep = 0.5

-         self.worker_manager.add_task(ActionQueueTask(0))

+         self.worker_manager.add_task(ToyQueueTask(0))

  

          # start the worker

          self.worker_manager.run(timeout=0.0001) # start them task
@@ -250,7 +290,7 @@ 

          """

          self.worker_manager.task_sleep = 3 # assure task takes some time

          self.worker_manager.clean_tasks()

-         self.worker_manager.add_task(ActionQueueTask(0))

+         self.worker_manager.add_task(ToyQueueTask(0))

          self.worker_manager.worker_timeout_start = 1

          self.worker_manager.worker_timeout_deadcheck = 1.5

  
@@ -283,3 +323,87 @@ 

          self.worker_manager.run(timeout=0.0001)

  

          assert len(self.worker_manager.worker_ids()) == 0

+ 

+ 

+ class TestActionWorkerManagerPriorities(BaseTestWorkerManager):

+     def setup_worker_manager(self):

+         self.worker_manager = ToyActionWorkerManager(

+             redis_connection=self.redis,

+             max_workers=5,

+             log=log)

+ 

+     def setup_tasks(self):

+         pass

+ 

+     def pop(self):

+         return self.worker_manager.tasks.pop_task()

+ 

+     def test_actions_priorities(self):

+         frontend_data = [

+             {"id": 10, "priority": DefaultActionPriorityEnum("delete")},

+             {"id": 11, "priority": DefaultActionPriorityEnum("delete")},

+             {"id": 12, "priority": DefaultActionPriorityEnum("createrepo")},

+             {"id": 13, "priority": DefaultActionPriorityEnum("update_comps")},

+             {"id": 14, "priority": DefaultActionPriorityEnum("rawhide_to_release")},

+             {"id": 15, "priority": DefaultActionPriorityEnum("rawhide_to_release")},

+             {"id": 16, "priority": DefaultActionPriorityEnum("build_module")},

+             {"id": 17, "priority": DefaultActionPriorityEnum("cancel_build")},

+             {"id": 18, "priority": DefaultActionPriorityEnum("fork")},

+             {"id": 19, "priority": DefaultActionPriorityEnum("gen_gpg_key")},

+             {"id": 20, "priority": 0},

+         ]

+         for action in frontend_data:

+             queue_task = ActionQueueTask(Action(MagicMock(), action, log=log))

+             self.worker_manager.add_task(queue_task)

+ 

+         assert self.pop().id == 19

+         assert self.pop().id == 17

+ 

+         # These have the same priority and the queue is FIFO

+         assert self.pop().id == 12

+         assert self.pop().id == 13

+         assert self.pop().id == 16

+         assert self.pop().id == 18

+         assert self.pop().id == 20

+ 

+         assert self.pop().id == 10

+         assert self.pop().id == 11

+ 

+         assert self.pop().id == 14

+         assert self.pop().id == 15

+ 

+         # Tasks queue is empty now

+         with pytest.raises(KeyError) as ex:

+             self.pop()

+         assert "empty" in str(ex)

+ 

+     def test_backend_priority_adjustments(self):

+         """

+         Test that backend still can adjust or ultimately override priorities

+         """

+         frontend_data = [

+             {"id": 10, "priority": DefaultActionPriorityEnum("delete")},

+             {"id": 11, "priority": DefaultActionPriorityEnum("delete")},

+             {"id": 12, "priority": DefaultActionPriorityEnum("createrepo")},

+             {"id": 13, "priority": DefaultActionPriorityEnum("gen_gpg_key")},

+             {"id": 14, "priority": DefaultActionPriorityEnum("fork")},

+         ]

+         actions = [ActionQueueTask(Action(MagicMock(), action, log)) for action in frontend_data]

+ 

+         # QueueTask.backend_priority is a property which should be

+         # overriden when in the class descendants.

+         delattr(QueueTask, "backend_priority")

+         actions[0].backend_priority = 0

+         actions[1].backend_priority = -999

+         actions[2].backend_priority = 999

+         actions[3].backend_priority = 0

+         actions[4].backend_priority = -900

+ 

+         for action in actions:

+             self.worker_manager.add_task(action)

+ 

+         assert self.pop().id == 11

+         assert self.pop().id == 14

+         assert self.pop().id == 13

+         assert self.pop().id == 10

+         assert self.pop().id == 12

@@ -44,6 +44,33 @@ 

      }

  

  

+ class DefaultActionPriorityEnum(with_metaclass(EnumType, object)):

+     """

+     The higher the 'priority' is, the later the task is taken.

+     Keep actions priority in range -100 to 100

+     """

+     vals = {

+         "gen_gpg_key": -70,

+         "cancel_build": -10,

+         "createrepo": 0,

+         "fork": 0,

+         "build_module": 0,

+         "update_comps": 0,

+         "delete": 60,

+         "rawhide_to_release": 70,

+     }

+ 

+ 

+ class ActionPriorityEnum(with_metaclass(EnumType, object)):

+     """

+     Naming/assigning the values is a little bit tricky because

+     how the current implementation works (i.e. it is inverted).

+     However, from the most abstract point of view,

+     "highest priority" means "do this as soon as possible"

+     """

+     vals = {"highest": -99, "lowest": 99}

+ 

+ 

  class BackendResultEnum(with_metaclass(EnumType, object)):

      vals = {"waiting": 0, "success": 1, "failure": 2}

  

@@ -0,0 +1,20 @@ 

+ """

+ Add priority column for actions

+ 

+ Revision ID: 58eab04e5afc

+ Revises: 67ba91dda3e3

+ Create Date: 2020-04-07 11:02:52.871921

+ """

+ 

+ import sqlalchemy as sa

+ from alembic import op

+ 

+ 

+ revision = '58eab04e5afc'

+ down_revision = '67ba91dda3e3'

+ 

+ def upgrade():

+     op.add_column('action', sa.Column('priority', sa.Integer(), nullable=True))

+ 

+ def downgrade():

+     op.drop_column('action', 'priority')

@@ -0,0 +1,15 @@ 

+ import functools

+ import sqlalchemy

+ from coprs import models

+ 

+ 

+ def deprioritize_actions(f):

+     @functools.wraps(f)

+     def wrapper(*args, **kwargs):

+         sqlalchemy.event.listen(models.Action, "before_insert", _deprioritize_action)

+         return f(*args, **kwargs)

+     return wrapper

+ 

+ 

+ def _deprioritize_action(mapper, connection, target):

+     target.priority = 99

@@ -1,9 +1,11 @@ 

  import click

+ from . import deprioritize_actions

  from coprs import db_session_scope

  from coprs.logic.complex_logic import ComplexLogic

  

  

  @click.command()

+ @deprioritize_actions

  def clean_expired_projects():

      """

      Clean all the expired temporary projects.  This command is meant to be

@@ -1,9 +1,11 @@ 

  import click

+ from . import deprioritize_actions

  from coprs import db_session_scope

  from coprs.logic.builds_logic import BuildsLogic

  

  

  @click.command()

+ @deprioritize_actions

  def clean_old_builds():

      """

      This garbage collects all builds which are "obsoleted" per user

@@ -73,6 +73,7 @@ 

                                              BackendResultEnum("failure")]:

              action.ended_on = time.time()

          db.session.add(action)

+         return action

  

      @classmethod

      def send_createrepo(cls, copr, dirnames=None):
@@ -100,6 +101,7 @@ 

              created_on=int(time.time()),

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_delete_copr(cls, copr):
@@ -113,6 +115,7 @@ 

                                 data=json.dumps(data_dict),

                                 created_on=int(time.time()))

          db.session.add(action)

+         return action

  

      @classmethod

      def get_chroot_builddirs(cls, build):
@@ -164,6 +167,7 @@ 

              created_on=int(time.time())

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_delete_multiple_builds(cls, builds):
@@ -209,11 +213,15 @@ 

              created_on=int(time.time())

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_cancel_build(cls, build):

          """ Schedules build cancel action

          :type build: models.Build

+ 

+         @TODO refactor this method generate only one action and then

+               add a return statement

          """

          for chroot in build.build_chroots:

              if chroot.state != "running":
@@ -253,6 +261,7 @@ 

              created_on=int(time.time())

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_create_gpg_key(cls, copr):
@@ -272,6 +281,7 @@ 

              created_on=int(time.time()),

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_rawhide_to_release(cls, data):
@@ -282,6 +292,7 @@ 

              created_on=int(time.time()),

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_fork_copr(cls, src, dst, builds_map):
@@ -300,6 +311,7 @@ 

              created_on=int(time.time()),

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_build_module(cls, copr, module):
@@ -324,6 +336,7 @@ 

              created_on=int(time.time()),

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def send_delete_chroot(cls, copr_chroot):
@@ -346,6 +359,7 @@ 

              created_on=int(time.time())

          )

          db.session.add(action)

+         return action

  

      @classmethod

      def cache_action_graph_data(cls, type, time, waiting, success, failure):
@@ -364,7 +378,8 @@ 

                  failed = failure

              )

              db.session.add(cached_data)

-             db.session.commit()

+             db.session.commit()  # @FIXME We should not commit here

+             return action

          except IntegrityError: # other process already calculated the graph data and cached it

              db.session.rollback()

  

@@ -10,7 +10,7 @@ 

  from sqlalchemy.orm.exc import NoResultFound

  from sqlalchemy.orm.attributes import get_history

  

- from copr_common.enums import ActionTypeEnum, BackendResultEnum

+ from copr_common.enums import ActionTypeEnum, BackendResultEnum, ActionPriorityEnum

  from coprs import db

  from coprs import exceptions

  from coprs import helpers
@@ -627,7 +627,8 @@ 

              db.session.add(

                  models.CoprChroot(copr=copr, mock_chroot=mock_chroot))

  

-         ActionsLogic.send_createrepo(copr)

+         action = ActionsLogic.send_createrepo(copr)

+         action.priority = ActionPriorityEnum("highest")

  

      @classmethod

      def create_chroot(cls, user, copr, mock_chroot, buildroot_pkgs=None, repos=None, comps=None, comps_name=None,

@@ -16,7 +16,8 @@ 

  

  from flask import url_for

  

- from copr_common.enums import ActionTypeEnum, BackendResultEnum, FailTypeEnum, ModuleStatusEnum, StatusEnum

+ from copr_common.enums import (ActionTypeEnum, BackendResultEnum, FailTypeEnum,

+                                ModuleStatusEnum, StatusEnum, DefaultActionPriorityEnum)

  from coprs import db

  from coprs import helpers

  from coprs import app
@@ -1484,6 +1485,9 @@ 

      # old and new values of the changed property

      old_value = db.Column(db.String(255))

      new_value = db.Column(db.String(255))

+     # the higher the 'priority' is, the later the task is taken.

+     # Keep actions priority in range -100 to 100

+     priority = db.Column(db.Integer, nullable=True, default=0)

      # additional data

      data = db.Column(db.Text)

      # result of the action, see BackendResultEnum
@@ -1521,6 +1525,11 @@ 

              d["data"] = json.dumps(data)

          return d

  

+     @property

+     def default_priority(self):

+         action_type_str = ActionTypeEnum(self.action_type)

+         return DefaultActionPriorityEnum.vals.get(action_type_str, 0)

+ 

  

  class Krb5Login(db.Model, helpers.Serializer):

      """

@@ -185,9 +185,14 @@ 

  @backend_ns.route("/pending-actions/")

  def pending_actions():

      'get the list of actions backand should take care of'

-     actions = actions_logic.ActionsLogic.get_waiting()

-     data = [{'id': action.id} for action in actions]

-     return flask.jsonify(data)

+     data = []

+     for action in actions_logic.ActionsLogic.get_waiting():

+         data.append({

+             'id': action.id,

+             'priority': action.priority or action.default_priority,

+         })

+     return flask.json.dumps(data)

+ 

  

  

  @backend_ns.route("/action/<int:action_id>/")

@@ -2,7 +2,7 @@ 

  

  from unittest import mock, skip

  

- from copr_common.enums import BackendResultEnum, StatusEnum

+ from copr_common.enums import BackendResultEnum, StatusEnum, DefaultActionPriorityEnum

  from tests.coprs_test_case import CoprsTestCase, new_app_context

  from coprs.logic.builds_logic import BuildsLogic

  
@@ -271,7 +271,10 @@ 

      def test_pending_actions_list(self, f_users, f_coprs, f_actions, f_db):

          r = self.tc.get("/backend/pending-actions/", headers=self.auth_header)

          actions = json.loads(r.data.decode("utf-8"))

-         assert actions == [{'id': 1}, {'id': 2}]

+         assert actions == [

+             {'id': 1, 'priority': DefaultActionPriorityEnum("delete")},

+             {'id': 2, 'priority': DefaultActionPriorityEnum("cancel_build")}

+         ]

  

          self.delete_action.result = BackendResultEnum("success")

          self.db.session.add(self.delete_action)

@@ -7,7 +7,7 @@ 

  

  from sqlalchemy import desc

  

- from copr_common.enums import ActionTypeEnum

+ from copr_common.enums import ActionTypeEnum, ActionPriorityEnum

  from coprs import app, cache, models

  

  from coprs.logic.coprs_logic import CoprsLogic, CoprDirsLogic
@@ -891,3 +891,25 @@ 

              r2 = self.tc.get(url.format(chroot="rhelbeta-8", **kwargs))

              assert "baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/" in r1.data.decode("utf-8")

              assert "baseurl=https://foo/results/user1/foocopr/rhelbeta-8-$basearch/" in r2.data.decode("utf-8")

+ 

+ 

+ class TestCoprActionsGeneration(CoprsTestCase):

+ 

+     @TransactionDecorator("u1")

+     def test_createrepo_priority(self, f_users, f_mock_chroots, f_db):

+         # When creating a project the initial createrepo action should be prioritized

+         self.test_client.post("/coprs/{0}/new/".format(self.u1.name),

+             data={"name": "foo",

+                   "fedora-rawhide-i386": "y",

+                   "arches": ["i386"]})

+ 

+         copr = CoprsLogic.get(self.u1.username, "foo").one()

+         actions = ActionsLogic.get_many(ActionTypeEnum("createrepo")).all()

+         assert len(actions) == 1

+         assert actions[0].priority == ActionPriorityEnum("highest")

+ 

+         # User-requested createrepo actions should have normal priority

+         self.test_client.post("/coprs/id/{0}/createrepo/".format(copr.id), data={})

+         actions = ActionsLogic.get_many(ActionTypeEnum("createrepo")).all()

+         assert len(actions) == 2

+         assert actions[1].priority == 0

Metadata Update from @frostyx:
- Pull-request tagged with: wip

4 years ago

Thanks for the PR! The first commit is something I wanted to do, but didn't want to because I was afraid that nobody would be OK to review it.

The get_action_class introduction should be in the first commit, to make it easier to review.

This indeed deserves fix before we merge.

Mixin has concrete meaning in python ... IMO it is confusing.

delete and delete multiple builds are not compatible (different task json), I don't think this inherit will work.

We must have ID here, otherwise we are doomed. I'd prefer 'assert self.caction["id"]'.

The priority must be set at top-level, to make it less magic. Ideally in ActionDispatcher.run() or in ActionDispatcher.get_frontend_actions().

I welcome the idea to set "default" priority in concrete Action class;
but primarily frontend should be responsible for setting the priority.
That will give us much more flexibility for the future.

The dispatchers (either action or build) should be responsible for
re-adjusting the priority (e.g. when too many tasks are coming from one
user).

What about to change that to priority => penalty and
add

ActionQueueTask.priority = fe_priority      # given from json
                         + backend_penalty  # per load
                         + action.penalty   # default per action

Otherwise awesome, thanks again - I know this is still WIP but I couldn't wait
with the pre-review :-)

Thank you for the pre-review!

The first commit is something I wanted to do, but didn't want to because I was afraid that nobody will be OK to review.

I also wanted to do it for a while but didn't want to waste time on refactoring just for the sake of refactoring. Now I thought that it will allow me to easily implement the priorities but it is starting to turn out that I was probably mistaken and in fact didn't need it. But it is done now, so we can as well use it.

Since it is a pretty big diff and it makes sense by itself ... what do you say that I would separate it to its own pull request, so we can review the refactorization properly and then discuss just the priorities here?

what do you say that I would separate it to its own pull request

Good idea.

Metadata Update from @frostyx:
- Pull-request tagged with: blocked

4 years ago

Metadata Update from @praiskup:
- Pull-request untagged with: blocked

4 years ago

rebased onto fb0e6e2ba63e9268fa891095905a240e6c72f84f

4 years ago

The rebased version doesn't seem to be updated - please ping me once you
have some progress here and I'll take a look.

4 new commits added

  • backend: move _handle_delete_builds to Delete class
  • priority from frontend
  • another iteration with working tests
  • wip with working tests
4 years ago

This is still WIP, commit history is a total mess and I still need to add alembic migrations. I wasn't sure if I should wait until PR#1323 is finished, so we don't unnecessarily conflict the migration hashes.

Anyway, I would like your feedback on this approach.

  • We have already established that frontend needs its say when determining priority, so we can't leave it on backend entirely
  • However, I still think that frontend should not be responsible for knowing how priorities are calculated at the lowest level (JobQueue)
  • The proposed approach is a compromise between those two
    • It allows frontend to say that an action is either "highest", "lowest" or normal priority. If it doesn't set the priority, then it is considered to be a normal priority.
    • Meanwhile, backend defines default priority for specific action types. It is a number in the 0-100 range, which is ideal for JobQueue
    • The lowest value (the lower the value, the higher the priority) has GenerateGpgKey with the value of 10, the highest has RawhideToRelease with the value of 90. When a priority is specified by frontend, then that one action gets either 1 or 99 value which is lower/higher than anything else.

IMHO this should work fine for our use cases.

Of course, if it is more complicated than necessary or just doesn't work for all use-cases, we can just move the 0-100 priority values definitions to frontend ...

Yes, FE should setup the priority, and BE should somehow adjust (because
FE has no idea about number of workers (or builder VMs). Note that all the
priority logic is going to be re-used for builds (not only actions).

It allows frontend to say that an action is either "highest", "lowest" or normal priority. If it doesn't set the priority, then it is considered to be a normal priority.

I'd prefer just number here.

Meanwhile, backend defines default priority for specific action types. It is a number in the 0-100 range, which is ideal for JobQueue

I don't think we have to say it is 0-100 range here, it is somewhat artificial. But
it is your choice. I see priority as integer number (can be negative, from what I
remember) and time would show what values we'll need.

The lowest value (the lower the value, the higher the priority) has GenerateGpgKey with the value of 10, the highest has RawhideToRelease with the value of 90.

Agreed in a sense that generating GPG keys (or initial createrepo) actions should be
the most prioritized.

Of course, if it is more complicated than necessary or just doesn't work for all use-cases, we can just move the 0-100 priority values definitions to frontend ...

KISS is preferred. I'd prefer integer number on FE (which can be anytime adjusted
to tweak the priority, for now in-code is enough). And integer number on BE, and
the final priority can be sum([FE, BE, ... others? ...]).

I'd prefer just number here.

I probably wrote it wrong. I mean, it works with numbers. When FE sends -1 it means that the task has the lowest possible priority and when FE sends 1 it means that the task has the highest possible priority.

I don't think we have to say it is 0-100 range here, it is somewhat artificial.
...
I see priority as integer number (can be negative, from what I
remember) and time would show what values we'll need.

I took inspiration from Linux config.d directories. They contain config files that I believe can be prefixed with a 0-100 prefix, which determines which of the config files has the highest priority.

The reason why I go with such big ranges (and why it is IMHO done this way for config directories) is that we can easily adjust priorities and move some types of actions before other types of actions without running into conflicts and needing to reassign everything. I hope it makes sense.

We can go with negative numbers, e.g. <-50;50> or <-100;100>, I have absolutely no preference here.

KISS is preferred. I'd prefer integer number on FE (which can be anytime adjusted
to tweak the priority, for now in-code is enough). And integer number on BE, and
the final priority can be sum([FE, BE, ... others? ...]).

We can go even more KISS. We could just define the default priority value for each action type on frontend, and send either it or some adjusted value for background actions. Backend would then just take the number and use it when putting the action into JobQueue.

But the version that you are suggesting is actually only a small adjustment of my current implementation, so I see no problem in that.

On Thursday, April 2, 2020 11:57:25 PM CEST Jakub Kadlčík wrote:

We can go even more KISS. We could just define the default priority
value for each action type on frontend, and send either it or some
adjusted value for background actions.

Yeah, background actions can have priority penalty adjusted on FE so
backend doesn't have to care.

Agreed. The easier the better, this sounds good. I guess that with
builds it won't be that easy.. I'll have to try to de-prioritize builds
for users that have a lot of tasks in queue (let's say that user's first
10 builds will have normal priority, and e.g. 11+ will have penalty +20,
or some progressive +1 for each one task in queue). I guess this has to
be re-calculated on backend depending on how many workers are taken,
dunno now.

Backend would then just take the number and use it when putting the
action into JobQueue.

Sure. It is at least good enough. I'd prefer to hold BJ call so we can
assure each other that we understand the proposal(s).

1 new commit added

  • another working iteration - with sum
4 years ago

1 new commit added

  • use default value if nothing was sent
4 years ago

This is still WIP unfortunately, but I believe we are almost approaching the finish line. The current implementation should comply with the specifications that we discussed with @praiskup off-list. The only thing that is missing is setting the priority number for those special cases (cron, etc) when storing the action into the database

The sort_keys doesn't seem to be needed. We sent array over network, not dict.

I don't see any problem in keeping the old variant here.

Not a problem in particular, but note that we were punished a lot by data loss when
the delete action was done wrong. Having None there instead of tracebacking can
be a bit problematic if the following code doesn't check for None.

Can we have another BJ call about this?

This is very promising, thank you for the update!

Should createrepo be really prioritized that much? It seems like similar to "delete build(s)" category to me (unless it is the initial createrepo action).

Here it would be nice to have a note about what numbers make higher/lower priority (I am starting to think we should invert the numbers - the higher the number, the higher the priority - to make it more readable).

rebased onto 10ca7f465039d76d020de548f83524f270d04151

4 years ago

Metadata Update from @frostyx:
- Pull-request untagged with: wip

4 years ago

but I believe we are almost approaching the finish line

Yea, so it turned out I was overly optimistic again. The frontend part came with its own set of difficulties that needed to be solved. But I finally got to a version that I am happy with. Please see the commit messages for more information. I think I've never reimplemented something so many times in my life before. Feels like I went through all the possible options just to figure out that they are not good enough and settled with the last possible one.

... What I am trying to say is, sorry that it took me so long.

The sort_keys doesn't seem to be needed. We sent array over network, not dict.
I don't see any problem in keeping the old variant here.

Not a problem in particular, but note that we were punished a lot by data loss when
the delete action was done wrong. Having None there instead of tracebacking can
be a bit problematic if the following code doesn't check for None.

I've fixed all of these now.

Should createrepo be really prioritized that much? It seems like similar to "delete build(s)"
category to me (unless it is the initial createrepo action).

I wanted to avoid dealing with this special case (initial createrepo) so I just prioritized all of them. My guess is that it should be trivial to do, but the PR is really long as is, so I don't know if you want me to do it here or rather in a follow-up PR.

I am starting to think we should invert the numbers - the higher the number, the higher the priority - to make it more readable

I agree with you, it is the exact opposite of what I would expect. But AFAIK the JobQueue is copy-pasted from official python documentation, so possibly other people are doing it the same way? And it took just a few minutes to wrap my head around, that lower number is going to be processed sooner, so personally I don't feel the need to invert the numbers. But I am not against it either.

I wasn't sure if the commit

frontend, backend: minimalize the transfered ammount of action information

is a wanted change so I didn't squash it to previous commits and left it separated, so I can throw it away if needed. If we want it, I can squash it since it reverts some changes from previous commits. It's up to you.

Since the WorkerManager object can now only add QueueTask objects, we probably don't need the priority argument here (it is directly in the task itself).

... , you can extract priority from task here.

Commit message typos:
- s/ammount/amount/
- s/Frontend send/Frontend sent/

Small leftover:
I think that "initial createrepo" action should be prioritized over normal
user-defined createrepo requests (one of the main motivations for this PR).

The current approach doesn't let Backend to change the priority of the task,
because at the time of reading the queue from frontend it has no semantic info
about the task except for the ID. This sounds very trivial, and thus correct.
Thanks for this.

I originally thought that we could basically let most of the actions on normal
(thus "0") priority, and only adjust those that we have to adjust (e.g. the
gpg key and initial createrepo) but I don't think we actually have to. This
can be changed trivially in future. The only weird thing is that there's no
"normal" value (yes, 0 is, but none of the actions actually has this value).

Nice patches, thank you.

If we want it, I can squash it since it reverts some changes from previous commits. It's up to you.

I usually prefer squashed than ping-pong in one pull-request.

10 new commits added

  • frontend: prioritize initial createrepo action
  • return generated action
  • createrepo shouldn't be prioritized that much
  • theese actions were default-priority actions with non-default value
  • add_task doesn't take priority anymore
  • frontend, backend: minimalize the transfered amount of action information
  • backend: process actions in regard to their priority
  • frontend: set lower priority for some actions
  • common: define default priority value based on action types
  • backend: move _handle_delete_builds to Delete class
4 years ago

10 new commits added

  • frontend: prioritize initial createrepo action
  • frontend: make ActionsLogic.send_* methods to return the generated action
  • createrepo shouldn't be prioritized that much
  • theese actions were default-priority actions with non-default value
  • add_task doesn't take priority anymore
  • frontend, backend: minimalize the transfered amount of action information
  • backend: process actions in regard to their priority
  • frontend: set lower priority for some actions
  • common: define default priority value based on action types
  • backend: move _handle_delete_builds to Delete class
4 years ago

I've rebased the branch to fix those commit message typos and added these new commits

* a9a69050 frontend: prioritize initial createrepo action
* 92e4cbf4 frontend: make ActionsLogic.send_* methods to return the generated action
* ce974aca createrepo shouldn't be prioritized that much
* 13c5de03 theese actions were default-priority actions with non-default value
* b16f1888 add_task doesn't take priority anymore

I will squash them before merging. I am not doing it now because it is a big PR and I would like to spare you from reviewing it all again.

Since the WorkerManager object can now only add QueueTask objects, we probably don't need the priority argument here (it is directly in the task itself).
... , you can extract priority from task here.

Fixed

Commit message typos:

Fixed

I think that "initial createrepo" action should be prioritized over normal
user-defined createrepo requests (one of the main motivations for this PR).

Done

The current approach doesn't let Backend to change the priority of the task,
because at the time of reading the queue from frontend it has no semantic info
about the task except for the ID. This sounds very trivial, and thus correct.

Which is ... good?

The only weird thing is that there's no
"normal" value (yes, 0 is, but none of the actions actually has this value).

True, there was a set of actions that were in the ~middle of the range and had the same priority value. Which basically meant that they had normal priority. However, they didn't have priority value 0 but they had value 20 instead. It didn't make any sense, you were right about that. I've set them to zero.

rebased onto fdf31b0643712a4f58e0aed9faf8c1e6742e6be2

4 years ago

One more rebase ^^ to fix merge conflicts from PR#1335

Some of the commits are missing frontend: etc. tags. Dunno whether you plan
to squash them or fix. But generally looks good to me.

There are some new pylint errors (I tested the linter from the other PR):

/backend/copr_backend/actions.py:22: C0413[pylint]: Import "from copr_common.enums import DefaultActionPriorityEnum, ActionTypeEnum" should be placed at the top of the module
/backend/copr_backend/actions.py:29: C0413[pylint]: Import "from copr_backend.worker_manager import WorkerManager, QueueTask" should be placed at the top of the module
/backend/copr_backend/actions.py:230: W0223[pylint]: Method 'run' is abstract in class 'Action' but is not overridden
/backend/copr_backend/actions.py:577:4: W0235[pylint]: Useless super delegation in method 'add_task'
/backend/copr_backend/actions.py:22: W0611[pylint]: Unused DefaultActionPriorityEnum imported from copr_common.enums
/backend/copr_backend/actions.py:22: W0611[pylint]: Unused ActionTypeEnum imported from copr_common.enums
/backend/copr_backend/actions.py:29: C0411[pylint]: first party import "from copr_backend.worker_manager import WorkerManager, QueueTask" should be placed before "from .sign import create_user_keys, CoprKeygenRequestError"
/backend/copr_backend/worker_manager.py:44: R0205[pylint]: Class 'QueueTask' inherits from object, can be safely removed from bases in python3
/backend/tests/test_worker_manager.py:17: C0413[pylint]: Import "from copr_backend.actions import ActionWorkerManager, ActionQueueTask, Action, ActionType" should be placed at the top of the module
/backend/tests/test_worker_manager.py:18: C0413[pylint]: Import "from copr_backend.worker_manager import JobQueue, WorkerManager, QueueTask" should be placed at the top of the module
/backend/tests/test_worker_manager.py:29: W0223[pylint]: Method 'finish_task' is abstract in class 'WorkerManager' but is not overridden
/backend/tests/test_worker_manager.py:29: W0223[pylint]: Method 'has_worker_ended' is abstract in class 'WorkerManager' but is not overridden
/backend/tests/test_worker_manager.py:29: W0223[pylint]: Method 'is_worker_alive' is abstract in class 'WorkerManager' but is not overridden
/backend/tests/test_worker_manager.py:169:8: W0201[pylint]: Attribute 'frontend_client' defined outside __init__
/backend/tests/test_worker_manager.py:196:8: W0201[pylint]: Attribute 'environ' defined outside __init__
/backend/tests/test_worker_manager.py:214:8: W0201[pylint]: Attribute 'environ' defined outside __init__
/backend/tests/test_worker_manager.py:58:23: W0622[pylint]: Redefining built-in 'id'
/backend/tests/test_worker_manager.py:58:4: W0231[pylint]: __init__ method from base class 'QueueTask' is not called
/backend/tests/test_worker_manager.py:105: R0205[pylint]: Class 'BaseTestWorkerManager' inherits from object, can be safely removed from bases in python3
/backend/tests/test_worker_manager.py:17: W0611[pylint]: Unused ActionType imported from copr_backend.actions
/backend/tests/test_worker_manager.py:8: C0411[pylint]: standard import "import logging" should be placed before "import pytest"
/backend/tests/test_worker_manager.py:9: C0411[pylint]: standard import "import subprocess" should be placed before "import pytest"
/backend/tests/test_worker_manager.py:10: C0411[pylint]: standard import "from unittest.mock import MagicMock, patch" should be placed before "import pytest"

Please consider this:
https://pagure.io/fork/praiskup/copr/copr/c/a440162bd032c9a5258a61b101c3908b53a7f852

The task back-reference is not needed actually in the parent QueueTask. I noticed
this while re-working BuildJob to inherit from QueueTask.

The task back-reference is not needed actually in the parent QueueTask. I noticed
this while re-working BuildJob to inherit from QueueTask.

I thought it would make the implementation of BuildQueueTask easier. But I can move the __init__ method directly to ActionQueueTask if you want.

You think you will need a different constructor for BuildQueueTask?

You think you will need a different constructor for BuildQueueTask?

I'm not 100% sure, but very likely. It is even nicer if it is moved, because
we don't need the constructor from the WorkerManager perspective.

If you don't want to waste the time, I can move it in the separate PR.

I'm not 100% sure

Btw., I noticed this problem when pylint warned me that I didn't call super().__init__()when I should. This looked like unnecessary work at that time ... but also the reference to task
can hold unnecessary blob memory for no reason.

rebased onto fc4e6dd0331409fcb7b3564fb77db54020d9123f

4 years ago

7 new commits added

  • frontend: prioritize initial createrepo action
  • frontend: make ActionsLogic.send_* methods to return the generated action
  • frontend, backend: minimalize the transfered amount of action information
  • backend: process actions in regard to their priority
  • frontend: set lower priority for some actions
  • common: define default priority value based on action types
  • backend: move _handle_delete_builds to Delete class
4 years ago

I moved the __init__ as you suggested and rebased to squash some of the commits and to resolve merge commits from PR#1333.

Can you please rebase once more against master, or can I?

rebased onto 52a83fa

4 years ago

Thanks, from the rest non-test pylint issues:

/copr_backend/actions.py:231: W0223[pylint]: Method 'run' is abstract in class 'Action' but is not overridden
/copr_backend/actions.py:575:4: W0235[pylint]: Useless super delegation in method 'add_task'
/copr_backend/actions.py:23: W0611[pylint]: Unused DefaultActionPriorityEnum imported from copr_common.enums
/copr_backend/actions.py:23: W0611[pylint]: Unused ActionTypeEnum imported from copr_common.enums
/copr_backend/worker_manager.py:44: R0205[pylint]: Class 'QueueTask' inherits from object, can be safely removed from bases in python3
/copr_backend/actions.py:30: C0411[pylint]: first party import "from copr_backend.worker_manager import WorkerManager, QueueTask" should be placed before "from .sign import create_user_keys, CoprKeygenRequestError"

Except for the last one, don't you want to consider fixing them?

BE/FE seems to fail to build. You can potentially build updated common into @copr/copr-dev to fix the build?

7 new commits added

  • frontend: prioritize initial createrepo action
  • frontend: make ActionsLogic.send_* methods to return the generated action
  • frontend, backend: minimalize the transfered amount of action information
  • backend: process actions in regard to their priority
  • frontend: set lower priority for some actions
  • common: define default priority value based on action types
  • backend: move _handle_delete_builds to Delete class
4 years ago

Thanks, from the rest non-test pylint issues:

I fixed some (the unused imports and useless super call), but I think we should all talk about this and figure out some consensus, because

/copr_backend/actions.py:231: W0223[pylint]: Method 'run' is abstract in class 'Action' but is not overridden

Which is what I want. The class Delete(Action) is still an abstract class that DeleteProject, DeleteBuild, etc use as their parent. Those classes override the run method.

/copr_backend/worker_manager.py:44: R0205[pylint]: Class 'QueueTask' inherits from object, can be safely removed from bases in python3

Not sure if I want to do that. The only thing that will come from this is an inconsistency between existing code and new code. I would much rather change all existing classes and then comply with this. If we want this change in the first place.

/tests/test_worker_manager.py:196:8: W0201[pylint]: Attribute 'environ' defined outside init

There is no way how this PR could cause this warning. I think.

/copr_backend/actions.py:30: C0411[pylint]: first party import "from copr_backend.worker_manager import WorkerManager, QueueTask" should be placed before "from .sign import create_user_keys, CoprKeygenRequestError"

This will happen in every PR because the imports are already in a wrong order and you are just appending to that line.

My point (probably) is, we should brainstorm about this and globally ignore some warnings.

Which is what I want. The class Delete(Action) is still an abstract class that DeleteProject, DeleteBuild, etc use as their parent. Those classes override the run method.

Sounds like proper reason to put inline ignore:

# This is semi-abstract class.
# pylint: disable=...

It's useful for readers as well, not only pylint.

the 'class Blah(object)' thing
The only thing that will come from this is an inconsistency between existing code and new code.

My preference will be:
- don't add code which brings new issues
- don't change existing code just because it produces warnings

That said, I personally don't care about consistency. This case is not about style, the
object inheritance has no reason nowadays. That said, we definitely can talk about
this on meeting, feel free to add a ticket.

I don't push you to change this, so I'll ignore this one.

Attribute 'environ' defined outside init
There is no way how this PR could cause this warning. I think.

This is because csdiff ignores line numbers to not produce too much output.
It means that there were N errors like that before, and now there's N+1, and
csdiff is printing the last occurrence in the file :-( this is painful. If we know
how to handle these kind of problems, it would be appreciated addition to
csdiff I believe.

This will happen in every PR because the imports are already in a wrong order and you are just appending to that line.

In-line # pylint: disable= ?

My point (probably) is, we should brainstorm about this and globally ignore some warnings.

Sure, grab the list of things you want to ignore to the meeting. But we don't have to
wait with this PR (it is sort of blocking my work on BuildWorkerManager, as I build on
top of this).

While I'm on it, I'm really thinking about removing several ignores from ./backend/pylintrc,
because e.g. C0111 is something which we should pay attention to.

Personally I would go with this PR as is, make a card for the next meeting to discuss the particular warnings and start complying with it. Of course, I fixed the obvious ones for this PR, I am not ignorant. But I feel that the rest of them deserves a proper discussion.

Sure, the list of detect types is crucial for the meeting (we already have
large ugly blacklists, and those be made smaller, not larger).

That said, all of those defects we discussed here are IMO useful in
general. So disabling them per-project is wrong idea.

Personally I would go with this PR as is

I'd personally prefer this to be applied on top of this PR:
https://paste.centos.org/view/4300ef67

I'll go through this PR later today and give +1 anyway, though.

1 new commit added

  • backend: fix pylint for PR#1318
4 years ago

Pull-Request has been merged by praiskup

4 years ago
Metadata
Flags
jenkins
failure
Build #39 failed (commit: 3804551d)
4 years ago
jenkins
failure
Build #38 failed (commit: 3804551d)
4 years ago
jenkins
failure
Build #37 failed (commit: 3804551d)
4 years ago
Copr build
success (100%)
#1339690
4 years ago
Copr build
failure
#1339689
4 years ago
Copr build
success (100%)
#1339688
4 years ago
jenkins
failure
Build #36 failed (commit: 8156ef90)
4 years ago
Copr build
success (100%)
#1339544
4 years ago
Copr build
failure
#1339543
4 years ago
Copr build
success (100%)
#1339542
4 years ago
Copr build
success (100%)
#1339527
4 years ago
Copr build
failure
#1339526
4 years ago
Copr build
success (100%)
#1339525
4 years ago
jenkins
failure
Build #35 failed (commit: 4783495c)
4 years ago
Copr build
success (100%)
#1339507
4 years ago
Copr build
failure
#1339506
4 years ago
Copr build
failure
#1339505
4 years ago
jenkins
failure
Build #32 failed (commit: e00fadfb)
4 years ago
Copr build
success (100%)
#1339494
4 years ago
Copr build
failure
#1339493
4 years ago
Copr build
failure
#1339492
4 years ago
jenkins
failure
Build #31 failed (commit: 7c21f97c)
4 years ago
Copr build
success (100%)
#1339474
4 years ago
Copr build
failure
#1339473
4 years ago
Copr build
failure
#1339472
4 years ago
jenkins
failure
Build #29 failed (commit: 43c3691f)
4 years ago
Copr build
success (100%)
#1337239
4 years ago
Copr build
failure
#1337238
4 years ago
Copr build
failure
#1337237
4 years ago
jenkins
failure
Build #28 failed (commit: a9a69050)
4 years ago
Copr build
pending (50%)
#1337221
4 years ago
Copr build
failure
#1337220
4 years ago
Copr build
failure
#1337219
4 years ago
jenkins
failure
Build #27 failed (commit: 309a7546)
4 years ago
Copr build
success (100%)
#1337209
4 years ago
Copr build
failure
#1337208
4 years ago
Copr build
failure
#1337207
4 years ago
Copr build
success (100%)
#1331877
4 years ago
Copr build
failure
#1331876
4 years ago
Copr build
failure
#1331875
4 years ago
Copr build
success (100%)
#1329243
4 years ago
Copr build
failure
#1329242
4 years ago
Copr build
failure
#1329241
4 years ago
Copr build
success (100%)
#1329237
4 years ago
Copr build
failure
#1329236
4 years ago
Copr build
failure
#1329234
4 years ago
Copr build
failure
#1326216
4 years ago
Copr build
success (100%)
#1326215
4 years ago
Copr build
failure
#1324752
4 years ago
Copr build
success (100%)
#1324751
4 years ago
Copr build
failure
#1323593
4 years ago
Copr build
success (100%)
#1323592
4 years ago
Copr build
failure
#1315153
4 years ago
Copr build
pending (50%)
#1315152
4 years ago