#1124 Add retire command to mbs-manager
Merged 5 years ago by mprahl. Opened 5 years ago by lucarval.
lucarval/fm-orchestrator retire-builds-cli into master

@@ -22,7 +22,7 @@
  # Written by Matt Prahl <mprahl@redhat.com> except for the test functions
  
  from __future__ import print_function
- from flask_script import Manager
+ from flask_script import Manager, prompt_bool
  from functools import wraps
  import flask_migrate
  import logging
@@ -167,6 +167,55 @@
              raise RuntimeError('Module build failed')
  
  
+ @manager.option('identifier', metavar='NAME:STREAM[:VERSION[:CONTEXT]]',
+                 help='Identifier for selecting module builds to retire')
+ @manager.option('--confirm', action='store_true', default=False,
+                 help='Perform retire operation without prompting')
+ def retire(identifier, confirm=False):
+     """ Retire module build(s) by placing them into 'garbage' state.
+     """
+     # Parse identifier and build query
+     parts = identifier.split(':')
+     if len(parts) < 2:
+         raise ValueError('Identifier must contain at least NAME:STREAM')
+     if len(parts) >= 5:
+         raise ValueError('Too many parts in identifier')
+ 
+     filter_by_kwargs = {
+         'state': models.BUILD_STATES['ready'],
+         'name': parts[0],
+         'stream': parts[1],
+     }
+ 
+     if len(parts) >= 3:
+         filter_by_kwargs['version'] = parts[2]
+     if len(parts) >= 4:
+         filter_by_kwargs['context'] = parts[3]
+ 
+     # Find module builds to retire
+     module_builds = db.session.query(models.ModuleBuild).filter_by(**filter_by_kwargs).all()
+ 
+     if not module_builds:
+         logging.info('No module builds found.')
+         return
+ 
+     logging.info('Found %d module builds:', len(module_builds))
+     for build in module_builds:
+         logging.info('\t%s', ':'.join((build.name, build.stream, build.version, build.context)))
+ 
+     # Prompt for confirmation
+     is_confirmed = confirm or prompt_bool('Retire {} module builds?'.format(len(module_builds)))
+     if not is_confirmed:
+         logging.info('Module builds were NOT retired.')
+         return
+ 
+     # Retire module builds
+     for build in module_builds:
+         build.transition(conf, models.BUILD_STATES['garbage'], 'Module build retired')
+     db.session.commit()
+     logging.info('Module builds retired.')
+ 
+ 
  @console_script_help
  @manager.command
  def run(host=None, port=None, debug=None):

file added
+114
@@ -0,0 +1,114 @@
+ # Copyright (c) 2019  Red Hat, Inc.
+ #
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+ #
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+ #
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+ import pytest
+ from mock import patch
+ 
+ from module_build_service import conf
+ from module_build_service.manage import retire
+ from module_build_service.models import BUILD_STATES, ModuleBuild, make_session
+ from tests.test_models import init_data
+ 
+ 
+ class TestMBSManage:
+     def setup_method(self, test_method):
+         init_data()
+ 
+     @pytest.mark.parametrize(('identifier', 'is_valid'), (
+         ('', False),
+         ('spam', False),
+         ('spam:bacon', True),
+         ('spam:bacon:eggs', True),
+         ('spam:bacon:eggs:ham', True),
+         ('spam:bacon:eggs:ham:sausage', False),
+     ))
+     def test_retire_identifier_validation(self, identifier, is_valid):
+         if is_valid:
+             retire(identifier)
+         else:
+             with pytest.raises(ValueError):
+                 retire(identifier)
+ 
+     @pytest.mark.parametrize(('overrides', 'identifier', 'changed_count'), (
+         ({'name': 'pickme'}, 'pickme:eggs', 1),
+         ({'stream': 'pickme'}, 'spam:pickme', 1),
+         ({'version': 'pickme'}, 'spam:eggs:pickme', 1),
+         ({'context': 'pickme'}, 'spam:eggs:ham:pickme', 1),
+ 
+         ({}, 'spam:eggs', 3),
+         ({'version': 'pickme'}, 'spam:eggs', 3),
+         ({'context': 'pickme'}, 'spam:eggs:ham', 3),
+     ))
+     @patch('module_build_service.manage.prompt_bool')
+     def test_retire_build(self, prompt_bool, overrides, identifier, changed_count):
+         prompt_bool.return_value = True
+ 
+         with make_session(conf) as session:
+             module_builds = session.query(ModuleBuild).filter_by(state=BUILD_STATES['ready']).all()
+             # Verify our assumption of the amount of ModuleBuilds in database
+             assert len(module_builds) == 3
+ 
+             for x, build in enumerate(module_builds):
+                 build.name = 'spam'
+                 build.stream = 'eggs'
+                 build.version = 'ham'
+                 build.context = str(x)
+ 
+             for attr, value in overrides.items():
+                 setattr(module_builds[0], attr, value)
+ 
+             session.commit()
+ 
+             retire(identifier)
+             retired_module_builds = (
+                 session.query(ModuleBuild).filter_by(state=BUILD_STATES['garbage']).all())
+ 
+         assert len(retired_module_builds) == changed_count
+         for x in range(changed_count):
+             assert retired_module_builds[x].id == module_builds[x].id
+             assert retired_module_builds[x].state == BUILD_STATES['garbage']
+ 
+     @pytest.mark.parametrize(('confirm_prompt', 'confirm_arg', 'confirm_expected'), (
+         (True, False, True),
+         (True, True, True),
+         (False, False, False),
+         (False, True, True),
+     ))
+     @patch('module_build_service.manage.prompt_bool')
+     def test_retire_build_confirm_prompt(self, prompt_bool, confirm_prompt, confirm_arg,
+                                          confirm_expected):
+         prompt_bool.return_value = confirm_prompt
+ 
+         with make_session(conf) as session:
+             module_builds = session.query(ModuleBuild).filter_by(state=BUILD_STATES['ready']).all()
+             # Verify our assumption of the amount of ModuleBuilds in database
+             assert len(module_builds) == 3
+ 
+             for x, build in enumerate(module_builds):
+                 build.name = 'spam'
+                 build.stream = 'eggs'
+ 
+             session.commit()
+ 
+             retire('spam:eggs', confirm_arg)
+             retired_module_builds = (
+                 session.query(ModuleBuild).filter_by(state=BUILD_STATES['garbage']).all())
+ 
+         expected_changed_count = 3 if confirm_expected else 0
+         assert len(retired_module_builds) == expected_changed_count

With this command, admins can retire module builds that should no longer
be used as a dependency for other module builds.
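
For example (module names and values here are hypothetical; the command name and identifier format come from this PR's diff):

    mbs-manager retire mymodule:master
    mbs-manager retire mymodule:master:20190621111337:c2c572ec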

Fixes #1021

Signed-off-by: Luiz Carvalho <lucarval@redhat.com>

Any tips on how to add unit tests for this would be greatly appreciated.

Instead of setting the state directly, we should consider using build.transition since it will also publish a message of the state change among other things.
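
A minimal sketch of the distinction, reusing the transition call that ended up in the final diff (conf, db, and models are the objects already imported in manage.py):

    def retire_build(build):
        # Direct assignment would only flip the database column:
        #     build.state = models.BUILD_STATES['garbage']
        # transition() also publishes a message about the state change,
        # among other things:
        build.transition(conf, models.BUILD_STATES['garbage'], 'Module build retired')
        db.session.commit()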

I think we should also target builds in the failed state since a user is capable of resuming a failed module build.

Edit:
One of the issues about doing that is the poller would never catch these in the cleanup_stale_failed_builds method. In that method, the poller will untag all components from the module's tag. This is so that the RPMs can eventually get garbage collected by Koji. A hacky way around this is to set time_modified on those failed builds to be before the configured value for conf.cleanup_failed_builds_time. Then the poller would pick these up on its next run.

Edit2:
Never mind, MBS guards against this by checking whether the module is EOL in PDC before starting a build.
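
For reference, the back-dating hack described in the first edit might look roughly like this (a sketch only; conf and db are manage.py's objects, and it assumes conf.cleanup_failed_builds_time is a number of days):

    from datetime import datetime, timedelta

    def backdate_failed_build(build):
        # Push time_modified past the cleanup threshold so the poller's
        # cleanup_stale_failed_builds method picks the build up on its
        # next run. Assumes the config value is expressed in days.
        build.time_modified = datetime.utcnow() - timedelta(
            days=conf.cleanup_failed_builds_time + 1)
        db.session.commit()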

This is nice, but perhaps you should add the ability to bypass the confirmation as well (e.g. --force). That way a script running this doesn't have to deal with passing the right values to stdin.
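
(The merged diff implements this bypass as the --confirm flag.) A script could then skip the prompt entirely:

    mbs-manager retire mymodule:master --confirm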

> Any tips on how to add unit tests for this would be greatly appreciated.

An easy way might be to move the logic (minus the argument parsing) to a separate function that you can test. Then the command can just call that separate function.
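
A sketch of that shape, with a hypothetical helper name and the decorator options abbreviated:

    def retire_module_builds(identifier, confirm=False):
        # The command body from the diff moves here unchanged, so tests can
        # call it directly without going through flask_script parsing.
        parts = identifier.split(':')
        if len(parts) < 2:
            raise ValueError('Identifier must contain at least NAME:STREAM')
        # ... rest of the logic from the diff ...

    @manager.option('identifier', metavar='NAME:STREAM[:VERSION[:CONTEXT]]')
    @manager.option('--confirm', action='store_true', default=False)
    def retire(identifier, confirm=False):
        retire_module_builds(identifier, confirm)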

> I think we should also target builds in the failed state since a user is capable of resuming a failed module build.
> Edit:
> One of the issues about doing that is the poller would never catch these in the cleanup_stale_failed_builds method. In that method, the poller will untag all components from the module's tag. This is so that the RPMs can eventually get garbage collected by Koji. A hacky way around this is to set time_modified on those failed builds to be before the configured value for conf.cleanup_failed_builds_time. Then the poller would pick these up on its next run.

Will the poller also perform this operation if the build is in "garbage" state? If not, then we can't change the state from "failed" to "garbage" until the poller runs.

> I think we should also target builds in the failed state since a user is capable of resuming a failed module build.
> Edit:
> One of the issues about doing that is the poller would never catch these in the cleanup_stale_failed_builds method. In that method, the poller will untag all components from the module's tag. This is so that the RPMs can eventually get garbage collected by Koji. A hacky way around this is to set time_modified on those failed builds to be before the configured value for conf.cleanup_failed_builds_time. Then the poller would pick these up on its next run.
>
> Will the poller also perform this operation if the build is in "garbage" state? If not, then we can't change the state from "failed" to "garbage" until the poller runs.

Does it make sense to enhance the poller to also take into account builds in "garbage" state?

> I think we should also target builds in the failed state since a user is capable of resuming a failed module build.
> Edit:
> One of the issues about doing that is the poller would never catch these in the cleanup_stale_failed_builds method. In that method, the poller will untag all components from the module's tag. This is so that the RPMs can eventually get garbage collected by Koji. A hacky way around this is to set time_modified on those failed builds to be before the configured value for conf.cleanup_failed_builds_time. Then the poller would pick these up on its next run.
>
> Will the poller also perform this operation if the build is in "garbage" state? If not, then we can't change the state from "failed" to "garbage" until the poller runs.

The poller will only process failed builds. This is because we don't want to mess with tags of successful builds.

> I think we should also target builds in the failed state since a user is capable of resuming a failed module build.
> Edit:
> One of the issues about doing that is the poller would never catch these in the cleanup_stale_failed_builds method. In that method, the poller will untag all components from the module's tag. This is so that the RPMs can eventually get garbage collected by Koji. A hacky way around this is to set time_modified on those failed builds to be before the configured value for conf.cleanup_failed_builds_time. Then the poller would pick these up on its next run.
>
> Will the poller also perform this operation if the build is in "garbage" state? If not, then we can't change the state from "failed" to "garbage" until the poller runs.
>
> Does it make sense to enhance the poller to also take into account builds in "garbage" state?

I don't think so since we don't want to cleanup previously successful builds since they might be in use elsewhere. Did you have something else in mind?

rebased onto 8f031d2c61293359321e779195a351f8a3755ae0

5 years ago

Added unit tests; I'll address comments next.

rebased onto 650d32705f9eb51ede627971bb495195bf70251a

5 years ago

rebased onto 7a38d7fb6edbc555ff5b243577489b033c8c977f

5 years ago

@mprahl, I think I've addressed all your comments. PTAL.

Another thought, as well as a suggestion: use a regular expression like this:

    import re

    match = re.match(r'^([^:]+):([^:]+)(:(\d+))?(:(.+))?$', 'module:1')
    if not match:
        raise ValueError('error: ...')
    name, stream, _, version, _, context = match.groups()

With this method, it is easier to validate the input N:S:V:C in more detail by just modifying the regular expression.
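
For instance, requiring the optional version to be a 14-digit timestamp (assuming MBS versions follow that format; illustrative only) is a change to a single group:

    match = re.match(r'^([^:]+):([^:]+)(:(\d{14}))?(:(.+))?$', identifier)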

pretty please pagure-ci rebuild

5 years ago

Can you use a different variable here? It's confusing whenever function parameters are overridden.
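
Presumably this refers to the tests' prompt_bool argument shadowing the patched module_build_service.manage.prompt_bool. Renaming the mock parameter would avoid the shadowing, e.g.:

    @patch('module_build_service.manage.prompt_bool')
    def test_retire_build(self, mock_prompt_bool, overrides, identifier, changed_count):
        mock_prompt_bool.return_value = True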

This import should be in the group with pytest.

This import should be with the group of local imports.

@lucarval I left a few minor comments. Once addressed, this is good to merge!

> Another thought, as well as a suggestion: use a regular expression like this:
>
>     match = re.match(r'^([^:]+):([^:]+)(:(\d+))?(:(.+))?$', 'module:1')
>     if not match:
>         raise ValueError('error: ...')
>     name, stream, _, version, _, context = match.groups()
>
> With this method, it is easier to validate the input N:S:V:C in more detail by just modifying the regular expression.

I try to avoid regular expressions whenever possible. They are very powerful, but they can quickly become very difficult to maintain. Personally, I still prefer the existing approach.

rebased onto aae56fcbe7e51d4fb6e2a7f3c09c2414dd29794c

5 years ago

@mprahl, thanks! Comments addressed.

rebased onto 64c2e7ab703496a26bca01eb586745db659c6e38

5 years ago

rebased onto e2b70bdd7f2a115936eb00b933ae5f06cc750eda

5 years ago

pretty please pagure-ci rebuild

5 years ago

rebased onto 5921c10

5 years ago

The Jenkins job is failing due to merge conflicts. I don't really see a conflict, though.

Pull-Request has been merged by mprahl

5 years ago