#1762 frontend: unique name constraint in Copr
Merged a year ago by praiskup. Opened a year ago by praiskup.
copr/ praiskup/copr unique-copr-name-constraint  into  main

@@ -0,0 +1,35 @@ 

+ """

+ Add a Copr "name" unique constraint, issue #172

+ 

+ Revision ID: 808912fe46d3

+ Revises: 6b48324e9264

+ Create Date: 2021-04-07 12:21:29.814687

+ """

+ 

+ import sqlalchemy as sa

+ from alembic import op

+ 

+ 

+ revision = '808912fe46d3'

+ down_revision = '6b48324e9264'

+ 

+ def upgrade():

+     text = sa.text('deleted is not true and group_id is null')

+     op.create_index(

+         'copr_name_for_user_uniq', 'copr', ['user_id', 'name'],

+         unique=True,

+         postgresql_where=text,

+         sqlite_where=text,

+     )

+ 

+     text = sa.text('deleted is not true and group_id is not null')

+     op.create_index(

+         'copr_name_in_group_uniq', 'copr', ['group_id', 'name'],

+         unique=True,

+         postgresql_where=text,

+         sqlite_where=text,

+     )

+ 

+ def downgrade():

+     op.drop_index('copr_name_in_group_uniq', table_name='copr')

+     op.drop_index('copr_name_for_user_uniq', table_name='copr')

@@ -7,7 +7,7 @@ 

  from fnmatch import fnmatch

  import modulemd_tools.yaml

  

- from sqlalchemy import outerjoin

+ from sqlalchemy import outerjoin, text

  from sqlalchemy.ext.associationproxy import association_proxy

  from sqlalchemy.orm import column_property, validates

  from sqlalchemy.event import listens_for
@@ -251,6 +251,8 @@ 

                              name="copr_score_copr_id_user_id_uniq"),

      )

  

+ _group_unique_where = text("deleted is not true and group_id is not null")

+ _user_unique_where = text("deleted is not true and group_id is null")

  

  class _CoprPublic(db.Model, helpers.Serializer, CoprSearchRelatedData):

      """
@@ -262,6 +264,16 @@ 

      __table_args__ = (

          db.Index('copr_name_group_id_idx', 'name', 'group_id'),

Shouldn't this index be removed now that we will have the unique one?

I'm not sure that the partial indexes (we are adding now) would perform as well as this one -- added here 71cf833 , if you want feel free to experiment with "explain analyze"
and drop it? In general, these are all per-copr indexes, which are pretty cheap (small sets), so for me it is safer to keep them all.

          db.Index('copr_deleted_name', 'deleted', 'name'),

+         db.Index("copr_name_in_group_uniq",

+                  "group_id", "name",

+                  unique=True,

+                  postgresql_where=_group_unique_where,

+                  sqlite_where=_group_unique_where),

+         db.Index("copr_name_for_user_uniq",

+                  "user_id", "name",

+                  unique=True,

+                  postgresql_where=_user_unique_where,

+                  sqlite_where=_user_unique_where),

      )

  

      id = db.Column(db.Integer, primary_key=True)

@@ -354,15 +354,19 @@ 

          for build_id in [self.b1.id, self.b2.id, self.b3.id, self.b4.id]:

              BuildsLogic.get(build_id).one()

  

-         self.c1.user = self.u2

          build_ids = [self.b1.id, self.b4.id]

+         with pytest.raises(BadRequest) as err_msg:

+             BuildsLogic.delete_builds(self.u1, build_ids)

+         assert "Can not delete builds from more project" in str(err_msg.value)

+ 

+         self.b3.source_status = StatusEnum("failed")

+         build_ids = [self.b3.id, self.b4.id]

          BuildsLogic.delete_builds(self.u2, build_ids)

-         self.db.session.commit()

  

          assert len(ActionsLogic.get_many().all()) == 1

  

          with pytest.raises(NoResultFound):

-             BuildsLogic.get(self.b1.id).one()

+             BuildsLogic.get(self.b3.id).one()

          with pytest.raises(NoResultFound):

              BuildsLogic.get(self.b4.id).one()

  

Build succeeded.

Will it work the way we need it? If I do not have a unique combination of user_id, group_id, and name, then the index will not be created — but that doesn't mean such data will not be stored in the database, or am I wrong?

Will it work the way we need it?

Actually ... it's wrong, we need two indexes, one for group and one for user_id (when group_id is null).

but it doesn't mean that such data will not be stored in the database, or am I wrong?

Do you mean the existing databases?
Such data shouldn't exist hopefully, as we have software checks against that.
The point is to install DB-side protection ... so we can e.g. safely play with DB
using manage.py shell etc.

rebased onto 7704279de015d5e122d0ea2f4e074adaa433e008

a year ago

Build succeeded.

rebased onto b8a64fa96ba02692dcde03550cbd2deb2c7ca216

a year ago

Build succeeded.

rebased onto b68d383

a year ago

Build succeeded.

Can be tested e.g. by script from https://github.com/praiskup/copr-maint :
./copr-new-project --owner praiskup some-name --admin frostyx --admin schlupov
./copr-new-project --owner praiskup some-name
...
(psycopg2.errors.UniqueViolation) duplicate key value violates unique constraint "ownername_copr_dir_uniq"
...

Shouldn't this index be removed now that we will have the unique one?

I'm not sure that the partial indexes (we are adding now) would perform as well as this one -- added here 71cf833 , if you want feel free to experiment with "explain analyze"
and drop it? In general, these are all per-copr indexes, which are pretty cheap (small sets), so for me it is safer to keep them all.

In general, I like the solution to the problem; I will not stop this PR from merging — at worst I will remove the second index in another PR.

Ok, but, ... is that +1? :-)

Ok, but, ... is that +1? :-)

yes :)
+1

Pull-Request has been merged by praiskup

a year ago