#49558 Issue 49043 - Add a test suite
Closed 3 years ago by spichugi. Opened 5 years ago by spichugi.
spichugi/389-ds-base conflict_repl  into  master

@@ -13,14 +13,8 @@ 

  from lib389.topologies import topology_m4 as topo_m4

  from . import get_repl_entries

  from lib389.idm.user import UserAccount

- 

  from lib389.replica import ReplicationManager

- 

- from lib389._constants import (BACKEND_NAME, DEFAULT_SUFFIX, LOG_REPLICA, REPLICA_RUV_FILTER,

-                               ReplicaRole, REPLICATION_BIND_DN, REPLICATION_BIND_PW,

-                               REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT, defaultProperties,

-                               RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT,

-                               DN_DM, PASSWORD, LOG_DEFAULT, RA_ENABLED, RA_SCHEDULE)

+ from lib389._constants import *

  

  TEST_ENTRY_NAME = 'mmrepl_test'

  TEST_ENTRY_DN = 'uid={},{}'.format(TEST_ENTRY_NAME, DEFAULT_SUFFIX)
@@ -418,7 +412,7 @@ 

      m2.deleteErrorLogs()

  

      log.info('Set replication loglevel')

-     m2.setLogLevel(LOG_REPLICA)

+     m2.config.loglevel((ErrorLog.REPLICA,))

  

      log.info('Modifying entry {} - change userpassword on master 2'.format(TEST_ENTRY_DN))

      test_user_m1 = UserAccount(topo_m4.ms["master1"], TEST_ENTRY_DN)

@@ -30,7 +30,7 @@ 

      """

      master = topo.ms["master1"]

  

-     master.config.loglevel((LOG_REPLICA,), 'error')

+     master.config.loglevel((ErrorLog.REPLICA,), 'error')

  

      cl = Changelog5(master)

      cl.set_max_entries('2')
@@ -41,7 +41,7 @@ 

      """Configure logging and changelog max age

      """

      master = topo.ms["master1"]

-     master.config.loglevel((LOG_REPLICA,), 'error')

+     master.config.loglevel((ErrorLog.REPLICA,), 'error')

  

      cl = Changelog5(master)

      cl.set_max_age('5')

@@ -0,0 +1,872 @@ 

+ # --- BEGIN COPYRIGHT BLOCK ---

+ # Copyright (C) 2018 Red Hat, Inc.

+ # All rights reserved.

+ #

+ # License: GPL (version 3 or any later version).

+ # See LICENSE for details.

+ # --- END COPYRIGHT BLOCK ---

+ #

+ import os

+ import time

+ import logging

+ import ldap

+ import pytest

+ from itertools import permutations

+ from lib389._constants import *

+ from lib389.idm.nscontainer import nsContainers

+ from lib389.idm.user import UserAccounts

+ from lib389.idm.group import Groups

+ from lib389.idm.organisationalunit import OrganisationalUnits

+ from lib389.replica import ReplicationManager

+ from lib389.agreement import Agreements

+ from lib389.plugins import MemberOfPlugin

+ 

+ DEBUGGING = os.getenv("DEBUGGING", default=False)

+ if DEBUGGING:

+     logging.getLogger(__name__).setLevel(logging.DEBUG)

+ else:

+     logging.getLogger(__name__).setLevel(logging.INFO)

+ log = logging.getLogger(__name__)

+ 

+ 

+ def _create_user(users, user_num, group_num=2000, sleep=False):

+     """Creates user entry"""

+ 

+     user = users.create_test_user(user_num, group_num)

+     if sleep:

+         time.sleep(1)

+     return user

+ 

+ 

+ def _rename_user(users, user_num, new_num, sleep=False):

+     """Rename user entry"""

+ 

+     assert user_num != new_num, "New user number should not be the same as the old one"

+ 

+     user = users.get('test_user_{}'.format(user_num))

+     user.rename('uid=test_user_{}'.format(new_num))

+     if sleep:

+         time.sleep(1)

+ 

+ 

+ def _modify_user(users, user_num, sleep=False):

+     """Modify user entry"""

+ 

+     user = users.get('test_user_{}'.format(user_num))

+     user.replace("homeDirectory", "/home/test_user0{}".format(user_num))

+     if sleep:

+         time.sleep(1)

+     time.sleep(1)

+ 

+ 

+ def _delete_user(users, user_num, sleep=False):

+     """Delete user entry"""

+ 

+     user = users.get('test_user_{}'.format(user_num))

+     user.delete()

+     if sleep:

+         time.sleep(1)

+     time.sleep(1)

+ 

+ 

+ def _create_group(groups, num, member, sleep=False):

+     """Creates group entry"""

+ 

+     group_props = {'cn': 'test_group_{}'.format(num),

+                    'member': member}

+     group = groups.create(properties=group_props)

+     if sleep:

+         time.sleep(1)

+     return group

+ 

+ 

+ def _delete_group(groups, num, sleep=False):

+     """Delete group entry"""

+ 

+     group = groups.get('test_group_{}'.format(num))

+     group.delete()

+     if sleep:

+         time.sleep(1)

+ 

+ 

+ def _create_container(inst, dn, name, sleep=False):

+     """Creates container entry"""

+ 

+     conts = nsContainers(inst, dn)

+     cont = conts.create(properties={'cn': name})

+     if sleep:

+         time.sleep(1)

+     return cont

+ 

+ 

+ def _delete_container(cont, sleep=False):

+     """Deletes container entry"""

+ 

+     cont.delete()

+     if sleep:

+         time.sleep(1)

+ 

+ 

+ def _test_base(topology):

+     """Add test container for entries, enable plugin logging,

+     audit log, error log for replica and access log for internal

+     """

+ 

+     M1 = topology.ms["master1"]

+ 

+     conts = nsContainers(M1, SUFFIX)

+     test_base = conts.create(properties={'cn': 'test_container'})

+ 

+     for inst in topology:

+         inst.config.loglevel([ErrorLog.DEFAULT, ErrorLog.REPLICA], service='error')

+         inst.config.loglevel([AccessLog.DEFAULT, AccessLog.INTERNAL], service='access')

+         inst.config.set('nsslapd-plugin-logging', 'on')

+         inst.config.enable_log('audit')

+         inst.restart()

+ 

+     return test_base

+ 

+ 

+ def _delete_test_base(inst, test_base_dn):

+     """Delete test container with entries and entry conflicts"""

+ 

+     ents = inst.search_s(test_base_dn, ldap.SCOPE_SUBTREE, filterstr="(|(objectclass=*)(objectclass=ldapsubentry))")

+ 

+     for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True):

+         log.debug("Delete entry children {}".format(ent.dn))

+         try:

+             inst.delete_ext_s(ent.dn)

+         except ldap.NO_SUCH_OBJECT:  # For the case with objectclass: glue entries

+             pass

+ 

+ 

+ @pytest.fixture

+ def test_base(topology_m2, request):

+     tb = _test_base(topology_m2)

+ 

+     def fin():

+         if not DEBUGGING:

+             _delete_test_base(topology_m2.ms["master1"], tb.dn)

+     request.addfinalizer(fin)

+ 

+     return tb

+ 

+ 

+ @pytest.fixture

+ def test_base_m3(topology_m3, request):

+     tb = _test_base(topology_m3)

+ 

+     def fin():

+         if not DEBUGGING:

+             _delete_test_base(topology_m3.ms["master1"], tb.dn)

+     request.addfinalizer(fin)

+ 

+     return tb

+ 

+ 

+ class TestTwoMasters:

+     def test_add_modrdn(self, topology_m2, test_base):

+         """Check that conflict properly resolved for create - modrdn operations

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908ebb

+         :setup: Two master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Add five users to m1 and wait for replication to happen

+             2. Pause replication

+             3. Create an entry on m1 and m2

+             4. Create an entry on m1 and rename on m2

+             5. Rename an entry on m1 and create on m2

+             6. Rename an entry on m1 and rename on m2

+             7. Rename an entry on m1 and rename on m2. Use different entries

+                but rename them to the same entry

+             8. Resume replication

+             9. Check that the entries on both masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+             9. It should pass

+         """

+ 

+         M1 = topology_m2.ms["master1"]

+         M2 = topology_m2.ms["master2"]

+         test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)

+         test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None)

+         repl = ReplicationManager(SUFFIX)

+ 

+         for user_num in range(1000, 1005):

+             _create_user(test_users_m1, user_num)

+ 

+         repl.test_replication(M1, M2)

+         topology_m2.pause_all_replicas()

+ 

+         log.info("Test create - modrdn")

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _rename_user(test_users_m2, 1000, user_num, sleep=True)

+ 

+         user_num += 1

+         _rename_user(test_users_m1, 1001, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+ 

+         user_num += 1

+         _rename_user(test_users_m1, 1002, user_num, sleep=True)

+         _rename_user(test_users_m2, 1002, user_num, sleep=True)

+ 

+         user_num += 1

+         _rename_user(test_users_m1, 1003, user_num, sleep=True)

+         _rename_user(test_users_m2, 1004, user_num)

+ 

+         topology_m2.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m2)

+ 

+         user_dns_m1 = [user.dn for user in test_users_m1.list()]

+         user_dns_m2 = [user.dn for user in test_users_m2.list()]

+         assert set(user_dns_m1) == set(user_dns_m2)

+ 

+     def test_complex_add_modify_modrdn_delete(self, topology_m2, test_base):

+         """Check that conflict properly resolved for complex operations

+         which involve add, modify, modrdn and delete

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908eb1

+         :setup: Two master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Add ten users to m1 and wait for replication to happen

+             2. Pause replication

+             3. Test add-del on m1 and add on m2

+             4. Test add-mod on m1 and add on m2

+             5. Test add-modrdn on m1 and add on m2

+             6. Test multiple add, modrdn

+             7. Test Add-del on both masters

+             8. Test modrdn-modrdn

+             9. Test modrdn-del

+             10. Resume replication

+             11. Check that the entries on both masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+             9. It should pass

+             10. It should pass

+             11. It should pass

+         """

+ 

+         M1 = topology_m2.ms["master1"]

+         M2 = topology_m2.ms["master2"]

+ 

+         test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)

+         test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None)

+         repl = ReplicationManager(SUFFIX)

+ 

+         for user_num in range(1100, 1110):

+             _create_user(test_users_m1, user_num)

+ 

+         repl.test_replication(M1, M2)

+         topology_m2.pause_all_replicas()

+ 

+         log.info("Test add-del on M1 and add on M2")

+         user_num += 1

+         _create_user(test_users_m1, user_num)

+         _delete_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+         _delete_user(test_users_m1, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m2, user_num, sleep=True)

+         _create_user(test_users_m1, user_num)

+         _delete_user(test_users_m1, user_num)

+ 

+         log.info("Test add-mod on M1 and add on M2")

+         user_num += 1

+         _create_user(test_users_m1, user_num)

+         _modify_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+         _modify_user(test_users_m1, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m2, user_num, sleep=True)

+         _create_user(test_users_m1, user_num)

+         _modify_user(test_users_m1, user_num)

+ 

+         log.info("Test add-modrdn on M1 and add on M2")

+         user_num += 1

+         _create_user(test_users_m1, user_num)

+         _rename_user(test_users_m1, user_num, user_num+20, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+         _rename_user(test_users_m1, user_num, user_num+20, sleep=True)

+ 

+         user_num += 1

+         _create_user(test_users_m2, user_num, sleep=True)

+         _create_user(test_users_m1, user_num)

+         _rename_user(test_users_m1, user_num, user_num+20)

+ 

+         log.info("Test multiple add, modrdn")

+         user_num += 1

+         _create_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num, sleep=True)

+         _rename_user(test_users_m1, user_num, user_num+20)

+         _create_user(test_users_m1, user_num, sleep=True)

+         _modify_user(test_users_m2, user_num, sleep=True)

+ 

+         log.info("Add - del on both masters")

+         user_num += 1

+         _create_user(test_users_m1, user_num)

+         _delete_user(test_users_m1, user_num, sleep=True)

+         _create_user(test_users_m2, user_num)

+         _delete_user(test_users_m2, user_num, sleep=True)

+ 

+         log.info("Test modrdn - modrdn")

+         user_num += 1

+         _rename_user(test_users_m1, 1109, 1129, sleep=True)

+         _rename_user(test_users_m2, 1109, 1129, sleep=True)

+ 

+         log.info("Test modrdn - del")

+         user_num += 1

+         _rename_user(test_users_m1, 1100, 1120, sleep=True)

+         _delete_user(test_users_m2, 1100)

+ 

+         user_num += 1

+         _delete_user(test_users_m2, 1101, sleep=True)

+         _rename_user(test_users_m1, 1101, 1121)

+ 

+         topology_m2.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m2)

+         time.sleep(30)

+ 

+         user_dns_m1 = [user.dn for user in test_users_m1.list()]

+         user_dns_m2 = [user.dn for user in test_users_m2.list()]

+         assert set(user_dns_m1) == set(user_dns_m2)

+ 

+     def test_memberof_groups(self, topology_m2, test_base):

+         """Check that conflict properly resolved for operations

+         with memberOf and groups

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908eb3

+         :setup: Two master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Enable memberOf plugin

+             2. Add 30 users to m1 and wait for replication to happen

+             3. Pause replication

+             4. Create a group on m1 and m2

+             5. Create a group on m1 and m2, delete from m1

+             6. Create a group on m1, delete it from m1, and create it on m2

+             7. Create a group on m2 and m1, delete from m1

+             8. Create two different groups on m2

+             9. Resume replication

+             10. Check that the entries on both masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+             9. It should pass

+             10. It should pass

+         """

+ 

+         M1 = topology_m2.ms["master1"]

+         M2 = topology_m2.ms["master2"]

+         test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)

+         test_groups_m1 = Groups(M1, test_base.dn, rdn=None)

+         test_groups_m2 = Groups(M2, test_base.dn, rdn=None)

+ 

+         repl = ReplicationManager(SUFFIX)

+ 

+         for inst in topology_m2.ms.values():

+             memberof = MemberOfPlugin(inst)

+             memberof.enable()

+             agmt = Agreements(inst).list()[0]

+             agmt.replace_many(('nsDS5ReplicatedAttributeListTotal',

+                                '(objectclass=*) $ EXCLUDE '),

+                               ('nsDS5ReplicatedAttributeList',

+                                '(objectclass=*) $ EXCLUDE memberOf'))

+             inst.restart()

+         user_dns = []

+         for user_num in range(10):

+             user_trio = []

+             for num in range(0, 30, 10):

+                 user = _create_user(test_users_m1, 1200 + user_num + num)

+                 user_trio.append(user.dn)

+             user_dns.append(user_trio)

+ 

+         repl.test_replication(M1, M2)

+         topology_m2.pause_all_replicas()

+ 

+         log.info("Check a simple conflict")

+         group_num = 0

+         _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)

+         _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

+ 

+         log.info("Check a add - del")

+         group_num += 1

+         _create_group(test_groups_m1, group_num, user_dns[group_num], sleep=True)

+         _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

+         _delete_group(test_groups_m1, group_num)

+ 

+         group_num += 1

+         _create_group(test_groups_m1, group_num, user_dns[group_num])

+         _delete_group(test_groups_m1, group_num, sleep=True)

+         _create_group(test_groups_m2, group_num, user_dns[group_num])

+ 

+         group_num += 1

+         _create_group(test_groups_m2, group_num, user_dns[group_num], sleep=True)

+         _create_group(test_groups_m1, group_num, user_dns[group_num])

+         _delete_group(test_groups_m1, group_num, sleep=True)

+ 

+         group_num += 1

+         _create_group(test_groups_m2, group_num, user_dns[group_num])

+         group_num += 1

+         _create_group(test_groups_m2, group_num, user_dns[group_num])

+ 

+         topology_m2.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m2)

+ 

+         group_dns_m1 = [group.dn for group in test_groups_m1.list()]

+         group_dns_m2 = [group.dn for group in test_groups_m2.list()]

+         assert set(group_dns_m1) == set(group_dns_m2)

+ 

+     def test_managed_entries(self, topology_m2):

+         """Check that conflict properly resolved for operations

+         with managed entries

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908eb4

+         :setup: Two master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Create ou=managed_people and ou=managed_groups under the default suffix

+             2. Configure the managed entries plugin and add a template entry under the suffix

+             3. Add a user to m1 and wait for replication to happen

+             4. Pause replication

+             5. Create a user on m1 and m2 with the same group ID on both masters

+             6. Create a user on m1 and m2 with a different group ID on each master

+             7. Resume replication

+             8. Check that the entries on both masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+         """

+ 

+         M1 = topology_m2.ms["master1"]

+         M2 = topology_m2.ms["master2"]

+         repl = ReplicationManager(SUFFIX)

+ 

+         ous = OrganisationalUnits(M1, DEFAULT_SUFFIX)

+         ou_people = ous.create(properties={'ou': 'managed_people'})

+         ou_groups = ous.create(properties={'ou': 'managed_groups'})

+ 

+         test_users_m1 = UserAccounts(M1, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))

+         test_users_m2 = UserAccounts(M2, DEFAULT_SUFFIX, rdn='ou={}'.format(ou_people.rdn))

+ 

+         # TODO: Refactor ManagedPlugin class functionality (also add configs and templates)

+         conts = nsContainers(M1, SUFFIX)

+         template = conts.create(properties={

+                                  'objectclass': 'top mepTemplateEntry extensibleObject'.split(),

+                                  'cn': 'MEP Template',

+                                  'mepRDNAttr': 'cn',

+                                  'mepStaticAttr': ['objectclass: posixGroup', 'objectclass: extensibleObject'],

+                                  'mepMappedAttr': ['cn: $uid', 'uid: $cn', 'gidNumber: $uidNumber']

+                                 })

+         repl.test_replication(M1, M2)

+ 

+         for inst in topology_m2.ms.values():

+             conts = nsContainers(inst, "cn={},{}".format(PLUGIN_MANAGED_ENTRY, DN_PLUGIN))

+             conts.create(properties={'objectclass': 'top extensibleObject'.split(),

+                                      'cn': 'config',

+                                      'originScope': ou_people.dn,

+                                      'originFilter': 'objectclass=posixAccount',

+                                      'managedBase': ou_groups.dn,

+                                      'managedTemplate': template.dn})

+             inst.restart()

+ 

+         _create_user(test_users_m1, 1, 1)

+ 

+         topology_m2.pause_all_replicas()

+ 

+         _create_user(test_users_m1, 2, 2, sleep=True)

+         _create_user(test_users_m2, 2, 2, sleep=True)

+ 

+         _create_user(test_users_m1, 3, 3, sleep=True)

+         _create_user(test_users_m2, 3, 33)

+ 

+         topology_m2.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m2)

+ 

+         user_dns_m1 = [user.dn for user in test_users_m1.list()]

+         user_dns_m2 = [user.dn for user in test_users_m2.list()]

+         assert set(user_dns_m1) == set(user_dns_m2)

+ 

+     def test_nested_entries_with_children(self, topology_m2, test_base):

+         """Check that conflict properly resolved for operations

+         with nested entries with children

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908eb5

+         :setup: Two master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Add 15 containers to m1 and wait for replication to happen

+             2. Pause replication

+             3. Create parent-child on master2 and master1

+             4. Create parent-child on master1 and master2

+             5. Create parent-child on master1 and master2 different child rdn

+             6. Create parent-child on master1 and delete parent on master2

+             7. Create parent on master1, delete it and parent-child on master2, delete them

+             8. Create parent on master1, delete it and parent-two children on master2

+             9. Create parent-two children on master1 and parent-child on master2, delete them

+             10. Create three subsets inside existing container entry, applying only part of changes on m2

+             11. Create more combinations of the subset with parent-child on m1 and parent on m2

+             12. Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2

+             13. Resume replication

+             14. Check that the entries on both masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+             9. It should pass

+             10. It should pass

+             11. It should pass

+             12. It should pass

+             13. It should pass

+             14. It should pass

+         """

+ 

+         M1 = topology_m2.ms["master1"]

+         M2 = topology_m2.ms["master2"]

+         repl = ReplicationManager(SUFFIX)

+         test_users_m1 = UserAccounts(M1, test_base.dn, rdn=None)

+         test_users_m2 = UserAccounts(M2, test_base.dn, rdn=None)

+         _create_user(test_users_m1, 4000)

+         _create_user(test_users_m1, 4001)

+ 

+         cont_list = []

+         for num in range(15):

+             cont = _create_container(M1, test_base.dn, 'sub{}'.format(num))

+             cont_list.append(cont)

+ 

+         repl.test_replication(M1, M2)

+ 

+         topology_m2.pause_all_replicas()

+ 

+         log.info("Create parent-child on master2 and master1")

+         _create_container(M2, test_base.dn, 'p0', sleep=True)

+         cont_p = _create_container(M1, test_base.dn, 'p0', sleep=True)

+         _create_container(M1, cont_p.dn, 'c0', sleep=True)

+         _create_container(M2, cont_p.dn, 'c0', sleep=True)

+ 

+         log.info("Create parent-child on master1 and master2")

+         cont_p = _create_container(M1, test_base.dn, 'p1', sleep=True)

+         _create_container(M2, test_base.dn, 'p1', sleep=True)

+         _create_container(M1, cont_p.dn, 'c1', sleep=True)

+         _create_container(M2, cont_p.dn, 'c1', sleep=True)

+ 

+         log.info("Create parent-child on master1 and master2 different child rdn")

+         cont_p = _create_container(M1, test_base.dn, 'p2', sleep=True)

+         _create_container(M2, test_base.dn, 'p2', sleep=True)

+         _create_container(M1, cont_p.dn, 'c2', sleep=True)

+         _create_container(M2, cont_p.dn, 'c3', sleep=True)

+ 

+         log.info("Create parent-child on master1 and delete parent on master2")

+         cont_num = 0

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+         _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)

+         _delete_container(cont_p_m2)

+ 

+         cont_num += 1

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)

+         _delete_container(cont_p_m2, sleep=True)

+ 

+         log.info("Create parent on master1, delete it and parent-child on master2, delete them")

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _delete_container(cont_p_m1, sleep=True)

+ 

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2)

+         _delete_container(cont_p_m2)

+ 

+         cont_num += 1

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2)

+         _delete_container(cont_p_m2, sleep=True)

+ 

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _delete_container(cont_p_m1)

+ 

+         log.info("Create parent on master1, delete it and parent-two children on master2")

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _delete_container(cont_p_m1, sleep=True)

+ 

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         _create_container(M2, cont_p_m2.dn, 'c0')

+         _create_container(M2, cont_p_m2.dn, 'c1')

+ 

+         cont_num += 1

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         _create_container(M2, cont_p_m2.dn, 'c0')

+         _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)

+ 

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _delete_container(cont_p_m1, sleep=True)

+ 

+         log.info("Create parent-two children on master1 and parent-child on master2, delete them")

+         cont_num += 1

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2)

+         _delete_container(cont_p_m2, sleep=True)

+ 

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0')

+         _create_container(M1, cont_p_m1.dn, 'c1')

+ 

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0')

+         _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

+ 

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2)

+         _delete_container(cont_p_m2, sleep=True)

+ 

+         log.info("Create three subsets inside existing container entry, applying only part of changes on m2")

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0')

+         _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

+         _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+ 

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0')

+         _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

+ 

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0')

+         _create_container(M1, cont_p_m1.dn, 'c0')

+         _create_container(M1, cont_p_m1.dn, 'c1', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0')

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2, sleep=True)

+ 

+         log.info("Create more combinations of the subset with parent-child on m1 and parent on m2")

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+         _delete_container(cont_p_m1, sleep=True)

+         cont_c_m2 = _create_container(M2, cont_p_m2.dn, 'c0')

+         _delete_container(cont_c_m2)

+         _delete_container(cont_p_m2, sleep=True)

+ 

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+         _delete_container(cont_p_m1, sleep=True)

+         _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

+ 

+         cont_num += 1

+         cont_p_m1 = _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_p_m2 = _create_container(M2, cont_list[cont_num].dn, 'p0', sleep=True)

+         cont_c_m1 = _create_container(M1, cont_p_m1.dn, 'c0', sleep=True)

+         _create_container(M2, cont_p_m2.dn, 'c0', sleep=True)

+         _delete_container(cont_c_m1, sleep=True)

+         _create_container(M2, cont_p_m2.dn, 'c1', sleep=True)

+         _delete_container(cont_p_m1, sleep=True)

+ 

+         log.info("Delete container on m1, modify user1 on m1, create parent on m2 and modify user2 on m2")

+         cont_num += 1

+         _delete_container(cont_list[cont_num])

+         _modify_user(test_users_m1, 4000, sleep=True)

+         _create_container(M2, cont_list[cont_num].dn, 'p0')

+         _modify_user(test_users_m2, 4001)

+ 

+         topology_m2.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m2, timeout=60)

+ 

+         conts_dns = {}

+         for num in range(1, 3):

+             inst = topology_m2.ms["master{}".format(num)]

+             conts_dns[inst.serverid] = []

+             conts = nsContainers(inst, test_base.dn)

+             for cont in conts.list():

+                 conts_p = nsContainers(inst, cont.dn)

+                 for cont_p in conts_p.list():

+                     conts_c = nsContainers(inst, cont_p.dn)

+                     conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()])

+                 conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()])

+             conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

+ 

+         assert set(conts_dns[M1.serverid]) == set(conts_dns[M2.serverid])

+ 

+         user_dns_m1 = [user.dn for user in test_users_m1.list()]

+         user_dns_m2 = [user.dn for user in test_users_m2.list()]

+         assert set(user_dns_m1) == set(user_dns_m2)

+ 

+ 

+ class TestThreeMasters:

+     def test_nested_entries(self, topology_m3, test_base_m3):

+         """Check that conflict properly resolved for operations

+         with nested entries with children

+ 

+         :id: 77f09b18-03d1-45da-940b-1ad2c2908eb6

+         :setup: Three master replication, test container for entries, enable plugin logging,

+                 audit log, error log for replica and access log for internal

+         :steps:

+             1. Add 11 containers to m1 and wait for replication to happen

+             2. Pause replication

+             3. Create two child entries under each of two entries

+             4. Create three child entries under each of three entries

+             5. Create two parents on m1 and m2, then on m1 - create a child and delete one parent,

+                on m2 - delete one parent and create a child

+             6. Test a few more parent-child combinations with three instances

+             7. Resume replication

+             8. Check that the entries on all three masters are the same and replication is working

+         :expectedresults:

+             1. It should pass

+             2. It should pass

+             3. It should pass

+             4. It should pass

+             5. It should pass

+             6. It should pass

+             7. It should pass

+             8. It should pass

+         """

+ 

+         M1 = topology_m3.ms["master1"]

+         M2 = topology_m3.ms["master2"]

+         M3 = topology_m3.ms["master3"]

+         repl = ReplicationManager(SUFFIX)

+ 

+         cont_list = []

+         for num in range(11):

+             cont = _create_container(M1, test_base_m3.dn, 'sub{}'.format(num))

+             cont_list.append(cont)

+ 

+         repl.test_replication(M1, M2)

+         repl.test_replication(M1, M3)

+ 

+         topology_m3.pause_all_replicas()

+ 

+         log.info("Create two child entries under each of two entries")

+         cont_num = -1

+         for num in range(2):

+             cont_num += 1

+             _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+             _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

+ 

+         log.info("Create three child entries under each of three entries")

+         for num in range(3):

+             cont_num += 1

+             _create_container(M1, cont_list[cont_num].dn, 'p0', sleep=True)

+             _create_container(M2, cont_list[cont_num].dn, 'p1', sleep=True)

+             _create_container(M3, cont_list[cont_num].dn, 'p2', sleep=True)

+ 

+         log.info("Create two parents on m1 and m2, then on m1 - create a child and delete one parent,"

+                  " on m2 - delete one parent and create a child")

+         for inst1, inst2 in ((M1, M2), (M2, M1)):

+             cont_num += 1

+             cont_p_m1_1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')

+             cont_p_m1_2 = _create_container(inst1, cont_list[cont_num].dn, 'p1', sleep=True)

+             cont_p_m2_1 = _create_container(inst2, cont_list[cont_num].dn, 'p0')

+             cont_p_m2_2 = _create_container(inst2, cont_list[cont_num].dn, 'p1', sleep=True)

+             _create_container(inst1, cont_p_m1_1.dn, 'c0', sleep=True)

+             _delete_container(cont_p_m2_1, sleep=True)

+             _delete_container(cont_p_m1_2, sleep=True)

+             _create_container(inst2, cont_p_m2_2.dn, 'c0', sleep=True)

+ 

+         log.info("Test a few more parent-child combinations on three instances")

+         for inst1, inst2, inst3 in ((M1, M2, M3), (M2, M1, M3), (M3, M1, M2)):

+             cont_num += 1

+             cont_p_m1 = _create_container(inst1, cont_list[cont_num].dn, 'p0')

+             _delete_container(cont_p_m1, sleep=True)

+ 

+             cont_p_m2 = _create_container(inst2, cont_list[cont_num].dn, 'p0')

+             cont_c_m2 = _create_container(inst2, cont_p_m2.dn, 'c0')

+             _delete_container(cont_c_m2)

+             _delete_container(cont_p_m2, sleep=True)

+ 

+             cont_p_m3 = _create_container(inst3, cont_list[cont_num].dn, 'p0')

+             _create_container(inst3, cont_p_m3.dn, 'c0')

+             _create_container(inst3, cont_p_m3.dn, 'c1', sleep=True)

+ 

+         topology_m3.resume_all_replicas()

+ 

+         repl.test_replication_topology(topology_m3)

+ 

+         conts_dns = {}

+         for num in range(1, 4):

+             inst = topology_m3.ms["master{}".format(num)]

+             conts_dns[inst.serverid] = []

+             conts = nsContainers(inst, test_base_m3.dn)

+             for cont in conts.list():

+                 conts_p = nsContainers(inst, cont.dn)

+                 for cont_p in conts_p.list():

+                     conts_c = nsContainers(inst, cont_p.dn)

+                     conts_dns[inst.serverid].extend([cont_c.dn for cont_c in conts_c.list()])

+                 conts_dns[inst.serverid].extend([cont_p.dn for cont_p in conts_p.list()])

+             conts_dns[inst.serverid].extend([cont.dn for cont in conts.list()])

+ 

+         for conts1, conts2 in permutations(conts_dns.values(), 2):

+             assert set(conts1) == set(conts2)

+ 

+ 

+ if __name__ == '__main__':

+     # Run isolated

+     # -s for DEBUG mode

+     CURRENT_FILE = os.path.realpath(__file__)

+     pytest.main("-s %s" % CURRENT_FILE)

+ 

+ 

@@ -0,0 +1,53 @@ 

+ # --- BEGIN COPYRIGHT BLOCK ---

+ # Copyright (C) 2018 Red Hat, Inc.

+ # All rights reserved.

+ #

+ # License: GPL (version 3 or any later version).

+ # See LICENSE for details.

+ # --- END COPYRIGHT BLOCK ---

+ #

+ import os

+ import logging

+ import pytest

+ from lib389.topologies import create_topology

+ from lib389._constants import ReplicaRole

+ 

+ DEBUGGING = os.getenv('DEBUGGING', default=False)

+ if DEBUGGING:

+     logging.getLogger(__name__).setLevel(logging.DEBUG)

+ else:

+     logging.getLogger(__name__).setLevel(logging.INFO)

+ log = logging.getLogger(__name__)

+ 

+ 

+ # Redefine some fixtures so we can use them with class scope

+ @pytest.fixture(scope="class")

+ def topology_m2(request):

+     """Create Replication Deployment with two masters"""

+ 

+     topology = create_topology({ReplicaRole.MASTER: 2})

+ 

+     def fin():

+         if DEBUGGING:

+             [inst.stop() for inst in topology]

+         else:

+             [inst.delete() for inst in topology]

+     request.addfinalizer(fin)

+ 

+     return topology

+ 

+ 

+ @pytest.fixture(scope="class")

+ def topology_m3(request):

+     """Create Replication Deployment with three masters"""

+ 

+     topology = create_topology({ReplicaRole.MASTER: 3})

+ 

+     def fin():

+         if DEBUGGING:

+             [inst.stop() for inst in topology]

+         else:

+             [inst.delete() for inst in topology]

+     request.addfinalizer(fin)

+ 

+     return topology

@@ -186,7 +186,7 @@ 

      m2.deleteErrorLogs()

  

      log.info('Set replication loglevel')

-     m2.setLogLevel(LOG_REPLICA)

+     m2.config.loglevel((ErrorLog.REPLICA,))

  

      log.info('Modifying entry {} - change userpassword on master 1'.format(test_entry.dn))

  
@@ -208,7 +208,7 @@ 

          assert not m2.ds_error_log.match('.*can.t add a change for {}.*'.format(test_entry.dn))

      finally:

          log.info('Set the default loglevel')

-         m2.setLogLevel(LOG_DEFAULT)

+         m2.config.loglevel((ErrorLog.DEFAULT,))

  

  

  def test_invalid_agmt(topo_m2):

@@ -13,7 +13,7 @@ 

  from lib389.utils import *

  from lib389.topologies import topology_m2

  

- from lib389._constants import SUFFIX, DEFAULT_SUFFIX, LOG_REPLICA

+ from lib389._constants import SUFFIX, DEFAULT_SUFFIX, ErrorLog

  

  from lib389.agreement import Agreements

  from lib389.idm.organisationalunit import OrganisationalUnits
@@ -164,8 +164,8 @@ 

      master2 = topology_m2.ms["master2"]

  

      log.info("Set Replication Debugging loglevel for the errorlog")

-     master1.setLogLevel(LOG_REPLICA)

-     master2.setLogLevel(LOG_REPLICA)

+     master1.config.loglevel((ErrorLog.REPLICA,))

+     master2.config.loglevel((ErrorLog.REPLICA,))

  

      sync_dict = Counter()

      min_ap = waitfor_async_attr[1][0]

@@ -3339,10 +3339,9 @@ 

              raise ValueError(status)

          return status

  

-     # This could be made to delete by filter ....

-     def delete_branch_s(self, basedn, scope):

-         ents = self.search_s(basedn, scope)

+     def delete_branch_s(self, basedn, scope, filterstr="(objectclass=*)", serverctrls=None, clientctrls=None):

+         ents = self.search_s(basedn, scope, filterstr)

  

          for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True):

              self.log.debug("Delete entry children %s" % (ent.dn))

-             self.delete_s(ent.dn)

+             self.delete_ext_s(ent.dn, serverctrls=serverctrls, clientctrls=clientctrls)

@@ -7,7 +7,7 @@ 

  # --- END COPYRIGHT BLOCK ---

  

  import os

- from enum import Enum

+ from enum import Enum, IntEnum

  from lib389.properties import *

  

  (
@@ -215,6 +215,7 @@ 

  # _Server/10/html/Administration_Guide/Configuring_Logs.html

  # The default log level is 16384

  #

+ # These are legacy constants. Please use the IntEnum versions below (ErrorLog and AccessLog) instead.

  (LOG_TRACE,

   LOG_TRACE_PACKETS,

   LOG_TRACE_HEAVY,
@@ -233,6 +234,32 @@ 

   LOG_ACL_SUMMARY) = [1 << x for x in (list(range(8)) + list(range(11, 19)))]

  

  

+ class ErrorLog(IntEnum):

+     (TRACE,

+      TRACE_PACKETS,

+      TRACE_HEAVY,

+      CONNECT,

+      PACKET,

+      SEARCH_FILTER,

+      CONFIG_PARSER,

+      ACL,

+      ENTRY_PARSER,

+      HOUSEKEEPING,

+      REPLICA,

+      DEFAULT,

+      CACHE,

+      PLUGIN,

+      MICROSECONDS,

+      ACL_SUMMARY) = [1 << x for x in (list(range(8)) + list(range(11, 19)))]

+ 

+ 

+ class AccessLog(IntEnum):

+     NONE = 0

+     INTERNAL = 4

+     DEFAULT = 256  # Default log level

+     ENTRY = 512

+     MICROSECONDS = 131072

+ 

  #

  # Constants for individual tests

  #

file modified
+9 -5
@@ -89,15 +89,19 @@ 

          """

          self._alter_log_enabled(service, 'off')

  

-     def loglevel(self, vals=(LOG_DEFAULT,), service='error', update=False):

+     def loglevel(self, vals=(ErrorLog.DEFAULT,), service='error', update=False):

          """Set the access or error log level.

-         @param vals - a list of log level codes (eg. lib389.LOG_*)

+ 

+         :param vals: a list of log level codes (eg. lib389.ErrorLog.*)

                        defaults to LOG_DEFAULT

-         @param service - 'access' or 'error'. There is no 'audit' log level.

+         :type vals: list

+         :param service: 'access' or 'error'. There is no 'audit' log level.

                           use enable_log or disable_log.

-         @param update  - False for replace (default), True for update

+         :type service: str

+         :param update: False for replace (default), True for update

+         :type update: bool

  

-         ex. loglevel([lib389.LOG_DEFAULT, lib389.LOG_ENTRY_PARSER])

+         ex. loglevel([lib389.ErrorLog.DEFAULT, lib389.ErrorLog.ENTRY_PARSER])

          """

          if service not in ('access', 'error'):

              self._log.error('Attempted to set level on invalid log service "%s"' % service)

@@ -183,3 +183,25 @@ 

          else:

              self._basedn = '{},{}'.format(rdn, basedn)

  

+     def create_test_user(self, uid=1000, gid=2000):

+         """Create a test user with uid=test_user_UID rdn

+ 

+         :param uid: User id

+         :type uid: int

+         :param gid: Group id

+         :type gid: int

+ 

+         :returns: DSLdapObject of the created entry

+         """

+ 

+         rdn_value = "test_user_{}".format(uid)

+         rdn = "uid={}".format(rdn_value)

+         properties = {

+             'uid': rdn_value,

+             'cn': rdn_value,

+             'sn': rdn_value,

+             'uidNumber': str(uid),

+             'gidNumber': str(gid),

+             'homeDirectory': '/home/{}'.format(rdn_value)

+         }

+         return super(UserAccounts, self).create(rdn, properties)

file modified
+18 -18
@@ -290,9 +290,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -306,9 +306,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -321,9 +321,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -337,9 +337,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -353,9 +353,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -369,9 +369,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -385,9 +385,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -402,9 +402,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), topology.all_insts.values())

+             [inst.stop() for inst in topology]

          else:

-             map(lambda inst: inst.delete(), topology.all_insts.values())

+             [inst.delete() for inst in topology]

      request.addfinalizer(fin)

  

      return topology
@@ -480,9 +480,9 @@ 

  

      def fin():

          if DEBUGGING:

-             map(lambda inst: inst.stop(), instances)

+             [inst.stop() for inst in instances]

          else:

-             map(lambda inst: inst.delete(), instances)

+             [inst.delete() for inst in instances]

      request.addfinalizer(fin)

  

      return TopologyMain(masters={"master1": master}, hubs={"hub1": hub}, consumers={"consumer1": consumer})

Description: Add a test suite which checks replication conflict resolution
for basic operations like add, delete, modrdn and modify; operations on groups
with the memberOf plugin enabled; managed entries operations; and nested entries.
idm/user.py - add a create_test_user method which allows default user creation
with a given uid and gid.
_mapped_object.py - add a delete_tree() method to the DSLdapObject class
_constants.py - add log level constants AccessLog(IntEnum) and ErrorLog(IntEnum)
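
A quick sketch of the new pieces described above, assuming a connected DirSrv instance named 'inst' (the instance name and the uid/gid values are illustrative; the method and constant names come from the diff):

    from lib389._constants import DEFAULT_SUFFIX, ErrorLog, AccessLog
    from lib389.idm.user import UserAccounts

    # 'inst' is assumed to be an already-connected DirSrv instance
    users = UserAccounts(inst, DEFAULT_SUFFIX)
    user = users.create_test_user(uid=1000, gid=2000)  # creates uid=test_user_1000

    # IntEnum members behave like plain ints, so they can be passed
    # to config.loglevel just like the old LOG_* constants
    inst.config.loglevel((ErrorLog.DEFAULT, ErrorLog.REPLICA), service='error')
    inst.config.loglevel((AccessLog.DEFAULT, AccessLog.INTERNAL), service='access')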

https://pagure.io/389-ds-base/issue/49043

Reviewed by: ?

rebased onto bd76202e87063a5e51e97460ae6a5fb8ad858f7e

5 years ago

Add an assertion that user_num != new_num so that it's clear that a change has to occur?

Given this logic, and that at the end you have a resume + test_replica, you don't need any sleeps on create/rename user.

You need the sleeps to ensure that each run really generates the same CSN order for the operations. In a CSN comparison you compare the timestamp and sequence number, and if they are identical you compare the replica ID.

So if you run

op1(m2)
op2(m1)
you expect that op1 gets the smaller CSN, but in fact, if both ops are done in the same second, op2 might have the smaller CSN.

For a general replication test this wouldn't matter, since it would just ensure that replication is working. But for conflict tests the scenarios are designed to execute a specific order, and we need to eliminate random ordering.
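
A minimal sketch of the ordering rule described here (the tuple layout and field names are illustrative, not the actual lib389 CSN API):

    from collections import namedtuple

    # A CSN orders by timestamp first, then sequence number, then replica ID
    CSN = namedtuple('CSN', ['timestamp', 'seqnum', 'rid'])

    def csn_sorts_after(a, b):
        return (a.timestamp, a.seqnum, a.rid) > (b.timestamp, b.seqnum, b.rid)

    # op1 on master2 (rid=2), then op2 on master1 (rid=1), in the same second:
    op1 = CSN(timestamp=1518000000, seqnum=0, rid=2)
    op2 = CSN(timestamp=1518000000, seqnum=0, rid=1)
    assert csn_sorts_after(op1, op2)  # op2 gets the smaller CSN despite running later

    # A one-second sleep between the two operations makes the order deterministic:
    op2_later = CSN(timestamp=1518000001, seqnum=0, rid=1)
    assert csn_sorts_after(op2_later, op1)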

Can this be a python enum instead perhaps?
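
For reference, the ErrorLog/AccessLog IntEnum classes in the _constants.py hunk above answer this. IntEnum members behave like plain ints, so the legacy bitmask arithmetic keeps working; a minimal sketch with two members, values matching the shifts in that hunk:

    from enum import IntEnum

    class ErrorLog(IntEnum):
        REPLICA = 8192     # 1 << 13, same value as the legacy LOG_REPLICA
        DEFAULT = 16384    # 1 << 14, same value as the legacy LOG_DEFAULT

    # Members compare and combine exactly like the old int constants
    assert ErrorLog.DEFAULT == 16384
    assert ErrorLog.DEFAULT | ErrorLog.REPLICA == 24576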

Really not sure how I feel about this method. It's a bit of a dangerous one, and it has really limited use generally as an API. As well, you only use it during test finalisation. A better idea could be to limit this specifically to the container type (to enable it to be deleted cleanly) and to mark it private (_delete_tree). I'll need to think about it a bit to really know how to comment on it properly.


rebased onto 762d56e37b97617aa2ad1b4e0fa1f2a059444824

5 years ago

Thank you for the review!
I've added the check to the _rename_user function, and I've added enum log levels.

Regarding the sleeps, Ludwig has explained his point. We need them for this particular test case.

And about the delete_tree method: I agree that it is a dangerous method and we need to think about it carefully.
My arguments for keeping it:
- We need it in the API not only for containers but also for organizational units, domains, and organizations.
- It can still be useful for the end user (for instance, ldapdelete has the '-r' functionality).

I think we can add additional warnings for the CLI usage.
And in the API, we can still use it for DSLdapObject when we need it.
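
For reference, the bottom-up approach both variants share: search the whole subtree, then delete children before their parents by sorting DNs longest-first. A sketch mirroring the _delete_test_base helper from the diff above:

    import ldap

    def _delete_subtree(inst, basedn):
        filterstr = "(|(objectclass=*)(objectclass=ldapsubentry))"
        ents = inst.search_s(basedn, ldap.SCOPE_SUBTREE, filterstr)
        # A longer DN is deeper in the tree, so leaves are deleted first
        # and every parent is already empty by the time its turn comes
        for ent in sorted(ents, key=lambda e: len(e.dn), reverse=True):
            try:
                inst.delete_ext_s(ent.dn)
            except ldap.NO_SUCH_OBJECT:
                pass  # glue entries may disappear together with their children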

rebased onto 89980d2260df2ce592ca94649188a7974ecb95f7

5 years ago

rebased onto e8e372a121a5a1e463b89df2cc18d0db72cb2049

5 years ago

Added a filterstr option for delete_branch_s. The 'delete_tree' functionality now uses "(|(objectclass=*)(objectclass=ldapsubentry))".
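
With that change the call shape looks roughly like this (a sketch; the basedn value is a placeholder):

    import ldap

    inst.delete_branch_s('cn=test_container,' + DEFAULT_SUFFIX,
                         ldap.SCOPE_SUBTREE,
                         filterstr="(|(objectclass=*)(objectclass=ldapsubentry))")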

rebased onto c42e675fc6f4c754398132a216cfa5686782920d

5 years ago

rebased onto 8f8bee92095a7528d77a0df4bfb5fd25424c74aa

5 years ago

rebased onto 716486ece7feab8c8051f0f8a85d64a19b2d8069

5 years ago

I've fixed issues mentioned by Ludwig: https://pagure.io/389-ds-base/issue/49551#comment-493717

And @firstyear, I removed the delete_tree() method. I've moved the functionality to the test suite because it makes more sense to have it there.

rebased onto de4fd1f2662dace4bab9b100883f67a8fab23a4f

5 years ago

Besides the time.sleep(1) you have in here, I'm happy with this. What purpose does the time.sleep here serve? You either need to wait for replication, or you don't need it :)

I tried to explain the need for sleeps in a comment 16 days ago. It is to ensure the same order of CSNs in each run. Since replication is disabled during most of the actions, we cannot wait for replication.

But this is needed only when switching from one master to another, not for consecutive changes on the same master. (My original test had a sleep argument, which was false by default.)
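
Illustrated with the suite's own helpers (a sketch; test_users_m1/test_users_m2 and the user number are placeholders in the style of the tests above):

    # Consecutive changes on the same master need no sleep
    _create_user(test_users_m1, 2000)
    _delete_user(test_users_m1, 2000, sleep=True)  # sleep only before switching masters
    _create_user(test_users_m2, 2000)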

rebased onto 3b947b75b63eb9941a4a0a2891aa1251b819f754

5 years ago

I've put the sleeps in as you said, @lkrispen

By the way, in the test suite for two masters, you have sleep=True by default...
https://pagure.io/389-ds-base/issue/raw/00571921ea43decbc7d643449783871aefe323fd3c314427161fb228dc22a463-ticket49043_1_test.py

rebased onto 14e413a

5 years ago

Pull-Request has been merged by spichugi

5 years ago

389-ds-base is moving from Pagure to GitHub. This means that new issues and pull requests
will be accepted only in 389-ds-base's GitHub repository.

This pull request has been cloned to GitHub as an issue and is available here:
- https://github.com/389ds/389-ds-base/issues/2617

If you want to continue to work on the PR, please navigate to the GitHub issue,
download the patch from the attachments and file a new pull request.

Thank you for understanding. We apologize for any inconvenience.

Pull-Request has been closed by spichugi

3 years ago