#49626 Issue 49581 - Fix dynamic plugins test suite
Merged 5 months ago by spichugi. Opened 7 months ago by spichugi.
spichugi/389-ds-base dynamic_plugins  into  master

@@ -16,78 +16,83 @@ 

  import ldap.sasl

  import pytest

  from lib389.tasks import *

- from lib389 import DirSrv

- from lib389.properties import *

- import plugin_tests

- import stress_tests

- from lib389.topologies import topology_st

- from lib389._constants import (DN_CONFIG, DEFAULT_SUFFIX, DN_LDBM, defaultProperties,

-                                PLUGIN_MEMBER_OF, PLUGIN_LINKED_ATTRS, PLUGIN_REFER_INTEGRITY,

-                                ReplicaRole, REPLICATION_BIND_DN, REPLICATION_BIND_PW,

-                                REPLICATION_BIND_METHOD, REPLICATION_TRANSPORT,

-                                LOCALHOST, REPLICA_RUV_FILTER, args_instance,

-                                RA_NAME, RA_BINDDN, RA_BINDPW, RA_METHOD, RA_TRANSPORT_PROT)

+ from lib389.replica import ReplicationManager

+ from lib389.config import LDBMConfig

+ from lib389._constants import *

+ from lib389.topologies import topology_m2

+ from ..plugins import acceptance_test

+ from . import stress_tests

  

  log = logging.getLogger(__name__)

  

  

- def repl_fail(replica):

-     """Remove replica instance, and assert failure"""

+ def check_replicas(topology_m2):

+     """Check that replication is in sync and working"""

  

-     replica.delete()

-     assert False

+     m1 = topology_m2.ms["master1"]

+     m2 = topology_m2.ms["master2"]

  

+     log.info('Checking if replication is in sync...')

+     repl = ReplicationManager(DEFAULT_SUFFIX)

+     repl.test_replication_topology(topology_m2)

+     #

+     # Verify the databases are identical. There should not be any "user, entry, employee" entries

+     #

+     log.info('Checking if the data is the same between the replicas...')

  

- def test_dynamic_plugins(topology_st):

-     """

-         Test Dynamic Plugins - exercise each plugin and its main features, while

-         changing the configuration without restarting the server.

- 

-         Need to test: functionality, stability, and stress.  These tests need to run

-                       with replication disabled, and with replication setup with a

-                       second instance.  Then test if replication is working, and we have

-                       same entries on each side.

- 

-         Functionality - Make sure that as configuration changes are made they take

-                         effect immediately.  Cross plugin interaction (e.g. automember/memberOf)

-                         needs to tested, as well as plugin tasks.  Need to test plugin

-                         config validation(dependencies, etc).

- 

-         Memory Corruption - Restart the plugins many times, and in different orders and test

-                             functionality, and stability.  This will excerise the internal

-                             plugin linked lists, dse callbacks, and task handlers.

- 

-         Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)

-                  Restart various plugins while these operations are going on.  Perform this test

-                  5 times(stress_max_run).

+     # Check the master

+     try:

+         entries = m1.search_s(DEFAULT_SUFFIX,

+                               ldap.SCOPE_SUBTREE,

+                               "(|(uid=person*)(uid=entry*)(uid=employee*))")

+         if len(entries) > 0:

+             log.error('Master database has incorrect data set!\n')

+             assert False

+     except ldap.LDAPError as e:

+         log.fatal('Unable to search db on master: ' + e.message['desc'])

+         assert False

+ 

+     # Check the consumer

+     try:

+         entries = m2.search_s(DEFAULT_SUFFIX,

+                               ldap.SCOPE_SUBTREE,

+                               "(|(uid=person*)(uid=entry*)(uid=employee*))")

+         if len(entries) > 0:

+             log.error('Consumer database is not consistent with master database')

+             assert False

+     except ldap.LDAPError as e:

+         log.fatal('Unable to search db on consumer: ' + e.message['desc'])

+         assert False

+ 

+     log.info('Data is consistent across the replicas.\n')

+ 

+ 

+ def test_acceptance(topology_m2):

+     """Exercise each plugin and its main features, while

+     changing the configuration without restarting the server.

+ 

+     Make sure that as configuration changes are made they take

+     effect immediately.  Cross plugin interaction (e.g. automember/memberOf)

+     needs to be tested, as well as plugin tasks.  Need to test plugin

+     config validation(dependencies, etc).

      """

  

-     REPLICA_PORT = 33334

-     RUV_FILTER = REPLICA_RUV_FILTER

-     master_maxcsn = 0

-     replica_maxcsn = 0

+     m1 = topology_m2.ms["master1"]

      msg = ' (no replication)'

      replication_run = False

-     stress_max_runs = 5

+ 

+     # First part of the test should be without replication

+     topology_m2.pause_all_replicas()

  

      # First enable dynamic plugins

-     try:

-         topology_st.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-dynamic-plugins', 'on')])

-     except ldap.LDAPError as e:

-         log.fatal('Failed to enable dynamic plugin!' + e.message['desc'])

-         assert False

+     m1.config.replace('nsslapd-dynamic-plugins', 'on')

  

      # Test that critical plugins can be updated even though the change might not be applied

-     try:

-         topology_st.standalone.modify_s(DN_LDBM, [(ldap.MOD_REPLACE, 'description', 'test')])

-     except ldap.LDAPError as e:

-         log.fatal('Failed to apply change to critical plugin' + e.message['desc'])

-         assert False

+     ldbm_config = LDBMConfig(m1)

+     ldbm_config.replace('description', 'test')

  

-     while 1:

-         #

+     while True:

          # First run the tests with replication disabled, then rerun them with replication set up

-         #

  

          ############################################################################

          #  Test plugin functionality

@@ -97,12 +102,53 @@ 

          log.info('Testing Dynamic Plugins Functionality' + msg + '...')

          log.info('####################################################################\n')

  

-         plugin_tests.test_all_plugins(topology_st.standalone)

+         acceptance_test.check_all_plugins(topology_m2)

  

          log.info('####################################################################')

          log.info('Successfully Tested Dynamic Plugins Functionality' + msg + '.')

          log.info('####################################################################\n')

  

+         if replication_run:

+             # We're done.

+             break

+         else:

+             log.info('Resume replication and run everything one more time')

+             topology_m2.resume_all_replicas()

+ 

+             replication_run = True

+             msg = ' (replication enabled)'

+             time.sleep(1)

+ 

+     ############################################################################

+     # Check replication, and data are in sync

+     ############################################################################

+     check_replicas(topology_m2)

+ 

+ 

+ def test_memory_corruption(topology_m2):

+     """Memory Corruption - Restart the plugins many times, and in different orders and test

+     functionality, and stability.  This will exercise the internal

+     plugin linked lists, dse callbacks, and task handlers.

+     """

+ 

+ 

+     m1 = topology_m2.ms["master1"]

+     msg = ' (no replication)'

+     replication_run = False

+ 

+     # First part of the test should be without replication

+     topology_m2.pause_all_replicas()

+ 

+     # First enable dynamic plugins

+     m1.config.replace('nsslapd-dynamic-plugins', 'on')

+ 

+     # Test that critical plugins can be updated even though the change might not be applied

+     ldbm_config = LDBMConfig(m1)

+     ldbm_config.replace('description', 'test')

+ 

+     while True:

+         # First run the tests with replication disabled, then rerun them with replication set up

+ 

          ############################################################################

          # Test the stability by exercising the internal lists, callbacks, and task handlers

          ############################################################################

@@ -113,24 +159,24 @@ 

          prev_plugin_test = None

          prev_prev_plugin_test = None

  

-         for plugin_test in plugin_tests.func_tests:

+         for plugin_test in acceptance_test.func_tests:

              #

              # Restart the plugin several times (and prev plugins) - work that linked list

              #

-             plugin_test(topology_st.standalone, "restart")

+             plugin_test(topology_m2, "restart")

  

              if prev_prev_plugin_test:

-                 prev_prev_plugin_test(topology_st.standalone, "restart")

+                 prev_prev_plugin_test(topology_m2, "restart")

  

-             plugin_test(topology_st.standalone, "restart")

+             plugin_test(topology_m2, "restart")

  

              if prev_plugin_test:

-                 prev_plugin_test(topology_st.standalone, "restart")

+                 prev_plugin_test(topology_m2, "restart")

  

-             plugin_test(topology_st.standalone, "restart")

+             plugin_test(topology_m2, "restart")

  

              # Now run the functional test

-             plugin_test(topology_st.standalone)

+             plugin_test(topology_m2, "dynamic")

  

              # Set the previous tests

              if prev_plugin_test:

@@ -141,17 +187,58 @@ 

          log.info('Successfully Tested Dynamic Plugins for Memory Corruption' + msg + '.')

          log.info('####################################################################\n')

  

-         ############################################################################

-         # Stress two plugins while restarting it, and while restarting other plugins.

-         # The goal is to not crash, and have the plugins work after stressing them.

-         ############################################################################

+         if replication_run:

+             # We're done.

+             break

+         else:

+             log.info('Resume replication and run everything one more time')

+             topology_m2.resume_all_replicas()

+ 

+             replication_run = True

+             msg = ' (replication enabled)'

+             time.sleep(1)

+ 

+     ############################################################################

+     # Check replication, and data are in sync

+     ############################################################################

+     check_replicas(topology_m2)

+ 

+ 

+ def test_stress(topology_m2):

+     """Test dynamic plugins got

+ 

+     Stress - Put the server under load that will trigger multiple plugins(MO, RI, DNA, etc)

+     Restart various plugins while these operations are going on.  Perform this test

+     5 times (stress_max_runs).

+     """

+ 

+     m1 = topology_m2.ms["master1"]

+     msg = ' (no replication)'

+     replication_run = False

+     stress_max_runs = 5

+ 

+     # First part of the test should be without replication

+     topology_m2.pause_all_replicas()

+ 

+     # First enable dynamic plugins

+     m1.config.replace('nsslapd-dynamic-plugins', 'on')

+ 

+     # Test that critical plugins can be updated even though the change might not be applied

+     ldbm_config = LDBMConfig(m1)

+     ldbm_config.replace('description', 'test')

+ 

+     while True:

+         # First run the tests with replication disabled, then rerun them with replication set up

+ 

+         log.info('Do one run through all tests ' + msg + '...')

+         acceptance_test.check_all_plugins(topology_m2)

  

          log.info('####################################################################')

          log.info('Stressing Dynamic Plugins' + msg + '...')

          log.info('####################################################################\n')

  

-         stress_tests.configureMO(topology_st.standalone)

-         stress_tests.configureRI(topology_st.standalone)

+         stress_tests.configureMO(m1)

+         stress_tests.configureRI(m1)

  

          stress_count = 0

          while stress_count < stress_max_runs:

@@ -159,100 +246,94 @@ 

              log.info('Running stress test' + msg + '.  Run (%d/%d)...' % (stress_count + 1, stress_max_runs))

              log.info('####################################################################\n')

  

-             try:

-                 # Launch three new threads to add a bunch of users

-                 add_users = stress_tests.AddUsers(topology_st.standalone, 'employee', True)

-                 add_users.start()

-                 add_users2 = stress_tests.AddUsers(topology_st.standalone, 'entry', True)

-                 add_users2.start()

-                 add_users3 = stress_tests.AddUsers(topology_st.standalone, 'person', True)

-                 add_users3.start()

-                 time.sleep(1)

- 

-                 # While we are adding users restart the MO plugin and an idle plugin

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(2)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

- 

-                 # Wait for the 'adding' threads to complete

-                 add_users.join()

-                 add_users2.join()

-                 add_users3.join()

- 

-                 # Now launch three threads to delete the users

-                 del_users = stress_tests.DelUsers(topology_st.standalone, 'employee')

-                 del_users.start()

-                 del_users2 = stress_tests.DelUsers(topology_st.standalone, 'entry')

-                 del_users2.start()

-                 del_users3 = stress_tests.DelUsers(topology_st.standalone, 'person')

-                 del_users3.start()

-                 time.sleep(1)

- 

-                 # Restart both the MO, RI plugins during these deletes, and an idle plugin

-                 topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

-                 time.sleep(2)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_MEMBER_OF)

-                 time.sleep(1)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

-                 topology_st.standalone.plugins.disable(name=PLUGIN_LINKED_ATTRS)

-                 topology_st.standalone.plugins.enable(name=PLUGIN_LINKED_ATTRS)

- 

-                 # Wait for the 'deleting' threads to complete

-                 del_users.join()

-                 del_users2.join()

-                 del_users3.join()

- 

-                 # Now make sure both the MO and RI plugins still work correctly

-                 plugin_tests.func_tests[8](topology_st.standalone)  # RI plugin

-                 plugin_tests.func_tests[5](topology_st.standalone)  # MO plugin

- 

-                 # Cleanup the stress tests

-                 stress_tests.cleanup(topology_st.standalone)

- 

-             except:

-                 log.info('Stress test failed!')

-                 if replication_run:

-                     repl_fail(replica_inst)

+             # Launch three new threads to add a bunch of users

+             add_users = stress_tests.AddUsers(m1, 'employee', True)

+             add_users.start()

+             add_users2 = stress_tests.AddUsers(m1, 'entry', True)

+             add_users2.start()

+             add_users3 = stress_tests.AddUsers(m1, 'person', True)

+             add_users3.start()

+             time.sleep(1)

+ 

+             # While we are adding users restart the MO plugin and an idle plugin

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

+             time.sleep(1)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             time.sleep(2)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+ 

+             # Wait for the 'adding' threads to complete

+             add_users.join()

+             add_users2.join()

+             add_users3.join()

+ 

+             # Now launch three threads to delete the users

+             del_users = stress_tests.DelUsers(m1, 'employee')

+             del_users.start()

+             del_users2 = stress_tests.DelUsers(m1, 'entry')

+             del_users2.start()

+             del_users3 = stress_tests.DelUsers(m1, 'person')

+             del_users3.start()

+             time.sleep(1)

+ 

+             # Restart both the MO, RI plugins during these deletes, and an idle plugin

+             m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

+             time.sleep(1)

+             m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

+             time.sleep(1)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

+             m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

+             time.sleep(2)

+             m1.plugins.disable(name=PLUGIN_REFER_INTEGRITY)

+             time.sleep(1)

+             m1.plugins.disable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_MEMBER_OF)

+             time.sleep(1)

+             m1.plugins.enable(name=PLUGIN_REFER_INTEGRITY)

+             m1.plugins.disable(name=PLUGIN_LINKED_ATTRS)

+             m1.plugins.enable(name=PLUGIN_LINKED_ATTRS)

+ 

+             # Wait for the 'deleting' threads to complete

+             del_users.join()

+             del_users2.join()

+             del_users3.join()

+ 

+             # Now make sure both the MO and RI plugins still work correctly

+             acceptance_test.func_tests[8](topology_m2, "dynamic")  # RI plugin

+             acceptance_test.func_tests[5](topology_m2, "dynamic")  # MO plugin

+ 

+             # Cleanup the stress tests

+             stress_tests.cleanup(m1)

  

              stress_count += 1

              log.info('####################################################################')

@@ -264,166 +345,17 @@ 

              # We're done.

              break

          else:

-             #

-             # Enable replication and run everything one more time

-             #

-             log.info('Setting up replication, and rerunning the tests...\n')

- 

-             # Create replica instance

-             replica_inst = DirSrv(verbose=False)

-             args_instance[SER_HOST] = LOCALHOST

-             args_instance[SER_PORT] = REPLICA_PORT

-             args_instance[SER_SERVERID_PROP] = 'replica'

-             args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX

- 

-             args_replica_inst = args_instance.copy()

-             replica_inst.allocate(args_replica_inst)

-             replica_inst.create()

-             replica_inst.open()

- 

-             try:

-                 topology_st.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX,

-                                                                  role=ReplicaRole.MASTER,

-                                                                  replicaId=1)

-                 replica_inst.replica.enableReplication(suffix=DEFAULT_SUFFIX,

-                                                        role=ReplicaRole.CONSUMER,

-                                                        replicaId=65535)

-                 properties = {RA_NAME: r'to_replica',

-                               RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],

-                               RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],

-                               RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],

-                               RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}

-                 repl_agreement = topology_st.standalone.agreement.create(suffix=DEFAULT_SUFFIX,

-                                                                          host=LOCALHOST,

-                                                                          port=REPLICA_PORT,

-                                                                          properties=properties)

- 

-                 if not repl_agreement:

-                     log.fatal("Fail to create a replica agreement")

-                     repl_fail(replica_inst)

-                 topology_st.standalone.agreement.init(DEFAULT_SUFFIX, LOCALHOST, REPLICA_PORT)

-                 topology_st.standalone.waitForReplInit(repl_agreement)

-             except:

-                 log.info('Failed to setup replication!')

-                 repl_fail(replica_inst)

+             log.info('Resume replication and run everything one more time')

+             topology_m2.resume_all_replicas()

  

              replication_run = True

              msg = ' (replication enabled)'

              time.sleep(1)

  

      ############################################################################

-     # Check replication, and data are in sync, and remove the instance

-     ############################################################################

- 

-     log.info('Checking if replication is in sync...')

- 

-     try:

-         # Grab master's max CSN

-         entry = topology_st.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)

-         if not entry:

-             log.error('Failed to find db tombstone entry from master')

-             repl_fail(replica_inst)

-         elements = entry[0].getValues('nsds50ruv')

-         for ruv in elements:

-             if 'replica 1' in ruv:

-                 parts = ruv.split()

-                 if len(parts) == 5:

-                     master_maxcsn = parts[4]

-                     break

-                 else:

-                     log.error('RUV is incomplete')

-                     repl_fail(replica_inst)

-         if master_maxcsn == 0:

-             log.error('Failed to find maxcsn on master')

-             repl_fail(replica_inst)

- 

-     except ldap.LDAPError as e:

-         log.fatal('Unable to search masterfor db tombstone: ' + e.message['desc'])

-         repl_fail(replica_inst)

- 

-     # Loop on the consumer - waiting for it to catch up

-     count = 0

-     insync = False

-     while count < 60:

-         try:

-             # Grab master's max CSN

-             entry = replica_inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, RUV_FILTER)

-             if not entry:

-                 log.error('Failed to find db tombstone entry on consumer')

-                 repl_fail(replica_inst)

-             elements = entry[0].getValues('nsds50ruv')

-             for ruv in elements:

-                 if 'replica 1' in ruv:

-                     parts = ruv.split()

-                     if len(parts) == 5:

-                         replica_maxcsn = parts[4]

-                         break

-             if replica_maxcsn == 0:

-                 log.error('Failed to find maxcsn on consumer')

-                 repl_fail(replica_inst)

-         except ldap.LDAPError as e:

-             log.fatal('Unable to search for db tombstone on consumer: ' + e.message['desc'])

-             repl_fail(replica_inst)

- 

-         if master_maxcsn == replica_maxcsn:

-             insync = True

-             log.info('Replication is in sync.\n')

-             break

-         count += 1

-         time.sleep(1)

- 

-     # Report on replication status

-     if not insync:

-         log.error('Consumer not in sync with master!')

-         repl_fail(replica_inst)

- 

-     #

-     # Verify the databases are identical. There should not be any "user, entry, employee" entries

-     #

-     log.info('Checking if the data is the same between the replicas...')

- 

-     # Check the master

-     try:

-         entries = topology_st.standalone.search_s(DEFAULT_SUFFIX,

-                                                   ldap.SCOPE_SUBTREE,

-                                                   "(|(uid=person*)(uid=entry*)(uid=employee*))")

-         if len(entries) > 0:

-             log.error('Master database has incorrect data set!\n')

-             repl_fail(replica_inst)

-     except ldap.LDAPError as e:

-         log.fatal('Unable to search db on master: ' + e.message['desc'])

-         repl_fail(replica_inst)

- 

-     # Check the consumer

-     try:

-         entries = replica_inst.search_s(DEFAULT_SUFFIX,

-                                         ldap.SCOPE_SUBTREE,

-                                         "(|(uid=person*)(uid=entry*)(uid=employee*))")

-         if len(entries) > 0:

-             log.error('Consumer database in not consistent with master database')

-             repl_fail(replica_inst)

-     except ldap.LDAPError as e:

-         log.fatal('Unable to search db on consumer: ' + e.message['desc'])

-         repl_fail(replica_inst)

- 

-     log.info('Data is consistent across the replicas.\n')

- 

-     log.info('####################################################################')

-     log.info('Replication consistency test passed')

-     log.info('####################################################################\n')

- 

-     # Remove the replica instance

-     replica_inst.delete()

- 

-     ############################################################################

-     # We made it to the end!

+     # Check replication, and data are in sync

      ############################################################################

- 

-     log.info('#####################################################')

-     log.info('#####################################################')

-     log.info("Dynamic Plugins Testsuite: Completed Successfully!")

-     log.info('#####################################################')

-     log.info('#####################################################\n')

+     check_replicas(topology_m2)

  

  

  if __name__ == '__main__':

@@ -1,2485 +0,0 @@ 

- # --- BEGIN COPYRIGHT BLOCK ---

- # Copyright (C) 2016 Red Hat, Inc.

- # All rights reserved.

- #

- # License: GPL (version 3 or any later version).

- # See LICENSE for details.

- # --- END COPYRIGHT BLOCK ---

- #

- '''

- Created on Dec 09, 2014

- 

- @author: mreynolds

- '''

- import logging

- from lib389 import DirSrv

- from lib389.tasks import *

- from lib389.properties import *

- from lib389._constants import (DEFAULT_SUFFIX, PLUGIN_ACCT_USABILITY, PLUGIN_ACCT_POLICY,

-                               PLUGIN_ATTR_UNIQUENESS, PLUGIN_AUTOMEMBER, PLUGIN_DNA,

-                               PLUGIN_LINKED_ATTRS, PLUGIN_MEMBER_OF, DN_CONFIG,

-                               PLUGIN_MANAGED_ENTRY, PLUGIN_PASSTHRU, PLUGIN_REFER_INTEGRITY,

-                               PLUGIN_RETRO_CHANGELOG, PLUGIN_ROOTDN_ACCESS, DN_DM, PASSWORD,

-                               LOCALHOST, RETROCL_SUFFIX, args_instance)

- 

- log = logging.getLogger(__name__)

- 

- USER1_DN = 'uid=user1,' + DEFAULT_SUFFIX

- USER2_DN = 'uid=user2,' + DEFAULT_SUFFIX

- USER3_DN = 'uid=user3,' + DEFAULT_SUFFIX

- BUSER1_DN = 'uid=user1,ou=branch1,' + DEFAULT_SUFFIX

- BUSER2_DN = 'uid=user2,ou=branch2,' + DEFAULT_SUFFIX

- BUSER3_DN = 'uid=user3,ou=branch2,' + DEFAULT_SUFFIX

- BRANCH1_DN = 'ou=branch1,' + DEFAULT_SUFFIX

- BRANCH2_DN = 'ou=branch2,' + DEFAULT_SUFFIX

- GROUP_OU = 'ou=groups,' + DEFAULT_SUFFIX

- PEOPLE_OU = 'ou=people,' + DEFAULT_SUFFIX

- GROUP_DN = 'cn=group,' + DEFAULT_SUFFIX

- CONFIG_AREA = 'nsslapd-pluginConfigArea'

- 

- '''

-    Functional tests for each plugin

- 

-    Test:

-          plugin restarts (test when on and off)

-          plugin config validation

-          plugin dependencies

-          plugin functionality (including plugin tasks)

- '''

- 

- 

- ################################################################################

- #

- # Test Plugin Dependency

- #

- ################################################################################

- def test_dependency(inst, plugin):

-     """

-     Set the "account usabilty" plugin to depend on this plugin.  This plugin

-     is generic, always enabled, and perfect for our testing

-     """

- 

-     try:

-         inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config',

-                       [(ldap.MOD_REPLACE, 'nsslapd-plugin-depends-on-named', plugin)])

- 

-     except ldap.LDAPError as e:

-         log.fatal('test_dependency: Failed to modify ' + PLUGIN_ACCT_USABILITY + ': error ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.modify_s('cn=' + plugin + ',cn=plugins,cn=config',

-                       [(ldap.MOD_REPLACE, 'nsslapd-pluginenabled', 'off')])

- 

-     except ldap.UNWILLING_TO_PERFORM:

-         # failed as expected

-         pass

-     else:

-         # Incorrectly succeeded

-         log.fatal('test_dependency: Plugin dependency check failed (%s)' % plugin)

-         assert False

- 

-     # Now undo the change

-     try:

-         inst.modify_s('cn=' + PLUGIN_ACCT_USABILITY + ',cn=plugins,cn=config',

-                       [(ldap.MOD_DELETE, 'nsslapd-plugin-depends-on-named', None)])

-     except ldap.LDAPError as e:

-         log.fatal('test_dependency: Failed to reset ' + plugin + ': error ' + e.message['desc'])

-         assert False

- 

- 

- ################################################################################

- #

- # Wait for task to complete

- #

- ################################################################################

- def wait_for_task(conn, task_dn):

-     finished = False

-     exitcode = 0

-     count = 0

-     while count < 60:

-         try:

-             task_entry = conn.search_s(task_dn, ldap.SCOPE_BASE, 'objectclass=*')

-             if not task_entry:

-                 log.fatal('wait_for_task: Search failed to find task: ' + task_dn)

-                 assert False

-             if task_entry[0].hasAttr('nstaskexitcode'):

-                 # task is done

-                 exitcode = task_entry[0].nsTaskExitCode

-                 finished = True

-                 break

-         except ldap.LDAPError as e:

-             log.fatal('wait_for_task: Search failed: ' + e.message['desc'])

-             assert False

- 

-         time.sleep(1)

-         count += 1

-     if not finished:

-         log.fatal('wait_for_task: Task (%s) did not complete!' % task_dn)

-         assert False

- 

-     return exitcode

- 

- 

- ################################################################################

- #

- # Test Account Policy Plugin (0)

- #

- ################################################################################

- def test_acctpolicy(inst, args=None):

-     # stop the plugin, and start it

-     inst.plugins.disable(name=PLUGIN_ACCT_POLICY)

-     inst.plugins.enable(name=PLUGIN_ACCT_POLICY)

- 

-     if args == "restart":

-         return True

- 

-     CONFIG_DN = 'cn=config,cn=Account Policy Plugin,cn=plugins,cn=config'

- 

-     log.info('Testing ' + PLUGIN_ACCT_POLICY + '...')

- 

-     ############################################################################

-     # Configure plugin

-     ############################################################################

- 

-     # Add the config entry

-     try:

-         inst.add_s(Entry((CONFIG_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'cn': 'config',

-             'alwaysrecordlogin': 'yes',

-             'stateattrname': 'lastLoginTime'

-         })))

-     except ldap.ALREADY_EXISTS:

-         try:

-             inst.modify_s(CONFIG_DN,

-                           [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),

-                            (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime')])

-         except ldap.LDAPError as e:

-             log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to add config entry: error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin

-     ############################################################################

- 

-     # Add an entry

-     time.sleep(1)

-     try:

-         inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '1',

-                                      'cn': 'user 1',

-                                      'uid': 'user1',

-                                      'userpassword': 'password'})))

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])

-         assert False

- 

-     # bind as user

-     try:

-         inst.simple_bind_s(USER1_DN, "password")

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to bind as user1: ' + e.message['desc'])

-         assert False

- 

-     # Bind as Root DN

-     time.sleep(1)

-     try:

-         inst.simple_bind_s(DN_DM, PASSWORD)

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])

-         assert False

- 

-     # Check lastLoginTime of USER1

-     try:

-         entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'lastLoginTime=*')

-         if not entries:

-             log.fatal('test_acctpolicy: Search failed to find an entry with lastLoginTime.')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Search failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Change config - change the stateAttrName to a new attribute

-     ############################################################################

- 

-     try:

-         inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'stateattrname', 'testLastLoginTime')])

- 

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to modify config entry: error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin

-     ############################################################################

- 

-     time.sleep(1)

-     # login as user

-     try:

-         inst.simple_bind_s(USER1_DN, "password")

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to bind(2nd) as user1: ' + e.message['desc'])

-         assert False

- 

-     time.sleep(1)

-     # Bind as Root DN

-     try:

-         inst.simple_bind_s(DN_DM, PASSWORD)

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to bind as rootDN: ' + e.message['desc'])

-         assert False

- 

-     # Check testLastLoginTime was added to USER1

-     try:

-         entries = inst.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(testLastLoginTime=*)')

-         if not entries:

-             log.fatal('test_acctpolicy: Search failed to find an entry with testLastLoginTime.')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Search failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin dependency

-     ############################################################################

- 

-     test_dependency(inst, PLUGIN_ACCT_POLICY)

- 

-     ############################################################################

-     # Cleanup

-     ############################################################################

- 

-     try:

-         inst.delete_s(USER1_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_acctpolicy: Failed to delete test entry: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test passed

-     ############################################################################

- 

-     log.info('test_acctpolicy: PASS\n')

- 

-     return

- 

- 

- ################################################################################

- #

- # Test Attribute Uniqueness Plugin (1)

- #

- ################################################################################

- def test_attruniq(inst, args=None):

-     # stop the plugin, and start it

-     inst.plugins.disable(name=PLUGIN_ATTR_UNIQUENESS)

-     inst.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)

- 

-     if args == "restart":

-         return

- 

-     log.info('Testing ' + PLUGIN_ATTR_UNIQUENESS + '...')

- 

-     ############################################################################

-     # Configure plugin

-     ############################################################################

- 

-     try:

-         inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',

-                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'uid')])

- 

-     except ldap.LDAPError as e:

-         log.fatal('test_attruniq: Failed to configure plugin for "uid": error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin

-     ############################################################################

- 

-     # Add an entry

-     try:

-         inst.add_s(Entry((USER1_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '1',

-                                      'cn': 'user 1',

-                                      'uid': 'user1',

-                                      'mail': 'user1@example.com',

-                                      'mailAlternateAddress': 'user1@alt.example.com',

-                                      'userpassword': 'password'})))

-     except ldap.LDAPError as e:

-         log.fatal('test_attruniq: Failed to add test user' + USER1_DN + ': error ' + e.message['desc'])

-         assert False

- 

-     # Add an entry with a duplicate "uid"

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'uid': 'user1',

-                                      'userpassword': 'password'})))

- 

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.fatal('test_attruniq: Adding of 2nd entry(uid) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Change config to use "mail" instead of "uid"

-     ############################################################################

- 

-     try:

-         inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',

-                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail')])

- 

-     except ldap.LDAPError as e:

-         log.fatal('test_attruniq: Failed to configure plugin for "mail": error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin - Add an entry, that has a duplicate "mail" value

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'mail': 'user1@example.com',

-                                      'userpassword': 'password'})))

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.fatal('test_attruniq: Adding of 2nd entry(mail) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Reconfigure plugin for mail and mailAlternateAddress

-     ############################################################################

- 

-     try:

-         inst.modify_s('cn=' + PLUGIN_ATTR_UNIQUENESS + ',cn=plugins,cn=config',

-                       [(ldap.MOD_REPLACE, 'uniqueness-attribute-name', 'mail'),

-                        (ldap.MOD_ADD, 'uniqueness-attribute-name',

-                         'mailAlternateAddress')])

- 

-     except ldap.LDAPError as e:

-         log.error(

-             'test_attruniq: Failed to reconfigure plugin for "mail mailAlternateAddress": error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin - Add an entry, that has a duplicate "mail" value

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'mail': 'user1@example.com',

-                                      'userpassword': 'password'})))

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.error('test_attruniq: Adding of 3rd entry(mail) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" value

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'mailAlternateAddress': 'user1@alt.example.com',

-                                      'userpassword': 'password'})))

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.error('test_attruniq: Adding of 4th entry(mailAlternateAddress) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Test plugin - Add an entry, that has a duplicate "mail" value conflicting mailAlternateAddress

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'mail': 'user1@alt.example.com',

-                                      'userpassword': 'password'})))

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.error('test_attruniq: Adding of 5th entry(mailAlternateAddress) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Test plugin - Add an entry, that has a duplicate "mailAlternateAddress" conflicting mail

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER2_DN, {'objectclass': "top extensibleObject".split(),

-                                      'sn': '2',

-                                      'cn': 'user 2',

-                                      'uid': 'user2',

-                                      'mailAlternateAddress': 'user1@example.com',

-                                      'userpassword': 'password'})))

-     except ldap.CONSTRAINT_VIOLATION:

-         pass

-     else:

-         log.error('test_attruniq: Adding of 6th entry(mail) incorrectly succeeded')

-         assert False

- 

-     ############################################################################

-     # Test plugin dependency

-     ############################################################################

- 

-     test_dependency(inst, PLUGIN_ATTR_UNIQUENESS)

- 

-     ############################################################################

-     # Cleanup

-     ############################################################################

- 

-     try:

-         inst.delete_s(USER1_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_attruniq: Failed to delete test entry: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test passed

-     ############################################################################

- 

-     log.info('test_attruniq: PASS\n')

-     return

- 

- 

- ################################################################################

- #

- # Test Auto Membership Plugin (2)

- #

- ################################################################################

- def test_automember(inst, args=None):

-     # stop the plugin, and start it

-     inst.plugins.disable(name=PLUGIN_AUTOMEMBER)

-     inst.plugins.enable(name=PLUGIN_AUTOMEMBER)

- 

-     if args == "restart":

-         return

- 

-     CONFIG_DN = 'cn=config,cn=' + PLUGIN_AUTOMEMBER + ',cn=plugins,cn=config'

- 

-     log.info('Testing ' + PLUGIN_AUTOMEMBER + '...')

- 

-     ############################################################################

-     # Configure plugin

-     ############################################################################

- 

-     # Add the automember group

-     try:

-         inst.add_s(Entry((GROUP_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'cn': 'group'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add group: error ' + e.message['desc'])

-         assert False

- 

-     # Add ou=branch1

-     try:

-         inst.add_s(Entry((BRANCH1_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'ou': 'branch1'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add branch1: error ' + e.message['desc'])

-         assert False

- 

-     # Add ou=branch2

-     try:

-         inst.add_s(Entry((BRANCH2_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'ou': 'branch2'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add branch2: error ' + e.message['desc'])

-         assert False

- 

-     # Add the automember config entry

-     try:

-         inst.add_s(Entry((CONFIG_DN, {

-             'objectclass': 'top autoMemberDefinition'.split(),

-             'cn': 'config',

-             'autoMemberScope': 'ou=branch1,' + DEFAULT_SUFFIX,

-             'autoMemberFilter': 'objectclass=top',

-             'autoMemberDefaultGroup': 'cn=group,' + DEFAULT_SUFFIX,

-             'autoMemberGroupingAttr': 'member:dn'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add config entry: error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test the plugin

-     ############################################################################

- 

-     # Add a user that should get added to the group

-     try:

-         inst.add_s(Entry((BUSER1_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'uid': 'user1'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add user: error ' + e.message['desc'])

-         assert False

- 

-     # Check the group

-     try:

-         entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,

-                                 '(member=' + BUSER1_DN + ')')

-         if not entries:

-             log.fatal('test_automember: Search failed to find member user1')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Search failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Change config

-     ############################################################################

- 

-     try:

-         inst.modify_s(CONFIG_DN,

-                       [(ldap.MOD_REPLACE, 'autoMemberGroupingAttr', 'uniquemember:dn'),

-                        (ldap.MOD_REPLACE, 'autoMemberScope', 'ou=branch2,' + DEFAULT_SUFFIX)])

- 

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to modify config entry: error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin

-     ############################################################################

- 

-     # Add a user that should get added to the group

-     try:

-         inst.add_s(Entry((BUSER2_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'uid': 'user2'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to user to branch2: error ' + e.message['desc'])

-         assert False

- 

-     # Check the group

-     try:

-         entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,

-                                 '(uniquemember=' + BUSER2_DN + ')')

-         if not entries:

-             log.fatal('test_automember: Search failed to find uniquemember user2')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Search failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test Task

-     ############################################################################

- 

-     # Disable plugin

-     inst.plugins.disable(name=PLUGIN_AUTOMEMBER)

- 

-     # Add an entry that should be picked up by automember - verify it is not(yet)

-     try:

-         inst.add_s(Entry((BUSER3_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'uid': 'user3'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to user3 to branch2: error ' + e.message['desc'])

-         assert False

- 

-     # Check the group - uniquemember should not exist

-     try:

-         entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,

-                                 '(uniquemember=' + BUSER3_DN + ')')

-         if entries:

-             log.fatal('test_automember: user3 was incorrectly added to the group')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Search failed: ' + e.message['desc'])

-         assert False

- 

-     # Enable plugin

-     inst.plugins.enable(name=PLUGIN_AUTOMEMBER)

- 

-     TASK_DN = 'cn=task-' + str(int(time.time())) + ',cn=automember rebuild membership,cn=tasks,cn=config'

-     # Add the task

-     try:

-         inst.add_s(Entry((TASK_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'basedn': 'ou=branch2,' + DEFAULT_SUFFIX,

-             'filter': 'objectclass=top'})))

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to add task: error ' + e.message['desc'])

-         assert False

- 

-     wait_for_task(inst, TASK_DN)

- 

-     # Verify the fixup task worked

-     try:

-         entries = inst.search_s(GROUP_DN, ldap.SCOPE_BASE,

-                                 '(uniquemember=' + BUSER3_DN + ')')

-         if not entries:

-             log.fatal('test_automember: user3 was not added to the group')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Search failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin dependency

-     ############################################################################

- 

-     test_dependency(inst, PLUGIN_AUTOMEMBER)

- 

-     ############################################################################

-     # Cleanup

-     ############################################################################

- 

-     try:

-         inst.delete_s(BUSER1_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete test entry1: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(BUSER2_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete test entry2: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(BUSER3_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete test entry3: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(BRANCH1_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete branch1: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(BRANCH2_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete test branch2: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(GROUP_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete test group: ' + e.message['desc'])

-         assert False

- 

-     try:

-         inst.delete_s(CONFIG_DN)

-     except ldap.LDAPError as e:

-         log.fatal('test_automember: Failed to delete plugin config entry: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test passed

-     ############################################################################

- 

-     log.info('test_automember: PASS\n')

-     return

- 

- 

- ################################################################################

- #

- # Test DNA Plugin (3)

- #

- ################################################################################

- def test_dna(inst, args=None):

-     # stop the plugin, and start it

-     inst.plugins.disable(name=PLUGIN_DNA)

-     inst.plugins.enable(name=PLUGIN_DNA)

- 

-     if args == "restart":

-         return

- 

-     CONFIG_DN = 'cn=config,cn=' + PLUGIN_DNA + ',cn=plugins,cn=config'

- 

-     log.info('Testing ' + PLUGIN_DNA + '...')

- 

-     ############################################################################

-     # Configure plugin

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((CONFIG_DN, {

-             'objectclass': 'top dnaPluginConfig'.split(),

-             'cn': 'config',

-             'dnatype': 'uidNumber',

-             'dnafilter': '(objectclass=top)',

-             'dnascope': DEFAULT_SUFFIX,

-             'dnaMagicRegen': '-1',

-             'dnaMaxValue': '50000',

-             'dnaNextValue': '1'

-         })))

-     except ldap.ALREADY_EXISTS:

-         try:

-             inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaNextValue', '1'),

-                                       (ldap.MOD_REPLACE, 'dnaMagicRegen', '-1')])

-         except ldap.LDAPError as e:

-             log.fatal('test_dna: Failed to set the DNA plugin: error ' + e.message['desc'])

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Failed to add config entry: error ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin

-     ############################################################################

- 

-     try:

-         inst.add_s(Entry((USER1_DN, {

-             'objectclass': 'top extensibleObject'.split(),

-             'uid': 'user1'

-         })))

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Failed to user1: error ' + e.message['desc'])

-         assert False

- 

-     # See if the entry now has the new uidNumber assignment - uidNumber=1

-     try:

-         entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=1)')

-         if not entries:

-             log.fatal('test_dna: user1 was not updated - (looking for uidNumber: 1)')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])

-         assert False

- 

-     # Test the magic regen value

-     try:

-         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-1')])

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])

-         assert False

- 

-     # See if the entry now has the new uidNumber assignment - uidNumber=2

-     try:

-         entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=2)')

-         if not entries:

-             log.fatal('test_dna: user1 was not updated (looking for uidNumber: 2)')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])

-         assert False

- 

-     ################################################################################

-     # Change the config

-     ################################################################################

- 

-     try:

-         inst.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'dnaMagicRegen', '-2')])

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Failed to set the magic reg value to -2: error ' + e.message['desc'])

-         assert False

- 

-     ################################################################################

-     # Test plugin

-     ################################################################################

- 

-     # Test the magic regen value

-     try:

-         inst.modify_s(USER1_DN, [(ldap.MOD_REPLACE, 'uidNumber', '-2')])

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Failed to set the magic reg value: error ' + e.message['desc'])

-         assert False

- 

-     # See if the entry now has the new uidNumber assignment - uidNumber=3

-     try:

-         entries = inst.search_s(USER1_DN, ldap.SCOPE_BASE, '(uidNumber=3)')

-         if not entries:

-             log.fatal('test_dna: user1 was not updated (looking for uidNumber: 3)')

-             assert False

-     except ldap.LDAPError as e:

-         log.fatal('test_dna: Search for user1 failed: ' + e.message['desc'])

-         assert False

- 

-     ############################################################################

-     # Test plugin dependency

-     ############################################################################

- 

-     test_dependency(inst, PLUGIN_AUTOMEMBER)

- 

-     ############################################################################

-     # Cleanup

-     ############################################################################

- 

-     try: