From d1f75af041b6bb8e3c0a4db3ed5c69fc5d6b46c8 Mon Sep 17 00:00:00 2001
From: Mark Reynolds
Date: Jan 23 2015 21:50:21 +0000
Subject: Ticket 47999 - lib389 individual tests not running correctly when run as a whole

Description: Each test module should delete the instances it creates, so that each new test truly starts fresh. That way, when all the tests are run together (e.g. under Jenkins, or via py.test), there are no conflicts and no unexpected leftover instances in the mix. Made sure all the tests call the "final" function, and that the "final" function deletes any instances the module created. Also fixed the formatting of all the scripts to comply with Python coding standards: removed lines that accidentally ended with ";", and fixed deprecated usage.

https://fedorahosted.org/389/ticket/47999

Reviewed by: nhosoi (Thanks!)

---

diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
index 288505b..2460ecc 100644
--- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
+++ b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py
@@ -513,7 +513,7 @@ def test_dynamic_plugins(topology): def test_dynamic_plugins_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated():
@@ -529,6 +529,8 @@ def run_isolated(): topo = topology(True) test_dynamic_plugins(topo) + test_dynamic_plugins_final(topo) + if __name__ == '__main__': run_isolated()
diff --git a/dirsrvtests/suites/schema/test_schema.py b/dirsrvtests/suites/schema/test_schema.py
index cbf8579..4629cc6 100644
--- a/dirsrvtests/suites/schema/test_schema.py
+++ b/dirsrvtests/suites/schema/test_schema.py
@@ -26,18 +26,20 @@ log = logging.getLogger(__name__) installation_prefix = None + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() self.standalone = standalone + @pytest.fixture(scope="module") def topology(request): ''' This fixture is used to create a DirSrv instance for the 'module'. At the beginning, there may already be an instance. There may also be a backup for the instance. - + Principle: If instance exists: restart it
@@ -55,30 +57,30 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - schemainst = DirSrv(verbose=False) - + schemainst = DirSrv(verbose=False) + # Args for the master instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE schemainst.allocate(args_instance) - + # Get the status of the backups - backup = schemainst.checkBackupFS() - + backup = schemainst.checkBackupFS() + # Get the status of the instance and restart it if it exists if schemainst.exists(): schemainst.stop(timeout=10) schemainst.start(timeout=10) - + if backup: - # The backup exists, assuming it is correct + # The backup exists, assuming it is correct # we just re-init the instance with it if not schemainst.exists(): schemainst.create() # Used to retrieve configuration information (dbdir, confdir...) schemainst.open() - + # restore from backup schemainst.stop(timeout=10) schemainst.restoreFS(backup)
@@ -89,30 +91,31 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. 
So even if we have a specific backup file # (e.g backup) we clear all backups that an instance may have created if backup: schemainst.clearBackupFS() - + # Remove all the instances if schemainst.exists(): schemainst.delete() - + # Create the instances schemainst.create() - schemainst.open() - + schemainst.open() + # Time to create the backup schemainst.stop(timeout=10) schemainst.backupfile = schemainst.backupFS() - schemainst.start(timeout=10) - # + schemainst.start(timeout=10) + # return TopologyStandalone(schemainst) attrclass = ldap.schema.models.AttributeType occlass = ldap.schema.models.ObjectClass + def ochasattr(subschema, oc, mustormay, attr, key): """See if the oc and any of its parents and ancestors have the given attr""" @@ -134,6 +137,7 @@ def ochasattr(subschema, oc, mustormay, attr, key): break return rc + def ochasattrs(subschema, oc, mustormay, attrs): key = mustormay + "dict" ret = [] @@ -142,6 +146,7 @@ def ochasattrs(subschema, oc, mustormay, attrs): ret.append(attr) return ret + def mycmp(v1, v2): v1ary, v2ary = [v1], [v2] if isinstance(v1, list) or isinstance(v1, tuple): @@ -156,6 +161,7 @@ def mycmp(v1, v2): return False return True + def ocgetdiffs(ldschema, oc1, oc2): fields = ['obsolete', 'names', 'desc', 'must', 'may', 'kind', 'sup'] ret = '' @@ -172,6 +178,7 @@ def ocgetdiffs(ldschema, oc1, oc2): ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, oc1.__dict__[field], oc2.__dict__[field]) return ret + def atgetparfield(subschema, at, field): v = None for nameoroid in at.sup: @@ -184,6 +191,7 @@ def atgetparfield(subschema, at, field): syntax_len_supported = False + def atgetdiffs(ldschema, at1, at2): fields = ['names', 'desc', 'obsolete', 'sup', 'equality', 'ordering', 'substr', 'syntax', 'single_value', 'collective', 'no_user_mod', 'usage'] @@ -197,6 +205,7 @@ def atgetdiffs(ldschema, at1, at2): ret = ret + '\t%s differs: [%s] vs. [%s]\n' % (field, at1.__dict__[field], at2.__dict__[field]) return ret + def test_schema_comparewithfiles(topology): '''Compare the schema from ldap cn=schema with the schema files''' retval = True @@ -235,24 +244,26 @@ def test_schema_comparewithfiles(topology): retval = False assert retval + def test_schema_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program ''' global installation_prefix - installation_prefix = os.environ.get('PREFIX') - + installation_prefix = os.environ.get('PREFIX') + topo = topology(True) test_schema_comparewithfiles(topo) - + test_schema_final(topo) if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47313_test.py b/dirsrvtests/tickets/ticket47313_test.py index a946f08..1907faa 100644 --- a/dirsrvtests/tickets/ticket47313_test.py +++ b/dirsrvtests/tickets/ticket47313_test.py @@ -18,7 +18,7 @@ log = logging.getLogger(__name__) installation_prefix = None ENTRY_NAME = 'test_entry' - + class TopologyStandalone(object): def __init__(self, standalone): @@ -32,7 +32,7 @@ def topology(request): This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. 
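In outline, the create-or-restore flow described below is (a condensed sketch of this fixture's own code; backup-cleanup details omitted):

    standalone = DirSrv(verbose=False)
    standalone.allocate(args_instance)
    backup = standalone.checkBackupFS()
    if standalone.exists():
        standalone.stop(timeout=5)
        standalone.start(timeout=10)
    if backup:
        # A backup exists: re-init the instance from it
        if not standalone.exists():
            standalone.create()
            standalone.open()
        standalone.stop(timeout=10)
        standalone.restoreFS(backup)
        standalone.start(timeout=10)
    else:
        # First run, or something went wrong: recreate everything
        if standalone.exists():
            standalone.delete()
        standalone.create()
        standalone.open()
        standalone.stop(timeout=10)
        standalone.backupfile = standalone.backupFS()
        standalone.start(timeout=10)
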
- + Principle: If standalone instance exists: restart it @@ -51,60 +51,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) - + if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -112,8 +112,8 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - - # + + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -125,19 +125,19 @@ def test_ticket47313_run(topology): """ It adds 2 test entries Search with filters including subtype and ! 
- It deletes the added entries + It deletes the added entries """ - + # bind as directory manager topology.standalone.log.info("Bind as %s" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + # enable filter error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] topology.standalone.modify_s(DN_CONFIG, mod) - + topology.standalone.log.info("\n\n######################### ADD ######################\n") - + # Prepare the entry with cn;fr & cn;en entry_name_fr = '%s fr' % (ENTRY_NAME) entry_name_en = '%s en' % (ENTRY_NAME) @@ -149,7 +149,7 @@ def test_ticket47313_run(topology): entry_both.setValues('cn', entry_name_both) entry_both.setValues('cn;fr', entry_name_fr) entry_both.setValues('cn;en', entry_name_en) - + # Prepare the entry with one member entry_name_en_only = '%s en only' % (ENTRY_NAME) entry_dn_en_only = 'cn=%s, %s' % (entry_name_en_only, SUFFIX) @@ -158,15 +158,15 @@ def test_ticket47313_run(topology): entry_en_only.setValues('sn', entry_name_en_only) entry_en_only.setValues('cn', entry_name_en_only) entry_en_only.setValues('cn;en', entry_name_en) - + topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_both, entry_both)) topology.standalone.add_s(entry_both) topology.standalone.log.info("Try to add Add %s: %r" % (entry_dn_en_only, entry_en_only)) topology.standalone.add_s(entry_en_only) - + topology.standalone.log.info("\n\n######################### SEARCH ######################\n") - + # filter: (&(cn=test_entry en only)(!(cn=test_entry fr))) myfilter = '(&(sn=%s)(!(cn=%s)))' % (entry_name_en_only, entry_name_fr) topology.standalone.log.info("Try to search with filter %s" % myfilter) @@ -174,7 +174,7 @@ def test_ticket47313_run(topology): assert len(ents) == 1 assert ents[0].sn == entry_name_en_only topology.standalone.log.info("Found %s" % ents[0].dn) - + # filter: (&(cn=test_entry en only)(!(cn;fr=test_entry fr))) myfilter = '(&(sn=%s)(!(cn;fr=%s)))' % (entry_name_en_only, entry_name_fr) topology.standalone.log.info("Try to search with filter %s" % myfilter) @@ -182,42 +182,41 @@ def test_ticket47313_run(topology): assert len(ents) == 1 assert ents[0].sn == entry_name_en_only topology.standalone.log.info("Found %s" % ents[0].dn) - + # filter: (&(cn=test_entry en only)(!(cn;en=test_entry en))) myfilter = '(&(sn=%s)(!(cn;en=%s)))' % (entry_name_en_only, entry_name_en) topology.standalone.log.info("Try to search with filter %s" % myfilter) ents = topology.standalone.search_s(SUFFIX, ldap.SCOPE_SUBTREE, myfilter) assert len(ents) == 0 topology.standalone.log.info("Found none") - + topology.standalone.log.info("\n\n######################### DELETE ######################\n") - + topology.standalone.log.info("Try to delete %s " % entry_dn_both) topology.standalone.delete_s(entry_dn_both) - + topology.standalone.log.info("Try to delete %s " % entry_dn_en_only) topology.standalone.delete_s(entry_dn_en_only) + def test_ticket47313_final(topology): - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '0')] - topology.standalone.modify_s(DN_CONFIG, mod) - - topology.standalone.stop(timeout=10) - + topology.standalone.delete() + + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47313_run(topo) - + test_ticket47313_final(topo) diff --git a/dirsrvtests/tickets/ticket47462_test.py b/dirsrvtests/tickets/ticket47462_test.py index a909b37..67b43d7 100644 --- a/dirsrvtests/tickets/ticket47462_test.py +++ b/dirsrvtests/tickets/ticket47462_test.py @@ -244,9 +244,14 @@ def test_ticket47462(topology): # try: topology.master1.modify_s(DES_PLUGIN, - [(ldap.MOD_REPLACE, 'nsslapd-pluginPath', 'libdes-plugin'), - (ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')]) + [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on')]) + except ldap.LDAPError, e: + log.fatal('Failed to enable DES plugin, error: ' + e.message['desc']) + assert False + try: + topology.master1.modify_s(DES_PLUGIN, + [(ldap.MOD_ADD, 'nsslapd-pluginarg2', 'description')]) except ldap.LDAPError, e: log.fatal('Failed to reset DES plugin, error: ' + e.message['desc']) assert False @@ -258,7 +263,7 @@ def test_ticket47462(topology): except ldap.NO_SUCH_ATTRIBUTE: pass except ldap.LDAPError, e: - log.fatal('Failed to reset DES plugin, error: ' + e.message['desc']) + log.fatal('Failed to reset MMR plugin, error: ' + e.message['desc']) assert False # @@ -428,8 +433,8 @@ def test_ticket47462(topology): def test_ticket47462_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() def run_isolated(): @@ -447,6 +452,8 @@ def run_isolated(): topo = topology(True) test_ticket47462(topo) + test_ticket47462_final(topo) + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47490_test.py b/dirsrvtests/tickets/ticket47490_test.py index a1946de..29d8d04 100644 --- a/dirsrvtests/tickets/ticket47490_test.py +++ b/dirsrvtests/tickets/ticket47490_test.py @@ -36,27 +36,29 @@ class TopologyMasterConsumer(object): def __init__(self, master, consumer): master.open() self.master = master - + consumer.open() self.consumer = consumer + def _header(topology, label): topology.master.log.info("\n\n###############################################") topology.master.log.info("#######") topology.master.log.info("####### %s" % label) topology.master.log.info("#######") topology.master.log.info("###################################################") - + + def pattern_errorlog(file, log_pattern): try: pattern_errorlog.last_pos += 1 except AttributeError: pattern_errorlog.last_pos = 0 - + found = None log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) file.seek(pattern_errorlog.last_pos) - + # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: @@ -65,11 +67,12 @@ def pattern_errorlog(file, log_pattern): found = log_pattern.search(line) if ((line == '') or (found)): break - + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) pattern_errorlog.last_pos = file.tell() return found + def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' @@ -78,37 +81,40 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST_OLD if not may: may = MAY_OLD - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc + def add_OC(instance, oid_ext, name): new_oc = _oc_definition(oid_ext, name) instance.schema.add_schema('objectClasses', new_oc) + def 
mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): old_oc = _oc_definition(oid_ext, name, old_must, old_may) new_oc = _oc_definition(oid_ext, name, new_must, new_may) instance.schema.del_schema('objectClasses', old_oc) instance.schema.add_schema('objectClasses', new_oc) + def support_schema_learning(topology): """ - with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn + with https://fedorahosted.org/389/ticket/47721, the supplier and consumer can learn schema definitions when a replication occurs. Before that ticket: replication of the schema fails requiring administrative operation In the test the schemaCSN (master consumer) differs - + After that ticket: replication of the schema succeeds (after an initial phase of learning) In the test the schema CSN (master consumer) are in sync - + This function returns True if 47721 is fixed in the current release False else """ ent = topology.consumer.getEntry(DN_CONFIG, ldap.SCOPE_BASE, "(cn=config)", ['nsslapd-versionstring']) if ent.hasAttr('nsslapd-versionstring'): val = ent.getValue('nsslapd-versionstring') - version = val.split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] + version = val.split('/')[1].split('.') # something like ['1', '3', '1', '23', 'final_fix'] major = int(version[0]) minor = int(version[1]) if major > 1: @@ -121,7 +127,8 @@ def support_schema_learning(topology): if int(version[2]) >= 3: return True return False - + + def trigger_update(topology): """ It triggers an update on the supplier. This will start a replication @@ -133,7 +140,7 @@ def trigger_update(topology): trigger_update.value = 1 replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_update.value))] topology.master.modify_s(ENTRY_DN, replace) - + # wait 10 seconds that the update is replicated loop = 0 while loop <= 10: @@ -149,7 +156,8 @@ def trigger_update(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + + def trigger_schema_push(topology): ''' Trigger update to create a replication session. @@ -165,8 +173,7 @@ def trigger_schema_push(topology): topology.master.agreement.pause(ra.dn) topology.master.agreement.resume(ra.dn) trigger_update(topology) - - + @pytest.fixture(scope="module") def topology(request): @@ -175,7 +182,7 @@ def topology(request): The replicated topology is MASTER -> Consumer. At the beginning, It may exists a master instance and/or a consumer instance. It may also exists a backup for the master and/or the consumer. 
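One hardening this patch applies to the replication fixtures: the loop that waits for the first replicated entry used to fall through silently if replication never converged, and the patch adds an explicit assertion. Condensed from the fixture code below:

    # Check replication is working fine
    master.add_s(Entry((TEST_REPL_DN, {
        'objectclass': "top person".split(),
        'sn': 'test_repl',
        'cn': 'test_repl'})))
    ent = None
    loop = 0
    while loop <= 10:
        try:
            ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
            loop += 1
    if ent is None:
        # The entry never replicated within ~10 seconds: fail the setup
        assert False
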
- + Principle: If master instance exists: restart it @@ -199,17 +206,17 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + master = DirSrv(verbose=False) consumer = DirSrv(verbose=False) - + # Args for the master instance args_instance[SER_HOST] = HOST_MASTER args_instance[SER_PORT] = PORT_MASTER args_instance[SER_SERVERID_PROP] = SERVERID_MASTER args_master = args_instance.copy() master.allocate(args_master) - + # Args for the consumer instance args_instance[SER_HOST] = HOST_CONSUMER args_instance[SER_PORT] = PORT_CONSUMER @@ -217,40 +224,39 @@ def topology(request): args_consumer = args_instance.copy() consumer.allocate(args_consumer) - # Get the status of the backups backup_master = master.checkBackupFS() backup_consumer = consumer.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_master = master.exists() + instance_master = master.exists() if instance_master: master.stop(timeout=10) master.start(timeout=10) - + instance_consumer = consumer.exists() if instance_consumer: consumer.stop(timeout=10) consumer.start(timeout=10) - + if backup_master and backup_consumer: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master: master.create() # Used to retrieve configuration information (dbdir, confdir...) master.open() - + if not instance_consumer: consumer.create() # Used to retrieve configuration information (dbdir, confdir...) consumer.open() - + # restore master from backup master.stop(timeout=10) master.restoreFS(backup_master) master.start(timeout=10) - + # restore consumer from backup consumer.stop(timeout=10) consumer.restoreFS(backup_consumer) @@ -261,55 +267,56 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master: master.clearBackupFS() if backup_consumer: consumer.clearBackupFS() - + # Remove all the instances if instance_master: master.delete() if instance_consumer: consumer.delete() - + # Create the instances master.create() master.open() consumer.create() consumer.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) master.waitForReplInit(repl_agreement) - + # Check replication is working fine master.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) + ent = None loop = 0 while loop <= 10: try: @@ -318,12 +325,14 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master.stop(timeout=10) master.backupfile = master.backupFS() master.start(timeout=10) - + consumer.stop(timeout=10) consumer.backupfile = consumer.backupFS() consumer.start(timeout=10) @@ -331,7 +340,7 @@ def topology(request): # clear the tmp directory master.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working. Either coming from a backup recovery # or from a fresh (re)init @@ -340,24 +349,25 @@ def topology(request): def test_ticket47490_init(topology): - """ + """ Initialize the test environment """ log.debug("test_ticket47490_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # the test case will check if a warning message is logged in the + # the test case will check if a warning message is logged in the # error log of the supplier topology.master.errorlog_file = open(topology.master.errlog, "r") - + # This entry will be used to trigger attempt of schema push topology.master.add_s(Entry((ENTRY_DN, { 'objectclass': "top person".split(), 'sn': 'test_entry', 'cn': 'test_entry'}))) - + + def test_ticket47490_one(topology): """ Summary: Extra OC Schema is pushed - no error - + If supplier schema is a superset (one extra OC) of consumer schema, then schema is pushed and there is no message in the error log State at startup: @@ -366,58 +376,60 @@ def test_ticket47490_one(topology): Final state - supplier +masterNewOCA - consumer +masterNewOCA - + """ _header(topology, "Extra OC Schema is pushed - no error") - + log.debug("test_ticket47490_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # update the schema of the supplier so that it is a superset of + # update the schema of the supplier so that it is a superset of # consumer. 
Schema should be pushed add_OC(topology.master, 2, 'masterNewOCA') - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was updated on the consumer log.debug("test_ticket47490_one master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47490_one onsumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + if res is not None: + assert False + + def test_ticket47490_two(topology): """ Summary: Extra OC Schema is pushed - (ticket 47721 allows to learn missing def) - + If consumer schema is a superset (one extra OC) of supplier schema, then schema is pushed and there is a message in the error log - State at startup + State at startup - supplier +masterNewOCA - consumer +masterNewOCA Final state - supplier +masterNewOCA +masterNewOCB - consumer +masterNewOCA +consumerNewOCA - + """ - + _header(topology, "Extra OC Schema is pushed - (ticket 47721 allows to learn missing def)") - + # add this OC on consumer. Supplier will no push the schema add_OC(topology.consumer, 1, 'consumerNewOCA') - + # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) add_OC(topology.master, 3, 'masterNewOCB') - + # now push the scheam trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was NOT updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_ticket47490_two master_schema_csn=%s", master_schema_csn) @@ -432,13 +444,14 @@ def test_ticket47490_two(topology): regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) + def test_ticket47490_three(topology): """ Summary: Extra OC Schema is pushed - no error - + If supplier schema is again a superset (one extra OC), then schema is pushed and there is no message in the error log - State at startup + State at startup - supplier +masterNewOCA +masterNewOCB - consumer +masterNewOCA +consumerNewOCA Final state @@ -447,16 +460,16 @@ def test_ticket47490_three(topology): """ _header(topology, "Extra OC Schema is pushed - no error") - + # Do an upate to trigger the schema push attempt # add this OC on consumer. 
Supplier will no push the schema add_OC(topology.master, 1, 'consumerNewOCA') - + # now push the scheam trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was NOT updated on the consumer log.debug("test_ticket47490_three master_schema_csn=%s", master_schema_csn) log.debug("test_ticket47490_three consumer_schema_csn=%s", consumer_schema_csn) @@ -465,15 +478,17 @@ def test_ticket47490_three(topology): # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + if res is not None: + assert False + + def test_ticket47490_four(topology): """ Summary: Same OC - extra MUST: Schema is pushed - no error - + If supplier schema is again a superset (OC with more MUST), then schema is pushed and there is no message in the error log - State at startup + State at startup - supplier +masterNewOCA +masterNewOCB +consumerNewOCA - consumer +masterNewOCA +masterNewOCB +consumerNewOCA Final state @@ -481,34 +496,35 @@ def test_ticket47490_four(topology): +must=telexnumber - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +must=telexnumber - + """ _header(topology, "Same OC - extra MUST: Schema is pushed - no error") - + mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) - - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was updated on the consumer log.debug("test_ticket47490_four master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47490_four onsumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + if res is not None: + assert False + + def test_ticket47490_five(topology): """ Summary: Same OC - extra MUST: Schema is pushed - (fix for 47721) - + If consumer schema is a superset (OC with more MUST), then schema is pushed (fix for 47721) and there is a message in the error log - State at startup + State at startup - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +must=telexnumber - consumer +masterNewOCA +masterNewOCB +consumerNewOCA @@ -518,24 +534,24 @@ def test_ticket47490_five(topology): +must=telexnumber - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +must=telexnumber +must=telexnumber - + Note: replication log is enabled to get more details """ _header(topology, "Same OC - extra MUST: Schema is pushed - (fix for 47721)") - + # get more detail why it fails topology.master.enableReplLogging() - + # add telenumber to 'consumerNewOCA' on the consumer mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) # add a new OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) add_OC(topology.master, 4, 'masterNewOCC') - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was NOT 
updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_ticket47490_five master_schema_csn=%s", master_schema_csn) @@ -544,59 +560,59 @@ def test_ticket47490_five(topology): assert master_schema_csn == consumer_schema_csn else: assert master_schema_csn != consumer_schema_csn - + # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - + def test_ticket47490_six(topology): """ Summary: Same OC - extra MUST: Schema is pushed - no error - + If supplier schema is again a superset (OC with more MUST), then schema is pushed and there is no message in the error log - State at startup + State at startup - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +must=telexnumber +must=telexnumber Final state - + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber - + Note: replication log is enabled to get more details """ _header(topology, "Same OC - extra MUST: Schema is pushed - no error") - # add telenumber to 'consumerNewOCA' on the consumer mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_OLD) - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was NOT updated on the consumer log.debug("test_ticket47490_six master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47490_six onsumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None + if res is not None: + assert False def test_ticket47490_seven(topology): """ Summary: Same OC - extra MAY: Schema is pushed - no error - + If supplier schema is again a superset (OC with more MAY), then schema is pushed and there is no message in the error log State at startup @@ -610,32 +626,32 @@ def test_ticket47490_seven(topology): +may=postOfficeBox - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber - +may=postOfficeBox + +may=postOfficeBox """ _header(topology, "Same OC - extra MAY: Schema is pushed - no error") mod_OC(topology.master, 2, 'masterNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was updated on the consumer log.debug("test_ticket47490_seven master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47490_seven consumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = 
pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + if res is not None: + assert False + def test_ticket47490_eight(topology): """ Summary: Same OC - extra MAY: Schema is pushed (fix for 47721) - + If consumer schema is a superset (OC with more MAY), then schema is pushed (fix for 47721) and there is message in the error log State at startup @@ -651,20 +667,20 @@ def test_ticket47490_eight(topology): +may=postOfficeBox +may=postOfficeBox - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox + +may=postOfficeBox +may=postOfficeBox """ _header(topology, "Same OC - extra MAY: Schema is pushed (fix for 47721)") - + mod_OC(topology.consumer, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) # modify OC on the supplier so that its nsSchemaCSN is larger than the consumer (wait 2s) time.sleep(2) mod_OC(topology.master, 4, 'masterNewOCC', old_must=MUST_OLD, new_must=MUST_OLD, old_may=MAY_OLD, new_may=MAY_NEW) - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was not updated on the consumer # with 47721, supplier learns the missing definition log.debug("test_ticket47490_eight master_schema_csn=%s", master_schema_csn) @@ -673,17 +689,17 @@ def test_ticket47490_eight(topology): assert master_schema_csn == consumer_schema_csn else: assert master_schema_csn != consumer_schema_csn - + # Check the error log of the supplier does not contain an error # This message may happen during the learning phase regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - - + + def test_ticket47490_nine(topology): """ Summary: Same OC - extra MAY: Schema is pushed - no error - + If consumer schema is a superset (OC with more MAY), then schema is not pushed and there is message in the error log State at startup @@ -692,10 +708,10 @@ def test_ticket47490_nine(topology): +may=postOfficeBox +may=postOfficeBox - consumer +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber - +may=postOfficeBox +may=postOfficeBox + +may=postOfficeBox +may=postOfficeBox Final state - + - supplier +masterNewOCA +masterNewOCB +consumerNewOCA +masterNewOCC +must=telexnumber +must=telexnumber +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox @@ -704,38 +720,41 @@ def test_ticket47490_nine(topology): +may=postOfficeBox +may=postOfficeBox +may=postOfficeBox """ _header(topology, "Same OC - extra MAY: Schema is pushed - no error") - + mod_OC(topology.master, 1, 'consumerNewOCA', old_must=MUST_NEW, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - + trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was updated on the consumer log.debug("test_ticket47490_nine master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47490_nine onsumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + if res is not None: + 
assert False + + def test_ticket47490_final(topology): - topology.master.stop(timeout=10) - topology.consumer.stop(timeout=10) + topology.master.delete() + topology.consumer.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47490_init(topo) test_ticket47490_one(topo) @@ -747,7 +766,7 @@ def run_isolated(): test_ticket47490_seven(topo) test_ticket47490_eight(topo) test_ticket47490_nine(topo) - + test_ticket47490_final(topo) diff --git a/dirsrvtests/tickets/ticket47553_ger.py b/dirsrvtests/tickets/ticket47553_ger.py index d688c70..72df885 100644 --- a/dirsrvtests/tickets/ticket47553_ger.py +++ b/dirsrvtests/tickets/ticket47553_ger.py @@ -19,7 +19,7 @@ from lib389._constants import * from lib389.properties import * from constants import * from lib389._constants import REPLICAROLE_MASTER -from ldap.controls.simple import GetEffectiveRightsControl +from ldap.controls.simple import GetEffectiveRightsControl logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -54,11 +54,12 @@ MAX_ACCOUNTS = 20 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -70,7 +71,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. 
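For reference, the get-effective-rights checks exercised throughout this module follow this python-ldap pattern (condensed from the tests below, which assert on the 'n' flag in entryLevelRights to detect moddn rights):

    from ldap.controls.simple import GetEffectiveRightsControl

    request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN)
    msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE,
                                         "objectclass=*", serverctrls=[request_ctrl])
    rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id)
    for dn, attrs in rdata:
        value = attrs['entryLevelRights'][0]
        # 'n' present in entryLevelRights => the bound user may rename (moddn) the entry
        assert 'n' in value
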
- + Principle: If master1 instance exists: restart it @@ -94,22 +95,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -117,40 +118,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() + instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -161,48 +161,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -212,13 +212,14 @@ def topology(request): master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -226,20 +227,22 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) # clear the tmp directory master1.clearTmpDir(__file__) - - # + + # # Here we have two instances master and consumer # with replication working. 
Either coming from a backup recovery # or from a fresh (re)init @@ -247,29 +250,31 @@ def topology(request): return TopologyMaster1Master2(master1, master2) - def _bind_manager(topology): topology.master1.log.info("Bind as %s " % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + + def _bind_normal(topology): # bind as bind_entry topology.master1.log.info("Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + + def _moddn_aci_deny_tree(topology, mod_type=None, target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): ''' It denies the access moddn_to in cn=except,cn=accounts,SUFFIX ''' - assert mod_type != None - + if mod_type is None: + assert False + ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) if target_to: ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) - + ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT @@ -277,11 +282,12 @@ def _moddn_aci_deny_tree(topology, mod_type=None, target_from=STAGING_DN, target #topology.master1.modify_s(SUFFIX, mod) topology.master1.log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) topology.master1.modify_s(PROD_EXCEPT_DN, mod) - -def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGING_DN, target_to=PRODUCTION_DN): - assert mod_type != None +def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGING_DN, target_to=PRODUCTION_DN): + if mod_type is None: + assert False + ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: @@ -295,9 +301,11 @@ def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGIN mod = [(mod_type, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) + def _moddn_aci_from_production_to_staging(topology, mod_type=None): - assert mod_type != None - + if mod_type is None: + assert False + ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % (PRODUCTION_DN, STAGING_DN) ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN @@ -313,11 +321,11 @@ def test_ticket47553_init(topology): - a production DIT - add accounts in staging DIT - enable ACL logging (commented for performance reason) - + """ - + topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n") - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -325,37 +333,33 @@ def test_ticket47553_init(topology): 'sn': BIND_CN, 'cn': BIND_CN, 'userpassword': BIND_PW}))) - + # DIT for staging topology.master1.log.info("Add %s" % STAGING_DN) topology.master1.add_s(Entry((STAGING_DN, { 'objectclass': "top organizationalRole".split(), 'cn': STAGING_CN, 'description': "staging DIT"}))) - + # DIT for production topology.master1.log.info("Add %s" % PRODUCTION_DN) topology.master1.add_s(Entry((PRODUCTION_DN, { 'objectclass': "top organizationalRole".split(), 'cn': PRODUCTION_CN, 'description': "production DIT"}))) - + # DIT for production/except topology.master1.log.info("Add %s" % PROD_EXCEPT_DN) topology.master1.add_s(Entry((PROD_EXCEPT_DN, { 'objectclass': "top organizationalRole".split(), 'cn': EXCEPT_CN, 'description': "production except DIT"}))) - + # enable acl error logging #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] 
#topology.master1.modify_s(DN_CONFIG, mod) #topology.master2.modify_s(DN_CONFIG, mod) - - - - # add dummy entries in the staging DIT for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) @@ -369,11 +373,11 @@ def test_ticket47553_mode_default_add_deny(topology): ''' This test case checks that the ADD operation fails (no ADD aci on production) ''' - + topology.master1.log.info("\n\n######################### mode moddn_aci : ADD (should fail) ######################\n") - + _bind_normal(topology) - + # # First try to add an entry in production => INSUFFICIENT_ACCESS # @@ -389,42 +393,44 @@ def test_ticket47553_mode_default_add_deny(topology): topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + def test_ticket47553_mode_default_ger_no_moddn(topology): topology.master1.log.info("\n\n######################### mode moddn_aci : GER no moddn ######################\n") - request_ctrl = GetEffectiveRightsControl(criticality=True,authzId="dn: " + BIND_DN) + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) - rtype,rdata,rmsgid,response_ctrl = topology.master1.result3(msg_id) - ger={} - value='' + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' for dn, attrs in rdata: - topology.master1.log.info ("dn: %s" % dn) + topology.master1.log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] - topology.master1.log.info ("############### entryLevelRights: %r" % value) + topology.master1.log.info("############### entryLevelRights: %r" % value) assert 'n' not in value - + + def test_ticket47553_mode_default_ger_with_moddn(topology): ''' This test case adds the moddn aci and check ger contains 'n' ''' - + topology.master1.log.info("\n\n######################### mode moddn_aci: GER with moddn ######################\n") # successfull MOD with the ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True,authzId="dn: " + BIND_DN) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) - rtype,rdata,rmsgid,response_ctrl = topology.master1.result3(msg_id) - ger={} + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} value = '' for dn, attrs in rdata: - topology.master1.log.info ("dn: %s" % dn) + topology.master1.log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] - topology.master1.log.info ("############### entryLevelRights: %r" % value) + topology.master1.log.info("############### entryLevelRights: %r" % value) assert 'n' in value # successfull MOD with the both ACI @@ -432,53 +438,57 @@ def test_ticket47553_mode_default_ger_with_moddn(topology): _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) + def test_ticket47553_mode_switch_default_to_legacy(topology): ''' This test switch the server from default mode to legacy ''' - topology.master1.log.info("\n\n######################### Disable the moddn aci mod ######################\n" ) + topology.master1.log.info("\n\n######################### 
Disable the moddn aci mod ######################\n") _bind_manager(topology) mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] topology.master1.modify_s(DN_CONFIG, mod) - + + def test_ticket47553_mode_legacy_ger_no_moddn1(topology): topology.master1.log.info("\n\n######################### mode legacy 1: GER no moddn ######################\n") - request_ctrl = GetEffectiveRightsControl(criticality=True,authzId="dn: " + BIND_DN) + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) - rtype,rdata,rmsgid,response_ctrl = topology.master1.result3(msg_id) - ger={} - value='' + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' for dn, attrs in rdata: - topology.master1.log.info ("dn: %s" % dn) + topology.master1.log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] - topology.master1.log.info ("############### entryLevelRights: %r" % value) + topology.master1.log.info("############### entryLevelRights: %r" % value) assert 'n' not in value + def test_ticket47553_mode_legacy_ger_no_moddn2(topology): topology.master1.log.info("\n\n######################### mode legacy 2: GER no moddn ######################\n") # successfull MOD with the ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True,authzId="dn: " + BIND_DN) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) - rtype,rdata,rmsgid,response_ctrl = topology.master1.result3(msg_id) - ger={} - value='' + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' for dn, attrs in rdata: - topology.master1.log.info ("dn: %s" % dn) + topology.master1.log.info("dn: %s" % dn) value = attrs['entryLevelRights'][0] - topology.master1.log.info ("############### entryLevelRights: %r" % value) + topology.master1.log.info("############### entryLevelRights: %r" % value) assert 'n' not in value - - # successfull MOD with the both ACI + + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - + + def test_ticket47553_mode_legacy_ger_with_moddn(topology): topology.master1.log.info("\n\n######################### mode legacy : GER with moddn ######################\n") @@ -493,34 +503,35 @@ def test_ticket47553_mode_legacy_ger_with_moddn(topology): mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) _bind_normal(topology) - - request_ctrl = GetEffectiveRightsControl(criticality=True,authzId="dn: " + BIND_DN) + + request_ctrl = GetEffectiveRightsControl(criticality=True, authzId="dn: " + BIND_DN) msg_id = topology.master1.search_ext(PRODUCTION_DN, ldap.SCOPE_SUBTREE, "objectclass=*", serverctrls=[request_ctrl]) - rtype,rdata,rmsgid,response_ctrl = topology.master1.result3(msg_id) - ger={} - value='' + rtype, rdata, rmsgid, response_ctrl = topology.master1.result3(msg_id) + #ger={} + value = '' for dn, attrs in rdata: - topology.master1.log.info ("dn: %s" % dn) + topology.master1.log.info("dn: %s" % dn) value = 
attrs['entryLevelRights'][0] - topology.master1.log.info ("############### entryLevelRights: %r" % value) + topology.master1.log.info("############### entryLevelRights: %r" % value) assert 'n' in value - + # successfull MOD with the both ACI _bind_manager(topology) mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) _bind_normal(topology) - - + + def test_ticket47553_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -529,7 +540,7 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) topo.master1.log.info("\n\n######################### Ticket 47553 ######################\n") test_ticket47553_init(topo) @@ -542,10 +553,8 @@ def run_isolated(): test_ticket47553_mode_legacy_ger_no_moddn1(topo) test_ticket47553_mode_legacy_ger_no_moddn2(topo) test_ticket47553_mode_legacy_ger_with_moddn(topo) - - test_ticket47553_final(topo) - + test_ticket47553_final(topo) if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47553_single_aci_test.py b/dirsrvtests/tickets/ticket47553_single_aci_test.py index 4be2470..34e3eec 100644 --- a/dirsrvtests/tickets/ticket47553_single_aci_test.py +++ b/dirsrvtests/tickets/ticket47553_single_aci_test.py @@ -53,11 +53,12 @@ MAX_ACCOUNTS = 20 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -69,7 +70,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. 
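The cleanup convention this commit enforces in every module, as a minimal sketch (the ticket number and test names here are placeholders): the "final" function deletes the instances rather than merely stopping them, and run_isolated() always ends by calling it:

    def test_ticketXXXXX_final(topology):
        # Delete (not just stop) the instances so the next module starts fresh
        topology.master1.delete()
        topology.master2.delete()

    def run_isolated():
        global installation1_prefix
        global installation2_prefix
        installation1_prefix = None
        installation2_prefix = None

        topo = topology(True)
        test_ticketXXXXX_run(topo)
        test_ticketXXXXX_final(topo)
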
- + Principle: If master1 instance exists: restart it @@ -93,22 +94,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -116,40 +117,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -160,48 +160,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -211,13 +211,14 @@ def topology(request): master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -225,20 +226,22 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) # clear the tmp directory master1.clearTmpDir(__file__) - - # + + # # Here we have two instances master and consumer # with replication working. 
Either coming from a backup recovery # or from a fresh (re)init @@ -246,29 +249,30 @@ def topology(request): return TopologyMaster1Master2(master1, master2) - def _bind_manager(topology): topology.master1.log.info("Bind as %s " % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + + def _bind_normal(topology): # bind as bind_entry topology.master1.log.info("Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + + def _moddn_aci_deny_tree(topology, mod_type=None, target_from=STAGING_DN, target_to=PROD_EXCEPT_DN): ''' It denies the access moddn_to in cn=except,cn=accounts,SUFFIX ''' - assert mod_type != None - + assert mod_type is not None + ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: ACI_TARGET_FROM = "(target_from = \"ldap:///%s\")" % (target_from) if target_to: ACI_TARGET_TO = "(target_to = \"ldap:///%s\")" % (target_to) - + ACI_ALLOW = "(version 3.0; acl \"Deny MODDN to prod_except\"; deny (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_TARGET_TO + ACI_TARGET_FROM + ACI_ALLOW + ACI_SUBJECT @@ -276,11 +280,11 @@ def _moddn_aci_deny_tree(topology, mod_type=None, target_from=STAGING_DN, target #topology.master1.modify_s(SUFFIX, mod) topology.master1.log.info("Add a DENY aci under %s " % PROD_EXCEPT_DN) topology.master1.modify_s(PROD_EXCEPT_DN, mod) - -def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGING_DN, target_to=PRODUCTION_DN): - assert mod_type != None +def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGING_DN, target_to=PRODUCTION_DN): + assert mod_type is not None + ACI_TARGET_FROM = "" ACI_TARGET_TO = "" if target_from: @@ -294,9 +298,10 @@ def _moddn_aci_staging_to_production(topology, mod_type=None, target_from=STAGIN mod = [(mod_type, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) + def _moddn_aci_from_production_to_staging(topology, mod_type=None): - assert mod_type != None - + assert mod_type is not None + ACI_TARGET = "(target_from = \"ldap:///%s\") (target_to = \"ldap:///%s\")" % (PRODUCTION_DN, STAGING_DN) ACI_ALLOW = "(version 3.0; acl \"MODDN from production to staging\"; allow (moddn)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN @@ -312,11 +317,11 @@ def test_ticket47553_init(topology): - a production DIT - add accounts in staging DIT - enable ACL logging (commented for performance reason) - + """ - + topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n") - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -324,37 +329,33 @@ def test_ticket47553_init(topology): 'sn': BIND_CN, 'cn': BIND_CN, 'userpassword': BIND_PW}))) - + # DIT for staging topology.master1.log.info("Add %s" % STAGING_DN) topology.master1.add_s(Entry((STAGING_DN, { 'objectclass': "top organizationalRole".split(), 'cn': STAGING_CN, 'description': "staging DIT"}))) - + # DIT for production topology.master1.log.info("Add %s" % PRODUCTION_DN) topology.master1.add_s(Entry((PRODUCTION_DN, { 'objectclass': "top organizationalRole".split(), 'cn': PRODUCTION_CN, 'description': "production DIT"}))) - + # DIT for production/except topology.master1.log.info("Add %s" % PROD_EXCEPT_DN) topology.master1.add_s(Entry((PROD_EXCEPT_DN, { 'objectclass': "top organizationalRole".split(), 'cn': EXCEPT_CN, 'description': "production except DIT"}))) - + # enable acl error logging #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] #topology.master1.modify_s(DN_CONFIG, 
mod) #topology.master2.modify_s(DN_CONFIG, mod) - - - - # add dummy entries in the staging DIT for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) @@ -368,11 +369,11 @@ def test_ticket47553_add(topology): ''' This test case checks that the ADD operation fails (no ADD aci on production) ''' - + topology.master1.log.info("\n\n######################### ADD (should fail) ######################\n") - + _bind_normal(topology) - + # # First try to add an entry in production => INSUFFICIENT_ACCESS # @@ -388,13 +389,14 @@ def test_ticket47553_add(topology): topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) + def test_ticket47553_delete(topology): ''' This test case checks that the DEL operation fails (no 'delete' aci on production) ''' - + topology.master1.log.info("\n\n######################### DELETE (should fail) ######################\n") - + _bind_normal(topology) # # Second try to delete an entry in staging => INSUFFICIENT_ACCESS @@ -407,23 +409,23 @@ def test_ticket47553_delete(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + def test_ticket47553_moddn_staging_prod_0(topology): ''' This test case MOVE entry NEW_ACCOUNT0 from staging to prod target_to/target_from: equality filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (0) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s0" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -435,38 +437,38 @@ def test_ticket47553_moddn_staging_prod_0(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to and from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - - + + def test_ticket47553_moddn_staging_prod_1(topology): ''' This test case MOVE entry NEW_ACCOUNT1 from staging to prod target_to/target_from: substring/equality filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (1) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s1" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -478,38 +480,39 @@ def test_ticket47553_moddn_staging_prod_1(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to substring/ from equality filter ######################\n") 
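# (Illustrative aside, not part of the patch.) The _moddn_aci_* helpers
# used around each rename below concatenate an ACI value and then
# MOD_ADD / MOD_DELETE it on the 'aci' attribute of the suffix entry.
# With made-up DNs and an assumed acl name, the value looks roughly like:
#
#   aci = ('(target_from = "ldap:///cn=staging,dc=example,dc=com")'
#          '(target_to = "ldap:///cn=accounts,dc=example,dc=com")'
#          '(version 3.0; acl "MODDN staging to production"; allow (moddn)'
#          ' userdn = "ldap:///uid=bind_entry,dc=example,dc=com";)')
#   topology.master1.modify_s(SUFFIX, [(ldap.MOD_ADD, 'aci', aci)])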
_bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_PATTERN) _bind_normal(topology) - - + + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_PATTERN) _bind_normal(topology) + def test_ticket47553_moddn_staging_prod_2(topology): ''' This test case fails to MOVE entry NEW_ACCOUNT2 from staging to prod because of bad pattern ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (2) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s2" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -521,14 +524,14 @@ def test_ticket47553_moddn_staging_prod_2(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to substring (BAD)/ from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=BAD_PRODUCTION_PATTERN) _bind_normal(topology) - + try: topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) @@ -537,27 +540,28 @@ def test_ticket47553_moddn_staging_prod_2(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=BAD_PRODUCTION_PATTERN) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_3(topology): ''' This test case MOVE entry NEW_ACCOUNT3 from staging to prod target_to/target_from: equality/substring filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (3) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s3" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -569,38 +573,38 @@ def test_ticket47553_moddn_staging_prod_3(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to:equality filter / from substring filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_PATTERN, target_to=PRODUCTION_DN) _bind_normal(topology) - - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, 
mod_type=ldap.MOD_DELETE, target_from=STAGING_PATTERN, target_to=PRODUCTION_DN) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_4(topology): ''' This test case fails to MOVE entry NEW_ACCOUNT4 from staging to prod because of bad pattern ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (4) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s4" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -612,14 +616,13 @@ def test_ticket47553_moddn_staging_prod_4(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to: equality filter/ from: substring (BAD) ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=BAD_STAGING_PATTERN, target_to=PRODUCTION_DN) _bind_normal(topology) - + try: topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) @@ -628,27 +631,28 @@ def test_ticket47553_moddn_staging_prod_4(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=BAD_STAGING_PATTERN, target_to=PRODUCTION_DN) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_5(topology): ''' This test case MOVE entry NEW_ACCOUNT5 from staging to prod target_to/target_from: substring/substring filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (5) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s5" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -660,38 +664,37 @@ def test_ticket47553_moddn_staging_prod_5(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to:substring filter / from: substring filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_PATTERN, target_to=PRODUCTION_PATTERN) _bind_normal(topology) - - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_PATTERN, target_to=PRODUCTION_PATTERN) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_6(topology): ''' This test case MOVE entry NEW_ACCOUNT6 from staging to prod target_to/target_from: substring/ filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (6) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s6" % 
NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -703,38 +706,37 @@ def test_ticket47553_moddn_staging_prod_6(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to:substring filter / from: empty ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=None, target_to=PRODUCTION_PATTERN) _bind_normal(topology) - - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=None, target_to=PRODUCTION_PATTERN) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_7(topology): ''' This test case MOVE entry NEW_ACCOUNT7 from staging to prod target_to/target_from: /substring filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (7) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s7" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -746,39 +748,37 @@ def test_ticket47553_moddn_staging_prod_7(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to: empty/ from: substring filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_PATTERN, target_to=None) _bind_normal(topology) - - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_PATTERN, target_to=None) _bind_normal(topology) - - + + def test_ticket47553_moddn_staging_prod_8(topology): ''' This test case MOVE entry NEW_ACCOUNT8 from staging to prod target_to/target_from: / filter ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (8) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s8" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -790,23 +790,22 @@ def test_ticket47553_moddn_staging_prod_8(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to: empty/ from: empty ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=None, target_to=None) _bind_normal(topology) - - + 
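# (Illustrative note.) Passing target_from=None and target_to=None, as
# just above, leaves ACI_TARGET_FROM / ACI_TARGET_TO as empty strings,
# so the generated ACI carries no target clauses at all and constrains
# neither end of the MODDN; with an assumed acl name it reduces to:
#
#   aci = ('(version 3.0; acl "MODDN staging to production"; allow (moddn)'
#          ' userdn = "ldap:///uid=bind_entry,dc=example,dc=com";)')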
topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=None, target_to=None) _bind_normal(topology) - + + def test_ticket47553_moddn_staging_prod_9(topology): ''' This test case disable the 'moddn' right so a MODDN requires a 'add' right @@ -814,22 +813,22 @@ def test_ticket47553_moddn_staging_prod_9(topology): It fails to MOVE entry NEW_ACCOUNT9 from staging to prod. Add a 'add' right to prod. Then it succeeds to MOVE NEW_ACCOUNT9 from staging to prod. - + Then enable the 'moddn' right so a MODDN requires a 'moddn' right It fails to MOVE entry NEW_ACCOUNT10 from staging to prod. Add a 'moddn' right to prod. Then it succeeds to MOVE NEW_ACCOUNT10 from staging to prod. ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (9) ######################\n") - - _bind_normal(topology) + + _bind_normal(topology) old_rdn = "cn=%s9" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -841,21 +840,21 @@ def test_ticket47553_moddn_staging_prod_9(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + ############################################ # Now do tests with no support of moddn aci ############################################ - topology.master1.log.info("Disable the moddn right" ) + topology.master1.log.info("Disable the moddn right") _bind_manager(topology) mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'off')] topology.master1.modify_s(DN_CONFIG, mod) - + # Add the moddn aci that will not be evaluated because of the config flag topology.master1.log.info("\n\n######################### MOVE to and from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - + # It will fail because it will test the ADD right try: topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) @@ -866,51 +865,50 @@ def test_ticket47553_moddn_staging_prod_9(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # remove the moddn aci _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - - # + + # # add the 'add' right to the production DN # Then do a successfull moddn # ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_ALLOW + ACI_SUBJECT - + _bind_manager(topology) mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.master1.modify_s(PRODUCTION_DN, mod) _bind_normal(topology) - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + _bind_manager(topology) mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] topology.master1.modify_s(PRODUCTION_DN, mod) _bind_normal(topology) - - + ############################################ # Now do 
tests with support of moddn aci ############################################ - topology.master1.log.info("Enable the moddn right" ) + topology.master1.log.info("Enable the moddn right") _bind_manager(topology) mod = [(ldap.MOD_REPLACE, CONFIG_MODDN_ACI_ATTR, 'on')] topology.master1.modify_s(DN_CONFIG, mod) - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (10) ######################\n") - - _bind_normal(topology) + + _bind_normal(topology) old_rdn = "cn=%s10" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -922,20 +920,20 @@ def test_ticket47553_moddn_staging_prod_9(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - # + + # # add the 'add' right to the production DN # Then do a failing moddn # ACI_ALLOW = "(version 3.0; acl \"ADD rights to allow moddn\"; allow (add)" ACI_SUBJECT = " userdn = \"ldap:///%s\";)" % BIND_DN ACI_BODY = ACI_ALLOW + ACI_SUBJECT - + _bind_manager(topology) mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.master1.modify_s(PRODUCTION_DN, mod) _bind_normal(topology) - + try: topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) @@ -945,41 +943,42 @@ def test_ticket47553_moddn_staging_prod_9(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + _bind_manager(topology) mod = [(ldap.MOD_DELETE, 'aci', ACI_BODY)] topology.master1.modify_s(PRODUCTION_DN, mod) _bind_normal(topology) - + # Add the moddn aci that will be evaluated because of the config flag topology.master1.log.info("\n\n######################### MOVE to and from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # remove the moddn aci _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _bind_normal(topology) - + + def test_ticket47553_moddn_prod_staging(topology): ''' This test checks that we can move ACCOUNT11 from staging to prod but not move back ACCOUNT11 from prod to staging ''' topology.master1.log.info("\n\n######################### MOVE staging -> Prod (11) ######################\n") - + _bind_normal(topology) - + old_rdn = "cn=%s11" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -991,25 +990,22 @@ def test_ticket47553_moddn_prod_staging(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to and from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) 
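# (Recap of test_ticket47553_moddn_staging_prod_9 above; illustrative,
# not patch content.) The cn=config switch nsslapd-moddn-aci decides
# which right a MODDN operation is checked against:
#
#   mod = [(ldap.MOD_REPLACE, 'nsslapd-moddn-aci', 'off')]  # legacy: needs 'add'
#   topology.master1.modify_s(DN_CONFIG, mod)
#
#   mod = [(ldap.MOD_REPLACE, 'nsslapd-moddn-aci', 'on')]   # needs 'moddn'
#   topology.master1.modify_s(DN_CONFIG, mod)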
_bind_normal(topology) - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - - - # + # Now check we can not move back the entry to staging old_rdn = "cn=%s11" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, PRODUCTION_DN) new_rdn = old_rdn new_superior = STAGING_DN - + try: topology.master1.log.info("Try to move back MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) @@ -1019,7 +1015,7 @@ def test_ticket47553_moddn_prod_staging(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) @@ -1030,14 +1026,14 @@ def test_ticket47553_check_repl_M2_to_M1(topology): ''' Checks that replication is still working M2->M1, using ACCOUNT12 ''' - + topology.master1.log.info("Bind as %s (M2)" % DN_DM) topology.master2.simple_bind_s(DN_DM, PASSWORD) - + rdn = "cn=%s12" % NEW_ACCOUNT dn = "%s,%s" % (rdn, STAGING_DN) - - # First wait for the ACCOUNT19 entry being replicated on M2 + + # First wait for the ACCOUNT19 entry being replicated on M2 loop = 0 while loop <= 10: try: @@ -1047,41 +1043,41 @@ def test_ticket47553_check_repl_M2_to_M1(topology): time.sleep(1) loop += 1 assert loop <= 10 - - + attribute = 'description' tested_value = 'Hello world' mod = [(ldap.MOD_ADD, attribute, tested_value)] topology.master1.log.info("Update (M2) %s (%s)" % (dn, attribute)) topology.master2.modify_s(dn, mod) - + loop = 0 while loop <= 10: ent = topology.master1.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") - assert ent != None + assert ent is not None if ent.hasAttr(attribute) and (ent.getValue(attribute) == tested_value): break - + time.sleep(1) loop += 1 assert loop < 10 topology.master1.log.info("Update %s (%s) replicated on M1" % (dn, attribute)) + def test_ticket47553_moddn_staging_prod_except(topology): ''' This test case MOVE entry NEW_ACCOUNT13 from staging to prod but fails to move entry NEW_ACCOUNT14 from staging to prod_except ''' - + topology.master1.log.info("\n\n######################### MOVE staging -> Prod (13) ######################\n") _bind_normal(topology) - + old_rdn = "cn=%s13" % NEW_ACCOUNT old_dn = "%s,%s" % (old_rdn, STAGING_DN) new_rdn = old_rdn new_superior = PRODUCTION_DN - - # + + # # Try to rename without the apropriate ACI => INSUFFICIENT_ACCESS # try: @@ -1093,18 +1089,17 @@ def test_ticket47553_moddn_staging_prod_except(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # successfull MOD with the ACI topology.master1.log.info("\n\n######################### MOVE to and from equality filter ######################\n") _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_ADD, target_from=STAGING_DN, target_to=PRODUCTION_DN) _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_ADD) _bind_normal(topology) - + topology.master1.log.info("Try to MODDN %s -> %s,%s" % (old_dn, new_rdn, new_superior)) topology.master1.rename_s(old_dn, new_rdn, newsuperior=new_superior) - + # # Now try to move an entry under except # @@ -1122,21 +1117,23 @@ def test_ticket47553_moddn_staging_prod_except(topology): except Exception as 
e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # successfull MOD with the both ACI _bind_manager(topology) _moddn_aci_staging_to_production(topology, mod_type=ldap.MOD_DELETE, target_from=STAGING_DN, target_to=PRODUCTION_DN) _moddn_aci_deny_tree(topology, mod_type=ldap.MOD_DELETE) _bind_normal(topology) - + + def test_ticket47553_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -1145,12 +1142,11 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) topo.master1.log.info("\n\n######################### Ticket 47553 ######################\n") test_ticket47553_init(topo) - # Check that without appropriate aci we are not allowed to add/delete test_ticket47553_add(topo) test_ticket47553_delete(topo) @@ -1162,27 +1158,25 @@ def run_isolated(): test_ticket47553_moddn_staging_prod_3(topo) test_ticket47553_moddn_staging_prod_4(topo) test_ticket47553_moddn_staging_prod_5(topo) - + # tests the ACI with undefined 'target_to'/'target_from' test_ticket47553_moddn_staging_prod_6(topo) test_ticket47553_moddn_staging_prod_7(topo) test_ticket47553_moddn_staging_prod_8(topo) - + # Check we can control the behavior with nsslapd-moddn-aci test_ticket47553_moddn_staging_prod_9(topo) - + # Check we can move entry 'from' -> 'to' but not 'to' -> 'from' test_ticket47553_moddn_prod_staging(topo) - + # check replication is still working test_ticket47553_check_repl_M2_to_M1(topo) - + # check DENY rule is working test_ticket47553_moddn_staging_prod_except(topo) - - test_ticket47553_final(topo) - + test_ticket47553_final(topo) if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47560_test.py b/dirsrvtests/tickets/ticket47560_test.py index af7fdc3..a4b4433 100644 --- a/dirsrvtests/tickets/ticket47560_test.py +++ b/dirsrvtests/tickets/ticket47560_test.py @@ -17,6 +17,7 @@ log = logging.getLogger(__name__) installation_prefix = None + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -29,7 +30,7 @@ def topology(request): This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. 
- + Principle: If standalone instance exists: restart it @@ -48,60 +49,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) - + if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) 
standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -110,7 +111,7 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -126,21 +127,21 @@ def test_ticket47560(topology): - Create entry cn=member,SUFFIX - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - Enable Memberof Plugins - - # Here the cn=member entry has a 'memberOf' but + + # Here the cn=member entry has a 'memberOf' but # cn=group entry does not contain 'cn=member' in its member - + TEST CASE - start the fixupmemberof task - read the cn=member entry - check 'memberOf is now empty - + TEARDOWN - Delete entry cn=group,SUFFIX - Delete entry cn=member,SUFFIX - Disable Memberof Plugins """ - + def _enable_disable_mbo(value): """ Enable or disable mbo plugin depending on 'value' ('on'/'off') @@ -152,15 +153,15 @@ def test_ticket47560(topology): topology.standalone.plugins.disable(name=PLUGIN_MEMBER_OF) log.debug("-------------> _enable_disable_mbo(%s)" % value) - + topology.standalone.stop(timeout=120) time.sleep(1) topology.standalone.start(timeout=120) time.sleep(3) - + # need to reopen a connection toward the instance topology.standalone.open() - + def _test_ticket47560_setup(): """ - Create entry cn=group,SUFFIX @@ -168,10 +169,10 @@ def test_ticket47560(topology): - Update 'cn=member,SUFFIX' to add "memberOf: cn=group,SUFFIX" - Enable Memberof Plugins """ - log.debug( "-------- > _test_ticket47560_setup\n") - + log.debug("-------- > _test_ticket47560_setup\n") + # - # By default the memberof plugin is disabled create + # By default the memberof plugin is disabled create # - create a group entry # - create a member entry # - set the member entry as memberof the group entry @@ -182,8 +183,7 @@ def test_ticket47560(topology): try: topology.standalone.add_s(entry) except ldap.ALREADY_EXISTS: - log.debug( "Entry %s already exists" % (group_DN)) - + log.debug("Entry %s already exists" % (group_DN)) entry = Entry(member_DN) entry.setValues('objectclass', 'top', 'person', 'organizationalPerson', 'inetorgperson', 'inetUser') @@ -193,18 +193,16 @@ def test_ticket47560(topology): try: topology.standalone.add_s(entry) except ldap.ALREADY_EXISTS: - log.debug( "Entry %s already exists" % (member_DN)) - + log.debug("Entry %s already exists" % (member_DN)) + replace = [(ldap.MOD_REPLACE, 'memberof', group_DN)] topology.standalone.modify_s(member_DN, replace) - - + # # enable the memberof plugin and restart the instance # _enable_disable_mbo('on') - - + # # check memberof attribute is still present # @@ -215,15 +213,15 @@ def test_ticket47560(topology): #print ent value = ent.getValue('memberof') #print "memberof: %s" % (value) - assert value == group_DN - + assert value == group_DN + def _test_ticket47560_teardown(): """ - Delete entry cn=group,SUFFIX - Delete entry cn=member,SUFFIX - Disable Memberof Plugins """ - log.debug( "-------- > _test_ticket47560_teardown\n") + log.debug("-------- > _test_ticket47560_teardown\n") # remove the entries group_DN and member_DN try: topology.standalone.delete_s(group_DN) @@ -238,70 +236,69 @@ def test_ticket47560(topology): # _enable_disable_mbo('off') - - group_DN = "cn=group,%s" % (SUFFIX) member_DN = "uid=member,%s" % (SUFFIX) - + # # Initialize the test case # _test_ticket47560_setup() - - # + + # # start the test # - start the fixup task # - check the entry is 
fixed (no longer memberof the group) # - log.debug( "-------- > Start ticket tests\n") - + log.debug("-------- > Start ticket tests\n") + filt = 'uid=member' ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) assert len(ents) == 1 ent = ents[0] - log.debug( "Unfixed entry %r\n" % ent) - + log.debug("Unfixed entry %r\n" % ent) + # run the fixup task topology.standalone.tasks.fixupMemberOf(suffix=SUFFIX, args={TASK_WAIT: True}) - + ents = topology.standalone.search_s(member_DN, ldap.SCOPE_BASE, filt) assert len(ents) == 1 ent = ents[0] - log.debug( "Fixed entry %r\n" % ent) - + log.debug("Fixed entry %r\n" % ent) + if ent.getValue('memberof') == group_DN: log.warning("Error the fixupMemberOf did not fix %s" % (member_DN)) result_successful = False else: result_successful = True - + # # cleanup up the test case # - _test_ticket47560_teardown() - - assert result_successful == True + _test_ticket47560_teardown() + + assert result_successful is True + def test_ticket47560_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47560(topo) test_ticket47560_final(topo) + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47573_test.py b/dirsrvtests/tickets/ticket47573_test.py index 356873d..2641d0b 100644 --- a/dirsrvtests/tickets/ticket47573_test.py +++ b/dirsrvtests/tickets/ticket47573_test.py @@ -33,24 +33,26 @@ MAY_OLD = "(postalCode $ street)" MUST_NEW = "(postalAddress $ preferredLocale)" MAY_NEW = "(telexNumber $ postalCode $ street)" + class TopologyMasterConsumer(object): def __init__(self, master, consumer): master.open() self.master = master - + consumer.open() self.consumer = consumer + def pattern_errorlog(file, log_pattern): try: pattern_errorlog.last_pos += 1 except AttributeError: pattern_errorlog.last_pos = 0 - + found = None log.debug("_pattern_errorlog: start at offset %d" % pattern_errorlog.last_pos) file.seek(pattern_errorlog.last_pos) - + # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: @@ -59,11 +61,12 @@ def pattern_errorlog(file, log_pattern): found = log_pattern.search(line) if ((line == '') or (found)): break - + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) pattern_errorlog.last_pos = file.tell() return found + def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47573' @@ -72,20 +75,23 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST_OLD if not may: may = MAY_OLD - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc + def add_OC(instance, oid_ext, name): new_oc = _oc_definition(oid_ext, name) instance.schema.add_schema('objectClasses', new_oc) + def mod_OC(instance, oid_ext, name, old_must=None, old_may=None, new_must=None, new_may=None): old_oc = _oc_definition(oid_ext, name, old_must, old_may) new_oc = _oc_definition(oid_ext, name, new_must, new_may) 
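# The two schema calls below emulate an in-place objectclass edit: the
# old definition string is deleted, then the new one added. Typical
# call, as made later in this file (sketch):
#
#   mod_OC(topology.master, 2, 'OCwithMayAttr',
#          old_must=MUST_OLD, new_must=MUST_NEW,
#          old_may=MAY_OLD, new_may=MAY_NEW)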
instance.schema.del_schema('objectClasses', old_oc) instance.schema.add_schema('objectClasses', new_oc) - + + def trigger_schema_push(topology): """ It triggers an update on the supplier. This will start a replication @@ -97,7 +103,7 @@ def trigger_schema_push(topology): trigger_schema_push.value = 1 replace = [(ldap.MOD_REPLACE, 'telephonenumber', str(trigger_schema_push.value))] topology.master.modify_s(ENTRY_DN, replace) - + # wait 10 seconds that the update is replicated loop = 0 while loop <= 10: @@ -114,6 +120,7 @@ def trigger_schema_push(topology): time.sleep(1) loop += 1 + @pytest.fixture(scope="module") def topology(request): ''' @@ -121,7 +128,7 @@ def topology(request): The replicated topology is MASTER -> Consumer. At the beginning, It may exists a master instance and/or a consumer instance. It may also exists a backup for the master and/or the consumer. - + Principle: If master instance exists: restart it @@ -145,17 +152,17 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + master = DirSrv(verbose=False) consumer = DirSrv(verbose=False) - + # Args for the master instance args_instance[SER_HOST] = HOST_MASTER args_instance[SER_PORT] = PORT_MASTER args_instance[SER_SERVERID_PROP] = SERVERID_MASTER args_master = args_instance.copy() master.allocate(args_master) - + # Args for the consumer instance args_instance[SER_HOST] = HOST_CONSUMER args_instance[SER_PORT] = PORT_CONSUMER @@ -163,40 +170,40 @@ def topology(request): args_consumer = args_instance.copy() consumer.allocate(args_consumer) - + # Get the status of the backups backup_master = master.checkBackupFS() backup_consumer = consumer.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_master = master.exists() + instance_master = master.exists() if instance_master: master.stop(timeout=10) master.start(timeout=10) - + instance_consumer = consumer.exists() if instance_consumer: consumer.stop(timeout=10) consumer.start(timeout=10) - + if backup_master and backup_consumer: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master: master.create() # Used to retrieve configuration information (dbdir, confdir...) master.open() - + if not instance_consumer: consumer.create() # Used to retrieve configuration information (dbdir, confdir...) consumer.open() - + # restore master from backup master.stop(timeout=10) master.restoreFS(backup_master) master.start(timeout=10) - + # restore consumer from backup consumer.stop(timeout=10) consumer.restoreFS(backup_consumer) @@ -207,56 +214,57 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master: master.clearBackupFS() if backup_consumer: consumer.clearBackupFS() - + # Remove all the instances if instance_master: master.delete() if instance_consumer: consumer.delete() - + # Create the instances master.create() master.open() consumer.create() consumer.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) master.waitForReplInit(repl_agreement) - + # Check replication is working fine master.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -264,12 +272,14 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master.stop(timeout=10) master.backupfile = master.backupFS() master.start(timeout=10) - + consumer.stop(timeout=10) consumer.backupfile = consumer.backupFS() consumer.start(timeout=10) @@ -277,7 +287,7 @@ def topology(request): # clear the tmp directory master.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working. Either coming from a backup recovery # or from a fresh (re)init @@ -286,74 +296,74 @@ def topology(request): def test_ticket47573_init(topology): - """ + """ Initialize the test environment """ log.debug("test_ticket47573_init topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # the test case will check if a warning message is logged in the + # the test case will check if a warning message is logged in the # error log of the supplier topology.master.errorlog_file = open(topology.master.errlog, "r") - + # This entry will be used to trigger attempt of schema push topology.master.add_s(Entry((ENTRY_DN, { 'objectclass': "top person".split(), 'sn': 'test_entry', 'cn': 'test_entry'}))) - + + def test_ticket47573_one(topology): """ Summary: Add a custom OC with MUST and MAY MUST = postalAddress $ preferredLocale MAY = telexNumber $ postalCode $ street - + Final state - supplier +OCwithMayAttr - consumer +OCwithMayAttr - + """ log.debug("test_ticket47573_one topology %r (master %r, consumer %r" % (topology, topology.master, topology.consumer)) - # update the schema of the supplier so that it is a superset of + # update the schema of the supplier so that it is a superset of # consumer. 
Schema should be pushed - new_oc = _oc_definition(2, 'OCwithMayAttr', - must = MUST_OLD, + new_oc = _oc_definition(2, 'OCwithMayAttr', + must = MUST_OLD, may = MAY_OLD) topology.master.schema.add_schema('objectClasses', new_oc) - trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was updated on the consumer log.debug("test_ticket47573_one master_schema_csn=%s", master_schema_csn) log.debug("ctest_ticket47573_one onsumer_schema_csn=%s", consumer_schema_csn) assert master_schema_csn == consumer_schema_csn - + # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None - + assert res is None + + def test_ticket47573_two(topology): """ Summary: Change OCwithMayAttr to move a MAY attribute to a MUST attribute - - + + Final state - supplier OCwithMayAttr updated - consumer OCwithMayAttr updated - + """ - + # Update the objectclass so that a MAY attribute is moved to MUST attribute mod_OC(topology.master, 2, 'OCwithMayAttr', old_must=MUST_OLD, new_must=MUST_NEW, old_may=MAY_OLD, new_may=MAY_NEW) - # now push the scheam trigger_schema_push(topology) master_schema_csn = topology.master.schema.get_schema_csn() consumer_schema_csn = topology.consumer.schema.get_schema_csn() - + # Check the schemaCSN was NOT updated on the consumer log.debug("test_ticket47573_two master_schema_csn=%s", master_schema_csn) log.debug("test_ticket47573_two consumer_schema_csn=%s", consumer_schema_csn) @@ -362,7 +372,8 @@ def test_ticket47573_two(topology): # Check the error log of the supplier does not contain an error regex = re.compile("must not be overwritten \(set replication log for additional info\)") res = pattern_errorlog(topology.master.errorlog_file, regex) - assert res == None + assert res is None + def test_ticket47573_three(topology): ''' @@ -370,7 +381,7 @@ def test_ticket47573_three(topology): ''' # Check replication is working fine dn = "cn=ticket47573, %s" % SUFFIX - topology.master.add_s(Entry((dn, + topology.master.add_s(Entry((dn, {'objectclass': "top person OCwithMayAttr".split(), 'sn': 'test_repl', 'cn': 'test_repl', @@ -379,6 +390,7 @@ def test_ticket47573_three(topology): 'telexNumber': '12$us$21', 'postalCode': '54321'}))) loop = 0 + ent = None while loop <= 10: try: ent = topology.consumer.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") @@ -386,30 +398,32 @@ def test_ticket47573_three(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - assert loop <= 10 - + if ent is None: + assert False + + def test_ticket47573_final(topology): - topology.master.stop(timeout=10) - topology.consumer.stop(timeout=10) + topology.master.delete() + topology.consumer.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47573_init(topo) test_ticket47573_one(topo) test_ticket47573_two(topo) test_ticket47573_three(topo) - test_ticket47573_final(topo) diff --git a/dirsrvtests/tickets/ticket47619_test.py b/dirsrvtests/tickets/ticket47619_test.py index af391ce..e3e7846 100644 --- a/dirsrvtests/tickets/ticket47619_test.py +++ b/dirsrvtests/tickets/ticket47619_test.py @@ -30,13 +30,14 @@ ENTRY_DN = "cn=test_entry, %s" % SUFFIX OTHER_NAME = 'other_entry' MAX_OTHERS = 100 -ATTRIBUTES = [ 'street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber' ] +ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber'] + class TopologyMasterConsumer(object): def __init__(self, master, consumer): master.open() self.master = master - + consumer.open() self.consumer = consumer @@ -51,7 +52,7 @@ def topology(request): The replicated topology is MASTER -> Consumer. At the beginning, It may exists a master instance and/or a consumer instance. It may also exists a backup for the master and/or the consumer. - + Principle: If master instance exists: restart it @@ -75,17 +76,17 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + master = DirSrv(verbose=False) consumer = DirSrv(verbose=False) - + # Args for the master instance args_instance[SER_HOST] = HOST_MASTER args_instance[SER_PORT] = PORT_MASTER args_instance[SER_SERVERID_PROP] = SERVERID_MASTER args_master = args_instance.copy() master.allocate(args_master) - + # Args for the consumer instance args_instance[SER_HOST] = HOST_CONSUMER args_instance[SER_PORT] = PORT_CONSUMER @@ -93,40 +94,40 @@ def topology(request): args_consumer = args_instance.copy() consumer.allocate(args_consumer) - + # Get the status of the backups backup_master = master.checkBackupFS() backup_consumer = consumer.checkBackupFS() - + # Get the status of the instance and restart it if it exists instance_master = master.exists() if instance_master: master.stop(timeout=10) master.start(timeout=10) - + instance_consumer = consumer.exists() if instance_consumer: consumer.stop(timeout=10) consumer.start(timeout=10) - + if backup_master and backup_consumer: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master: master.create() # Used to retrieve configuration information (dbdir, confdir...) master.open() - + if not instance_consumer: consumer.create() # Used to retrieve configuration information (dbdir, confdir...) consumer.open() - + # restore master from backup master.stop(timeout=10) master.restoreFS(backup_master) master.start(timeout=10) - + # restore consumer from backup consumer.stop(timeout=10) consumer.restoreFS(backup_consumer) @@ -137,56 +138,57 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master: master.clearBackupFS() if backup_consumer: consumer.clearBackupFS() - + # Remove all the instances if instance_master: master.delete() if instance_consumer: consumer.delete() - + # Create the instances master.create() master.open() consumer.create() consumer.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) master.waitForReplInit(repl_agreement) - + # Check replication is working fine master.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -194,12 +196,14 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master.stop(timeout=10) master.backupfile = master.backupFS() master.start(timeout=10) - + consumer.stop(timeout=10) consumer.backupfile = consumer.backupFS() consumer.start(timeout=10) @@ -207,7 +211,7 @@ def topology(request): # clear the tmp directory master.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working. 
Either coming from a backup recovery # or from a fresh (re)init @@ -216,7 +220,7 @@ def topology(request): def test_ticket47619_init(topology): - """ + """ Initialize the test environment """ topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG) @@ -224,13 +228,12 @@ def test_ticket47619_init(topology): #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY) topology.master.stop(timeout=10) topology.master.start(timeout=10) - + topology.master.log.info("test_ticket47619_init topology %r" % (topology)) - # the test case will check if a warning message is logged in the + # the test case will check if a warning message is logged in the # error log of the supplier topology.master.errorlog_file = open(topology.master.errlog, "r") - - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -238,20 +241,21 @@ def test_ticket47619_init(topology): 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) - + topology.master.log.info("test_ticket47619_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1)) - + # Check the number of entries in the retro changelog time.sleep(2) ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)") assert len(ents) == MAX_OTHERS + def test_ticket47619_create_index(topology): - args = {INDEX_TYPE: 'eq'} for attr in ATTRIBUTES: topology.master.index.create(suffix=RETROCL_SUFFIX, attr=attr, args=args) + def test_ticket47619_reindex(topology): ''' Reindex all the attributes in ATTRIBUTES @@ -261,31 +265,34 @@ def test_ticket47619_reindex(topology): rc = topology.master.tasks.reindex(suffix=RETROCL_SUFFIX, attrname=attr, args=args) assert rc == 0 + def test_ticket47619_check_indexed_search(topology): for attr in ATTRIBUTES: ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_SUBTREE, "(%s=hello)" % attr) assert len(ents) == 0 - + + def test_ticket47619_final(topology): - topology.master.stop(timeout=10) - topology.consumer.stop(timeout=10) + topology.master.delete() + topology.consumer.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
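Since each module's final function now deletes its instances, a run_isolated variant could guarantee that cleanup even when a test case raises, so the next run still starts fresh. A sketch only, using this module's own functions, and not what the patch itself does:

    def run_isolated():
        topo = topology(True)
        try:
            test_ticket47619_init(topo)
            test_ticket47619_create_index(topo)
            test_ticket47619_reindex(topo)
            test_ticket47619_check_indexed_search(topo)
        finally:
            # delete the instances even if an assertion failed above
            test_ticket47619_final(topo)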
- set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47619_init(topo) - + test_ticket47619_create_index(topo) - + # important restart that trigger the hang # at restart, finding the new 'changelog' backend, the backend is acquired in Read # preventing the reindex task to complete diff --git a/dirsrvtests/tickets/ticket47653MMR_test.py b/dirsrvtests/tickets/ticket47653MMR_test.py index bce69c8..3c4c1ef 100644 --- a/dirsrvtests/tickets/ticket47653MMR_test.py +++ b/dirsrvtests/tickets/ticket47653MMR_test.py @@ -44,7 +44,8 @@ BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME - + + def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' @@ -53,14 +54,16 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST if not may: may = MAY - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc + + class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -72,7 +75,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. - + Principle: If master1 instance exists: restart it @@ -96,22 +99,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -119,40 +122,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) 
master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -163,48 +165,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -214,13 +216,14 @@ def topology(request): master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -228,12 +231,14 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) @@ -241,7 +246,7 @@ def topology(request): # clear the tmp directory master1.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working. 
Either coming from a backup recovery # or from a fresh (re)init @@ -255,15 +260,13 @@ def test_ticket47653_init(topology): - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci - + """ - - + topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(2, OC_NAME, must = MUST, may = MAY) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) topology.master1.schema.add_schema('objectClasses', new_oc) - - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -271,18 +274,18 @@ def test_ticket47653_init(topology): 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) - + # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128+8192))] # ACL + REPL + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - + # get read of anonymous ACI for use 'read-search' aci in SEARCH test ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] topology.master1.modify_s(SUFFIX, mod) topology.master2.modify_s(SUFFIX, mod) - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -291,22 +294,23 @@ def test_ticket47653_init(topology): 'sn': name, 'cn': name}))) + def test_ticket47653_add(topology): ''' This test ADD an entry on MASTER1 where 47653 is fixed. Then it checks that entry is replicated on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 and check the update on MASTER1 - - It checks that, bound as bind_entry, + + It checks that, bound as bind_entry, - we can not ADD an entry without the proper SELFDN aci. 
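The literal str(128 + 8192) in these init functions sets two nsslapd-errorlog-level bits at once; naming them keeps the intent visible. The values follow the patch's own comment (128 for ACL processing, 8192 for replication):

    import ldap

    LOG_ACL = 128    # ACL processing
    LOG_REPL = 8192  # replication debugging

    mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(LOG_ACL | LOG_REPL))]
    # applied to both suppliers in test_ticket47653_init:
    #   topology.master1.modify_s(DN_CONFIG, mod)
    #   topology.master2.modify_s(DN_CONFIG, mod)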
- with the proper ACI we can not ADD with 'member' attribute - with the proper ACI and 'member' it succeeds to ADD ''' topology.master1.log.info("\n\n######################### ADD ######################\n") - + # bind as bind_entry topology.master1.log.info("Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + # Prepare the entry with multivalued members entry_with_members = Entry(ENTRY_DN) entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') @@ -320,7 +324,7 @@ def test_ticket47653_add(topology): members.append("cn=%s,%s" % (name, SUFFIX)) members.append(BIND_DN) entry_with_members.setValues('member', members) - + # Prepare the entry with only one member value entry_with_member = Entry(ENTRY_DN) entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') @@ -331,21 +335,20 @@ def test_ticket47653_add(topology): member = [] member.append(BIND_DN) entry_with_member.setValues('member', member) - + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.master1.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) - + topology.master1.add_s(entry_with_member) except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # Ok Now add the proper ACI topology.master1.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" @@ -353,11 +356,11 @@ def test_ticket47653_add(topology): ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.master1.log.info("Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.master1.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) @@ -370,7 +373,7 @@ def test_ticket47653_add(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS # member should contain only one value try: @@ -379,10 +382,10 @@ def test_ticket47653_add(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) topology.master1.add_s(entry_with_member) - + # # Now check the entry as been replicated # @@ -397,12 +400,12 @@ def test_ticket47653_add(topology): time.sleep(1) loop += 1 assert loop <= 10 - + # Now update the entry on Master2 (as DM because 47653 is possibly not fixed on M2) topology.master1.log.info("Update %s on M2" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] topology.master2.modify_s(ENTRY_DN, mod) - + topology.master1.simple_bind_s(DN_DM, PASSWORD) loop = 0 while loop <= 10: @@ -413,25 +416,26 @@ def test_ticket47653_add(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + assert ent.getValue('description') == 'test_add' - + + def test_ticket47653_modify(topology): ''' This test MOD an entry on MASTER1 
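For reference, the SELFDN "add" ACI above is assembled by plain string concatenation. A runnable illustration with example values; the ACI_SUBJECT clause is not visible in the hunk above, so the value below is a hypothetical placeholder for a selfDN-style subject:

    SUFFIX = 'dc=example,dc=com'  # example value only
    OC_NAME = 'OCticket47653'

    ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX
    ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME
    ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)"
    ACI_SUBJECT = " userattr = \"member#selfDN\";)"  # hypothetical placeholder
    print(ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT)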
where 47653 is fixed. Then it checks that update is replicated on MASTER2 (even if on MASTER2 47653 is NOT fixed). Then update on MASTER2 (bound as BIND_DN). This update may fail whether or not 47653 is fixed on MASTER2 - It checks that, bound as bind_entry, + It checks that, bound as bind_entry, - we can not modify an entry without the proper SELFDN aci. - adding the ACI, we can modify the entry ''' # bind as bind_entry topology.master1.log.info("Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + topology.master1.log.info("\n\n######################### MODIFY ######################\n") - + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.master1.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) @@ -440,12 +444,11 @@ def test_ticket47653_modify(topology): except Exception as e: topology.master1.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # Ok Now add the proper ACI topology.master1.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME @@ -454,26 +457,24 @@ def test_ticket47653_modify(topology): ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.master1.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.master1.log.info("M1: Bind as %s" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + # modify the entry and checks the value topology.master1.log.info("M1: Try to modify %s. It should succeeds" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] topology.master1.modify_s(ENTRY_DN, mod) - + topology.master1.log.info("M1: Bind as %s" % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + topology.master1.log.info("M1: Check the update of %s" % ENTRY_DN) ents = topology.master1.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 assert ents[0].postalCode == '1928' - - - # + # Now check the update has been replicated on M2 topology.master1.log.info("M2: Bind as %s" % DN_DM) topology.master2.simple_bind_s(DN_DM, PASSWORD) @@ -489,8 +490,7 @@ def test_ticket47653_modify(topology): loop += 1 assert loop <= 10 assert ent.getValue('postalCode') == '1928' - - + # Now update the entry on Master2 bound as BIND_DN (update may fail if 47653 is not fixed on M2) topology.master1.log.info("M2: Update %s (bound as %s)" % (ENTRY_DN, BIND_DN)) topology.master2.simple_bind_s(BIND_DN, PASSWORD) @@ -505,7 +505,7 @@ def test_ticket47653_modify(topology): except Exception as e: topology.master1.log.info("M2: Exception (not expected): %s" % type(e).__name__) assert 0 - + if not fail: # Check the update has been replicaed on M1 topology.master1.log.info("M1: Bind as %s" % DN_DM) @@ -521,15 +521,17 @@ def test_ticket47653_modify(topology): time.sleep(1) loop += 1 assert ent.getValue('postalCode') == '1929' - + + def test_ticket47653_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -538,18 +540,15 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) test_ticket47653_init(topo) - + test_ticket47653_add(topo) test_ticket47653_modify(topo) - - test_ticket47653_final(topo) - + test_ticket47653_final(topo) if __name__ == '__main__': run_isolated() - diff --git a/dirsrvtests/tickets/ticket47653_test.py b/dirsrvtests/tickets/ticket47653_test.py index e0dedbc..c217596 100644 --- a/dirsrvtests/tickets/ticket47653_test.py +++ b/dirsrvtests/tickets/ticket47653_test.py @@ -31,7 +31,8 @@ BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME - + + def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' @@ -40,7 +41,7 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST if not may: may = MAY - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc @@ -57,7 +58,7 @@ def topology(request): This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. - + Principle: If standalone instance exists: restart it @@ -76,60 +77,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) - + if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) 
standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -138,7 +139,7 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -152,15 +153,13 @@ def test_ticket47653_init(topology): - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci - + """ - - + topology.standalone.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(2, OC_NAME, must = MUST, may = MAY) + new_oc = _oc_definition(2, OC_NAME, must=MUST, may=MAY) topology.standalone.schema.add_schema('objectClasses', new_oc) - - + # entry used to bind with topology.standalone.log.info("Add %s" % BIND_DN) topology.standalone.add_s(Entry((BIND_DN, { @@ -168,16 +167,16 @@ def test_ticket47653_init(topology): 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) - + # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '128')] topology.standalone.modify_s(DN_CONFIG, mod) - + # get read of anonymous ACI for use 'read-search' aci in SEARCH test ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" mod = [(ldap.MOD_DELETE, 'aci', ACI_ANONYMOUS)] topology.standalone.modify_s(SUFFIX, mod) - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -185,21 +184,21 @@ def test_ticket47653_init(topology): 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) - - + + def test_ticket47653_add(topology): ''' - It checks that, bound as bind_entry, + It checks that, bound as bind_entry, - we can not ADD an entry without the proper SELFDN aci. 
- with the proper ACI we can not ADD with 'member' attribute - with the proper ACI and 'member' it succeeds to ADD ''' topology.standalone.log.info("\n\n######################### ADD ######################\n") - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # Prepare the entry with multivalued members entry_with_members = Entry(ENTRY_DN) entry_with_members.setValues('objectclass', 'top', 'person', 'OCticket47653') @@ -213,7 +212,7 @@ def test_ticket47653_add(topology): members.append("cn=%s,%s" % (name, SUFFIX)) members.append(BIND_DN) entry_with_members.setValues('member', members) - + # Prepare the entry with one member entry_with_member = Entry(ENTRY_DN) entry_with_member.setValues('objectclass', 'top', 'person', 'OCticket47653') @@ -224,21 +223,20 @@ def test_ticket47653_add(topology): member = [] member.append(BIND_DN) entry_with_member.setValues('member', member) - + # entry to add WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.standalone.log.info("Try to add Add %s (aci is missing): %r" % (ENTRY_DN, entry_with_member)) - + topology.standalone.add_s(entry_with_member) except Exception as e: topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + # Ok Now add the proper ACI topology.standalone.log.info("Bind as %s and add the ADD SELFDN aci" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN add\"; allow (add)" @@ -246,11 +244,11 @@ def test_ticket47653_add(topology): ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.standalone.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # entry to add WITHOUT member and WITH the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.standalone.log.info("Try to add Add %s (member is missing)" % ENTRY_DN) @@ -263,7 +261,7 @@ def test_ticket47653_add(topology): except Exception as e: topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # entry to add WITH memberS and WITH the ACI -> ldap.INSUFFICIENT_ACCESS # member should contain only one value try: @@ -272,13 +270,14 @@ def test_ticket47653_add(topology): except Exception as e: topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + topology.standalone.log.info("Try to add Add %s should be successful" % ENTRY_DN) topology.standalone.add_s(entry_with_member) - + + def test_ticket47653_search(topology): ''' - It checks that, bound as bind_entry, + It checks that, bound as bind_entry, - we can not search an entry without the proper SELFDN aci. 
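One caveat with the try/except pattern used in these negative tests: if the ADD unexpectedly succeeds, no exception is raised and the isinstance assertion is never reached, so the test silently passes. Since these modules already import pytest, a stricter sketch:

    import ldap
    import pytest

    def add_must_be_denied(server, entry):
        # Fails the test both when a different exception is raised and
        # when the ADD unexpectedly succeeds.
        with pytest.raises(ldap.INSUFFICIENT_ACCESS):
            server.add_s(entry)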
- adding the ACI, we can search the entry ''' @@ -286,17 +285,16 @@ def test_ticket47653_search(topology): # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # entry to search WITH member being BIND_DN but WITHOUT the ACI -> no entry returned topology.standalone.log.info("Try to search %s (aci is missing)" % ENTRY_DN) ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 0 - - + # Ok Now add the proper ACI topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME @@ -305,28 +303,29 @@ def test_ticket47653_search(topology): ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.standalone.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # entry to search with the proper aci topology.standalone.log.info("Try to search %s should be successful" % ENTRY_DN) ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 - + + def test_ticket47653_modify(topology): ''' - It checks that, bound as bind_entry, + It checks that, bound as bind_entry, - we can not modify an entry without the proper SELFDN aci. - adding the ACI, we can modify the entry ''' # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + topology.standalone.log.info("\n\n######################### MODIFY ######################\n") - + # entry to modify WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.standalone.log.info("Try to modify %s (aci is missing)" % ENTRY_DN) @@ -335,12 +334,12 @@ def test_ticket47653_modify(topology): except Exception as e: topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - - + + # Ok Now add the proper ACI topology.standalone.log.info("Bind as %s and add the WRITE SELFDN aci" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETATTR = "(targetattr = *)" ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME @@ -349,32 +348,33 @@ def test_ticket47653_modify(topology): ACI_BODY = ACI_TARGET + ACI_TARGETATTR + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.standalone.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # modify the entry and checks the value topology.standalone.log.info("Try to modify %s. It should succeeds" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'postalCode', '1928')] topology.standalone.modify_s(ENTRY_DN, mod) - + ents = topology.standalone.search_s(ENTRY_DN, ldap.SCOPE_BASE, 'objectclass=*') assert len(ents) == 1 assert ents[0].postalCode == '1928' - + + def test_ticket47653_delete(topology): ''' - It checks that, bound as bind_entry, + It checks that, bound as bind_entry, - we can not delete an entry without the proper SELFDN aci. 
- adding the ACI, we can delete the entry ''' topology.standalone.log.info("\n\n######################### DELETE ######################\n") - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # entry to delete WITH member being BIND_DN but WITHOUT the ACI -> ldap.INSUFFICIENT_ACCESS try: topology.standalone.log.info("Try to delete %s (aci is missing)" % ENTRY_DN) @@ -382,11 +382,11 @@ def test_ticket47653_delete(topology): except Exception as e: topology.standalone.log.info("Exception (expected): %s" % type(e).__name__) assert isinstance(e, ldap.INSUFFICIENT_ACCESS) - + # Ok Now add the proper ACI topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + ACI_TARGET = "(target = \"ldap:///cn=*,%s\")" % SUFFIX ACI_TARGETFILTER = "(targetfilter =\"(objectClass=%s)\")" % OC_NAME ACI_ALLOW = "(version 3.0; acl \"SelfDN delete\"; allow (delete)" @@ -394,39 +394,39 @@ def test_ticket47653_delete(topology): ACI_BODY = ACI_TARGET + ACI_TARGETFILTER + ACI_ALLOW + ACI_SUBJECT mod = [(ldap.MOD_ADD, 'aci', ACI_BODY)] topology.standalone.modify_s(SUFFIX, mod) - + # bind as bind_entry topology.standalone.log.info("Bind as %s" % BIND_DN) topology.standalone.simple_bind_s(BIND_DN, BIND_PW) - + # entry to search with the proper aci topology.standalone.log.info("Try to delete %s should be successful" % ENTRY_DN) topology.standalone.delete_s(ENTRY_DN) + def test_ticket47653_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47653_init(topo) - + test_ticket47653_add(topo) test_ticket47653_search(topo) test_ticket47653_modify(topo) test_ticket47653_delete(topo) - + test_ticket47653_final(topo) diff --git a/dirsrvtests/tickets/ticket47664_test.py b/dirsrvtests/tickets/ticket47664_test.py index eaad5dc..4bd100d 100644 --- a/dirsrvtests/tickets/ticket47664_test.py +++ b/dirsrvtests/tickets/ticket47664_test.py @@ -27,6 +27,7 @@ _MYLDIF = 'ticket47664.ldif' SEARCHFILTER = '(objectclass=*)' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -39,7 +40,7 @@ def topology(request): This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. 
- + Principle: If standalone instance exists: restart it @@ -58,60 +59,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) 
standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -120,7 +121,7 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -140,7 +141,7 @@ def test_ticket47664_run(topology): # bind as directory manager topology.standalone.log.info("Bind as %s" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47664.org ######################\n") topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) @@ -150,7 +151,7 @@ def test_ticket47664_run(topology): # get tmp dir mytmp = topology.standalone.getDir(__file__, TMP_DIR) - if mytmp == None: + if mytmp is None: mytmp = "/tmp" MYLDIF = '%s%s' % (mytmp, _MYLDIF) @@ -195,11 +196,11 @@ def test_ticket47664_run(topology): ger_req_ctrl = GetEffectiveRightsControl(True, "dn: " + DN_DM) known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType:SimplePagedResultsControl, + SimplePagedResultsControl.controlType: SimplePagedResultsControl, } topology.standalone.log.info("Calling search_ext...") - msgid = topology.standalone.search_ext(MYSUFFIX, + msgid = topology.standalone.search_ext(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER, ['cn'], @@ -214,21 +215,21 @@ def test_ticket47664_run(topology): rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) topology.standalone.log.info("%d results" % len(rdata)) pageddncnt += len(rdata) - + topology.standalone.log.info("Results:") for dn, attrs in rdata: topology.standalone.log.info("dn: %s" % dn) topology.standalone.log.info("attributeLevelRights: %s" % attrs['attributeLevelRights'][0]) if attrs['attributeLevelRights'][0] != "": attrlevelrightscnt += 1 - + pctrls = [ c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType ] if not pctrls: topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') break - + if pctrls[0].cookie: spr_req_ctrl.cookie = pctrls[0].cookie topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie) @@ -246,25 +247,27 @@ def test_ticket47664_run(topology): assert dnnum == len(entries) assert dnnum == attrlevelrightscnt assert pages == (dnnum / page_size) - topology.standalone.log.info("ticket47664 was successfully verified."); + topology.standalone.log.info("ticket47664 was successfully verified.") + def test_ticket47664_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() + + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
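The paged search in test_ticket47664_run is a standard RFC 2696 cookie loop. A minimal self-contained sketch of it, using the same python-ldap calls as the test; paged_search is a hypothetical helper and the page size is arbitrary:

    import ldap
    from ldap.controls import SimplePagedResultsControl

    def paged_search(conn, base, filterstr, page_size=4, extra_ctrls=None):
        # Hypothetical helper: the RFC 2696 cookie loop from the test above.
        req = SimplePagedResultsControl(True, size=page_size, cookie='')
        known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
        results = []
        while True:
            msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
                                    serverctrls=[req] + (extra_ctrls or []))
            rtype, rdata, rmsgid, rctrls = conn.result3(
                msgid, resp_ctrl_classes=known)
            results.extend(rdata)
            pctrls = [c for c in rctrls
                      if c.controlType == SimplePagedResultsControl.controlType]
            if not pctrls or not pctrls[0].cookie:
                break  # server ignores RFC 2696, or last page reached
            req.cookie = pctrls[0].cookie
        return results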
- set the installation prefix - run this program ''' global installation_prefix installation_prefix = None - + topo = topology(True) test_ticket47664_run(topo) - + test_ticket47664_final(topo) diff --git a/dirsrvtests/tickets/ticket47676_test.py b/dirsrvtests/tickets/ticket47676_test.py index b462fc2..517e10f 100644 --- a/dirsrvtests/tickets/ticket47676_test.py +++ b/dirsrvtests/tickets/ticket47676_test.py @@ -56,7 +56,8 @@ ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME BASE_OID = "1.2.3.4.5.6.7.8.9.10" - + + def _oc_definition(oid_ext, name, must=None, may=None): oid = "%s.%d" % (BASE_OID, oid_ext) desc = 'To test ticket 47490' @@ -65,14 +66,16 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST if not may: may = MAY - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc + + class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -84,7 +87,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. - + Principle: If master1 instance exists: restart it @@ -108,22 +111,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -131,40 +134,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -175,48 +177,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g backup_master) we clear all backups that an instance my have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -226,13 +228,14 @@ def topology(request): master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -240,12 +243,14 @@ def topology(request): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) @@ -253,7 +258,7 @@ def topology(request): # clear the tmp directory master1.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working. 
Either coming from a backup recovery # or from a fresh (re)init @@ -267,15 +272,13 @@ def test_ticket47676_init(topology): - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci - + """ - - + topology.master1.log.info("Add %s that allows 'member' attribute" % OC_NAME) - new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must = MUST, may = MAY) + new_oc = _oc_definition(OC_OID_EXT, OC_NAME, must=MUST, may=MAY) topology.master1.schema.add_schema('objectClasses', new_oc) - - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -283,12 +286,12 @@ def test_ticket47676_init(topology): 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) - + # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128+8192))] # ACL + REPL + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(128 + 8192))] # ACL + REPL topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -297,6 +300,7 @@ def test_ticket47676_init(topology): 'sn': name, 'cn': name}))) + def test_ticket47676_skip_oc_at(topology): ''' This test ADD an entry on MASTER1 where 47676 is fixed. Then it checks that entry is replicated @@ -304,11 +308,11 @@ def test_ticket47676_skip_oc_at(topology): If the schema has successfully been pushed, updating Master2 should succeed ''' topology.master1.log.info("\n\n######################### ADD ######################\n") - + # bind as 'cn=Directory manager' topology.master1.log.info("Bind as %s and add the add the entry with specific oc" % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) - + # Prepare the entry with multivalued members entry = Entry(ENTRY_DN) entry.setValues('objectclass', 'top', 'person', 'OCticket47676') @@ -322,10 +326,10 @@ def test_ticket47676_skip_oc_at(topology): members.append("cn=%s,%s" % (name, SUFFIX)) members.append(BIND_DN) entry.setValues('member', members) - + topology.master1.log.info("Try to add Add %s should be successful" % ENTRY_DN) topology.master1.add_s(entry) - + # # Now check the entry as been replicated # @@ -340,12 +344,12 @@ def test_ticket47676_skip_oc_at(topology): time.sleep(2) loop += 1 assert loop <= 10 - + # Now update the entry on Master2 (as DM because 47676 is possibly not fixed on M2) topology.master1.log.info("Update %s on M2" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', 'test_add')] topology.master2.modify_s(ENTRY_DN, mod) - + topology.master1.simple_bind_s(DN_DM, PASSWORD) loop = 0 while loop <= 10: @@ -354,29 +358,30 @@ def test_ticket47676_skip_oc_at(topology): break time.sleep(1) loop += 1 - + assert ent.getValue('description') == 'test_add' + def test_ticket47676_reject_action(topology): - + topology.master1.log.info("\n\n######################### REJECT ACTION ######################\n") - + topology.master1.simple_bind_s(DN_DM, PASSWORD) topology.master2.simple_bind_s(DN_DM, PASSWORD) - + # make master1 to refuse to push the schema if OC_NAME is present in consumer schema - mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL + mod = [(ldap.MOD_ADD, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) - + # Restart is required to take into account that policy topology.master1.stop(timeout=10) topology.master1.start(timeout=10) - + # 
Add a new OC on M1 so that schema CSN will change and M1 will try to push the schema topology.master1.log.info("Add %s on M1" % OC2_NAME) - new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must = MUST, may = MAY) + new_oc = _oc_definition(OC2_OID_EXT, OC2_NAME, must=MUST, may=MAY) topology.master1.schema.add_schema('objectClasses', new_oc) - + # Safety checking that the schema has been updated on M1 topology.master1.log.info("Check %s is in M1" % OC2_NAME) ent = topology.master1.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) @@ -387,12 +392,12 @@ def test_ticket47676_reject_action(topology): found = True break assert found - + # Do an update of M1 so that M1 will try to push the schema topology.master1.log.info("Update %s on M1" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', 'test_reject')] topology.master1.modify_s(ENTRY_DN, mod) - + # Check the replication occured and so also M1 attempted to push the schema topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) loop = 0 @@ -404,7 +409,7 @@ def test_ticket47676_reject_action(topology): time.sleep(2) loop += 1 assert loop <= 10 - + # Check that the schema has not been pushed topology.master1.log.info("Check %s is not in M2" % OC2_NAME) ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) @@ -415,22 +420,22 @@ def test_ticket47676_reject_action(topology): found = True break assert not found - + topology.master1.log.info("\n\n######################### NO MORE REJECT ACTION ######################\n") - + # make master1 to do no specific action on OC_NAME - mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME) )] # ACL + REPL + mod = [(ldap.MOD_DELETE, 'schemaUpdateObjectclassReject', '%s' % (OC_NAME))] # ACL + REPL topology.master1.modify_s(REPL_SCHEMA_POLICY_SUPPLIER, mod) - + # Restart is required to take into account that policy topology.master1.stop(timeout=10) topology.master1.start(timeout=10) - + # Do an update of M1 so that M1 will try to push the schema topology.master1.log.info("Update %s on M1" % ENTRY_DN) mod = [(ldap.MOD_REPLACE, 'description', 'test_no_more_reject')] topology.master1.modify_s(ENTRY_DN, mod) - + # Check the replication occured and so also M1 attempted to push the schema topology.master1.log.info("Check updated %s on M2" % ENTRY_DN) loop = 0 @@ -442,7 +447,7 @@ def test_ticket47676_reject_action(topology): time.sleep(2) loop += 1 assert loop <= 10 - + # Check that the schema has been pushed topology.master1.log.info("Check %s is in M2" % OC2_NAME) ent = topology.master2.getEntry(SCHEMA_DN, ldap.SCOPE_BASE, "(objectclass=*)", ["objectclasses"]) @@ -453,15 +458,17 @@ def test_ticket47676_reject_action(topology): found = True break assert found - + + def test_ticket47676_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
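The schema membership checks above scan cn=schema by hand with a found flag. The inline loop could be expressed as one small predicate; schema_has_oc is a hypothetical name using only the getEntry/getValues calls already used in this test:

    import ldap

    def schema_has_oc(server, schema_dn, oc_name):
        # Hypothetical predicate for the inline "found" loops above.
        ent = server.getEntry(schema_dn, ldap.SCOPE_BASE, "(objectclass=*)",
                              ["objectclasses"])
        return any(oc_name in str(value) for value in ent.getValues('objectclasses'))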
- set the installation prefix - run this program @@ -470,17 +477,15 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) topo.master1.log.info("\n\n######################### Ticket 47676 ######################\n") test_ticket47676_init(topo) - + test_ticket47676_skip_oc_at(topo) test_ticket47676_reject_action(topo) - - test_ticket47676_final(topo) - + test_ticket47676_final(topo) if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47714_test.py b/dirsrvtests/tickets/ticket47714_test.py index 6f32224..ef4ebd5 100644 --- a/dirsrvtests/tickets/ticket47714_test.py +++ b/dirsrvtests/tickets/ticket47714_test.py @@ -16,7 +16,7 @@ log = logging.getLogger(__name__) installation_prefix = None -ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY +ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY ACCT_POLICY_DN = 'cn=Account Inactivation Pplicy,%s' % SUFFIX INACTIVITY_LIMIT = '9' SEARCHFILTER = '(objectclass=*)' @@ -25,6 +25,7 @@ TEST_USER = 'ticket47714user' TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX) TEST_USER_PW = '%s' % TEST_USER + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -128,6 +129,7 @@ def topology(request): # Time to return the topology return TopologyStandalone(standalone) + def _header(topology, label): topology.standalone.log.info("\n\n###############################################") topology.standalone.log.info("#######") @@ -135,9 +137,10 @@ def _header(topology, label): topology.standalone.log.info("#######") topology.standalone.log.info("###############################################") + def test_ticket47714_init(topology): """ - 1. Add account policy entry to the DB + 1. Add account policy entry to the DB 2. Add a test user to the DB """ _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.') @@ -156,6 +159,7 @@ def test_ticket47714_init(topology): 'userPassword': TEST_USER_PW, 'acctPolicySubentry': ACCT_POLICY_DN}))) + def test_ticket47714_run_0(topology): """ Check this change has no inpact to the existing functionality. @@ -224,6 +228,7 @@ def test_ticket47714_run_0(topology): log.info("%s was successfully inactivated." 
% TEST_USER_DN) pass + def test_ticket47714_run_1(topology): """ Verify a new config attr alwaysRecordLoginAttr @@ -278,14 +283,12 @@ def test_ticket47714_run_1(topology): log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1)) assert lastLoginTime0 < lastLoginTime1 - topology.standalone.log.info("ticket47714 was successfully verified."); + topology.standalone.log.info("ticket47714 was successfully verified.") + def test_ticket47714_final(topology): - log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN) - # Enabled the plugins - topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.plugins.disable(name=PLUGIN_ACCT_POLICY) - topology.standalone.stop(timeout=10) + topology.standalone.delete() + def run_isolated(): ''' @@ -300,13 +303,13 @@ def run_isolated(): topo = topology(True) test_ticket47714_init(topo) - + test_ticket47714_run_0(topo) test_ticket47714_run_1(topo) - + test_ticket47714_final(topo) - + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47721_test.py b/dirsrvtests/tickets/ticket47721_test.py index e27a5a8..3b1aa7b 100644 --- a/dirsrvtests/tickets/ticket47721_test.py +++ b/dirsrvtests/tickets/ticket47721_test.py @@ -57,18 +57,22 @@ ENTRY_OC = "top person %s" % OC_NAME BASE_OID = "1.2.3.4.5.6.7.8.9.10" + def _add_custom_at_definition(name='ATticket47721'): new_at = "( %s-oid NAME '%s' DESC 'test AT ticket 47721' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN ( 'Test 47721' 'user defined' ) )" % (name, name) return new_at + def _chg_std_at_defintion(): new_at = "( 2.16.840.1.113730.3.1.569 NAME 'cosPriority' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.27 X-ORIGIN 'Netscape Directory Server' )" return new_at + def _add_custom_oc_defintion(name='OCticket47721'): new_oc = "( %s-oid NAME '%s' DESC 'An group of related automount objects' SUP top STRUCTURAL MUST ou X-ORIGIN 'draft-howard-rfc2307bis' )" % (name, name) return new_oc + def _chg_std_oc_defintion(): new_oc = "( 5.3.6.1.1.1.2.0 NAME 'trustAccount' DESC 'Sets trust accounts information' SUP top AUXILIARY MUST trustModel MAY ( accessTo $ ou ) X-ORIGIN 'nss_ldap/pam_ldap' )" return new_oc @@ -78,7 +82,7 @@ class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -90,7 +94,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. 
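Note that the plain string comparison of the two lastLoginTime values above is sound: LDAP GeneralizedTime is fixed-width with the most significant digits first, so lexical order matches chronological order.

    # Both timestamps have the form YYYYMMDDHHMMSSZ, so:
    assert "20150123215021Z" < "20150123220000Z"  # earlier login sorts first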
- + Principle: If master1 instance exists: restart it @@ -114,22 +118,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -137,40 +141,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() + instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -181,48 +184,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g. backup_master) we clear all backups that an instance may have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -232,13 +235,14 @@ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -246,12 +250,14 @@ break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) @@ -259,7 +265,7 @@ # clear the tmp directory master1.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working.
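# The "ent = None ... if ent is None: assert False" hardening added above is the
# recurring wait-for-replication idiom in these tests. A minimal sketch of the
# same loop as a reusable helper; the helper name and the ten-attempt budget
# are illustrative assumptions, not part of this patch:

import time
import ldap

def wait_for_entry(instance, dn, attempts=10):
    # Poll until 'dn' shows up on 'instance'; return the entry, or None on
    # timeout. Pre-initializing ent means an exhausted loop still leaves a
    # testable value instead of an unbound local.
    ent = None
    for _ in range(attempts):
        try:
            ent = instance.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
            break
        except ldap.NO_SUCH_OBJECT:
            time.sleep(1)
    return ent

# usage in the fixture above would be: assert wait_for_entry(master2, TEST_REPL_DN) is not None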
Either coming from a backup recovery # or from a fresh (re)init @@ -273,11 +279,9 @@ def test_ticket47721_init(topology): - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci - + """ - - - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -285,12 +289,12 @@ def test_ticket47721_init(topology): 'sn': BIND_NAME, 'cn': BIND_NAME, 'userpassword': BIND_PW}))) - + # enable acl error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # ACL + REPL topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -298,9 +302,12 @@ def test_ticket47721_init(topology): 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) + + def test_ticket47721_0(topology): dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) loop = 0 + ent = None while loop <= 10: try: ent = topology.master2.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)") @@ -308,32 +315,34 @@ def test_ticket47721_0(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - assert loop <= 10 - + if ent is None: + assert False + + def test_ticket47721_1(topology): - topology.master1.log.info("Attach debugger\n\n" ) + #topology.master1.log.info("Attach debugger\n\n") #time.sleep(30) - + new = _add_custom_at_definition() topology.master1.log.info("Add (M2) %s " % new) topology.master2.schema.add_schema('attributetypes', new) - + new = _chg_std_at_defintion() topology.master1.log.info("Chg (M2) %s " % new) topology.master2.schema.add_schema('attributetypes', new) - + new = _add_custom_oc_defintion() topology.master1.log.info("Add (M2) %s " % new) topology.master2.schema.add_schema('objectClasses', new) - + new = _chg_std_oc_defintion() topology.master1.log.info("Chg (M2) %s " % new) topology.master2.schema.add_schema('objectClasses', new) - + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 1')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology.master2.modify_s(dn, mod) - + loop = 0 while loop <= 10: try: @@ -344,12 +353,27 @@ def test_ticket47721_1(topology): loop += 1 time.sleep(1) assert loop <= 10 - + + time.sleep(2) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' 
+ % (schema_csn_master1, schema_csn_master2)) + time.sleep(30) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None + assert schema_csn_master1 == schema_csn_master2 + + def test_ticket47721_2(topology): mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 2')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology.master1.modify_s(dn, mod) - + loop = 0 while loop <= 10: try: @@ -360,12 +384,22 @@ def test_ticket47721_2(topology): loop += 1 time.sleep(1) assert loop <= 10 - + + time.sleep(2) schema_csn_master1 = topology.master1.schema.get_schema_csn() schema_csn_master2 = topology.master2.schema.get_schema_csn() - assert schema_csn_master1 != None + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_master1, schema_csn_master2)) + time.sleep(30) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None assert schema_csn_master1 == schema_csn_master2 - + + def test_ticket47721_3(topology): ''' Check that the supplier can update its schema from consumer schema @@ -379,15 +413,15 @@ def test_ticket47721_3(topology): new = _add_custom_at_definition('ATtest3') topology.master1.log.info("Update schema (M2) %s " % new) topology.master2.schema.add_schema('attributetypes', new) - + new = _add_custom_oc_defintion('OCtest3') topology.master1.log.info("Update schema (M2) %s " % new) topology.master2.schema.add_schema('objectClasses', new) - + mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 3')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology.master1.modify_s(dn, mod) - + loop = 0 while loop <= 10: try: @@ -398,13 +432,23 @@ def test_ticket47721_3(topology): loop += 1 time.sleep(1) assert loop <= 10 - + + time.sleep(2) schema_csn_master1 = topology.master1.schema.get_schema_csn() schema_csn_master2 = topology.master2.schema.get_schema_csn() - assert schema_csn_master1 != None + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet: m1 (%s) vs m2 (%s), wait a little...' + % (schema_csn_master1, schema_csn_master2)) + time.sleep(30) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None # schema csn on M2 is larger that on M1. M1 only took the new definitions assert schema_csn_master1 != schema_csn_master2 - + + def test_ticket47721_4(topology): ''' Here M2->M1 agreement is disabled. 
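The schema CSN check added above (and again in the next hunk for test 4) follows one pattern: read both masters' CSNs, and on a mismatch give replication one more grace period before asserting. A condensed sketch of that pattern, using only the lib389 calls already present in this patch (the helper name and the wait lengths are illustrative assumptions):

import time

def schema_csns_after_wait(master1, master2, settle=2, retry_wait=30):
    # Let the last update settle, then compare the schema CSNs; on a
    # mismatch, wait once more and re-read before returning the pair.
    time.sleep(settle)
    csn1 = master1.schema.get_schema_csn()
    csn2 = master2.schema.get_schema_csn()
    if csn1 != csn2:
        time.sleep(retry_wait)
        csn1 = master1.schema.get_schema_csn()
        csn2 = master2.schema.get_schema_csn()
    return csn1, csn2

Tests expecting convergence would assert the first CSN is not None and both are equal; test_ticket47721_3 instead expects them to stay different, since M1 only takes the new definitions.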
@@ -415,16 +459,16 @@ def test_ticket47721_4(topology): new = _add_custom_at_definition('ATtest4') topology.master1.log.info("Update schema (M1) %s " % new) topology.master1.schema.add_schema('attributetypes', new) - + new = _add_custom_oc_defintion('OCtest4') topology.master1.log.info("Update schema (M1) %s " % new) topology.master1.schema.add_schema('objectClasses', new) - + topology.master1.log.info("trigger replication M1->M2: to update the schema") mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 4')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology.master1.modify_s(dn, mod) - + loop = 0 while loop <= 10: try: @@ -435,12 +479,12 @@ def test_ticket47721_4(topology): loop += 1 time.sleep(1) assert loop <= 10 - + topology.master1.log.info("trigger replication M1->M2: to push the schema") mod = [(ldap.MOD_REPLACE, 'description', 'Hello world 5')] dn = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) topology.master1.modify_s(dn, mod) - + loop = 0 while loop <= 10: try: @@ -451,20 +495,30 @@ def test_ticket47721_4(topology): loop += 1 time.sleep(1) assert loop <= 10 - + + time.sleep(2) schema_csn_master1 = topology.master1.schema.get_schema_csn() schema_csn_master2 = topology.master2.schema.get_schema_csn() - assert schema_csn_master1 != None + if schema_csn_master1 != schema_csn_master2: + # We need to give the server a little more time, then check it again + log.info('Schema CSNs are not in sync yet, wait a little...') + time.sleep(30) + schema_csn_master1 = topology.master1.schema.get_schema_csn() + schema_csn_master2 = topology.master2.schema.get_schema_csn() + + assert schema_csn_master1 is not None assert schema_csn_master1 == schema_csn_master2 - + + def test_ticket47721_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -473,21 +527,18 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) topo.master1.log.info("\n\n######################### Ticket 47721 ######################\n") test_ticket47721_init(topo) - + test_ticket47721_0(topo) test_ticket47721_1(topo) test_ticket47721_2(topo) test_ticket47721_3(topo) test_ticket47721_4(topo) - sys.exit(0) - - test_ticket47721_final(topo) - + test_ticket47721_final(topo) if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47781_test.py b/dirsrvtests/tickets/ticket47781_test.py index 9b58ef3..94f0893 100644 --- a/dirsrvtests/tickets/ticket47781_test.py +++ b/dirsrvtests/tickets/ticket47781_test.py @@ -120,7 +120,7 @@ def topology(request): def test_ticket47781(topology): """ - Testing for a deadlock after doing an online import of an LDIF with + Testing for a deadlock after doing an online import of an LDIF with replication data. The replication agreement should be invalid. 
""" @@ -130,7 +130,7 @@ def test_ticket47781(topology): # Setup Replication # log.info('Setting up replication...') - topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, + topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) properties = {RA_NAME: r'meTo_$host:$port', @@ -139,9 +139,9 @@ def test_ticket47781(topology): RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} # The agreement should point to a server that does NOT exist (invalid port) - repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, - host=topology.standalone.host, - port=5555, + repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX, + host=topology.standalone.host, + port=5555, properties=properties) # @@ -196,13 +196,13 @@ def test_ticket47781(topology): except ValueError: os.remove("/tmp/export.ldif") assert False - + # # Search for tombstones - we should not hang/timeout # log.info('Search for tombstone entries(should find one and not hang)...') - topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5); - topology.standalone.set_option(ldap.OPT_TIMEOUT, 5); + topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5) + topology.standalone.set_option(ldap.OPT_TIMEOUT, 5) try: entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone') if not entries: @@ -217,7 +217,7 @@ def test_ticket47781(topology): def test_ticket47781_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -233,6 +233,8 @@ def run_isolated(): topo = topology(True) test_ticket47781(topo) + test_ticket47781_final(topo) + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47787_test.py b/dirsrvtests/tickets/ticket47787_test.py index e9fa876..528b474 100644 --- a/dirsrvtests/tickets/ticket47787_test.py +++ b/dirsrvtests/tickets/ticket47787_test.py @@ -56,11 +56,12 @@ MAX_ACCOUNTS = 20 CONFIG_MODDN_ACI_ATTR = "nsslapd-moddn-aci" + class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -72,7 +73,7 @@ def topology(request): The replicated topology is MASTER1 <-> Master2. At the beginning, It may exists a master2 instance and/or a master2 instance. It may also exists a backup for the master1 and/or the master2. 
- + Principle: If master1 instance exists: restart it @@ -96,22 +97,22 @@ def topology(request): global installation2_prefix # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -119,40 +120,39 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups backup_master1 = master1.checkBackupFS() backup_master2 = master2.checkBackupFS() - + # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() if instance_master1: master1.stop(timeout=10) master1.start(timeout=10) - + instance_master2 = master2.exists() if instance_master2: master2.stop(timeout=10) master2.start(timeout=10) - + if backup_master1 and backup_master2: - # The backups exist, assuming they are correct + # The backups exist, assuming they are correct # we just re-init the instances with them if not instance_master1: master1.create() # Used to retrieve configuration information (dbdir, confdir...) master1.open() - + if not instance_master2: master2.create() # Used to retrieve configuration information (dbdir, confdir...) master2.open() - + # restore master1 from backup master1.stop(timeout=10) master1.restoreFS(backup_master1) master1.start(timeout=10) - + # restore master2 from backup master2.stop(timeout=10) master2.restoreFS(backup_master2) @@ -163,48 +163,48 @@ def topology(request): # so we need to create everything # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove all the backups. 
So even if we have a specific backup file # (e.g. backup_master) we clear all backups that an instance may have created if backup_master1: master1.clearBackupFS() if backup_master2: master2.clearBackupFS() - + # Remove all the instances if instance_master1: master1.delete() if instance_master2: master2.delete() - + # Create the instances master1.create() master1.open() master2.create() master2.open() - - # + + # # Now prepare the Master-Consumer topology # # First Enable replication master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - + # Initialize the supplier->consumer - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - + if not repl_agreement: log.fatal("Fail to create a replica agreement") sys.exit(1) - + log.debug("%s created" % repl_agreement) - + properties = {RA_NAME: r'meTo_$host:$port', RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], @@ -214,13 +214,14 @@ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) master1.waitForReplInit(repl_agreement) - + # Check replication is working fine master1.add_s(Entry((TEST_REPL_DN, { 'objectclass': "top person".split(), 'sn': 'test_repl', 'cn': 'test_repl'}))) loop = 0 + ent = None while loop <= 10: try: ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -228,12 +229,14 @@ break except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # Time to create the backups master1.stop(timeout=10) master1.backupfile = master1.backupFS() master1.start(timeout=10) - + master2.stop(timeout=10) master2.backupfile = master2.backupFS() master2.start(timeout=10) @@ -241,7 +244,7 @@ # clear the tmp directory master1.clearTmpDir(__file__) - # + # # Here we have two instances master and consumer # with replication working.
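# Both master fixtures in this patch follow the restore-or-recreate principle
# spelled out in their docstrings: re-init from filesystem backups when both
# exist, otherwise discard everything and rebuild from scratch. A condensed
# sketch of that decision, using only lib389 calls already present in the
# patch; the helper itself is an illustrative assumption, not part of the
# change:

def restore_or_recreate(instances):
    # instances: list of (DirSrv, backup_path_or_None, already_exists) tuples
    if all(backup for _, backup, _ in instances):
        for inst, backup, exists in instances:
            if not exists:
                inst.create()
                inst.open()  # retrieve configuration information (dbdir, confdir...)
            inst.stop(timeout=10)
            inst.restoreFS(backup)
            inst.start(timeout=10)
    else:
        for inst, backup, exists in instances:
            if backup:
                inst.clearBackupFS()
            if exists:
                inst.delete()
            inst.create()
            inst.open()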
Either coming from a backup recovery # or from a fresh (re)init @@ -249,27 +252,29 @@ def topology(request): return TopologyMaster1Master2(master1, master2) - def _bind_manager(server): server.log.info("Bind as %s " % DN_DM) server.simple_bind_s(DN_DM, PASSWORD) + def _bind_normal(server): server.log.info("Bind as %s " % BIND_DN) server.simple_bind_s(BIND_DN, BIND_PW) - + + def _header(topology, label): topology.master1.log.info("\n\n###############################################") topology.master1.log.info("#######") topology.master1.log.info("####### %s" % label) topology.master1.log.info("#######") topology.master1.log.info("###############################################") - + + def _status_entry_both_server(topology, name=None, desc=None, debug=True): if not name: return topology.master1.log.info("\n\n######################### Tombstone on M1 ######################\n") - attr = 'description' + attr = 'description' found = False attempt = 0 while not found and attempt < 10: @@ -280,42 +285,42 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True): time.sleep(1) attempt = attempt + 1 assert ent_m1 - + topology.master1.log.info("\n\n######################### Tombstone on M2 ######################\n") ent_m2 = _find_tombstone(topology.master2, SUFFIX, 'sn', name) assert ent_m2 - + topology.master1.log.info("\n\n######################### Description ######################\n%s\n" % desc) topology.master1.log.info("M1 only\n") for attr in ent_m1.getAttrs(): - + if not debug: assert attr in ent_m2.getAttrs() - + if not attr in ent_m2.getAttrs(): topology.master1.log.info(" %s" % attr) for val in ent_m1.getValues(attr): topology.master1.log.info(" %s" % val) - + topology.master1.log.info("M2 only\n") for attr in ent_m2.getAttrs(): - + if not debug: assert attr in ent_m1.getAttrs() - + if not attr in ent_m1.getAttrs(): topology.master1.log.info(" %s" % attr) for val in ent_m2.getValues(attr): topology.master1.log.info(" %s" % val) - + topology.master1.log.info("M1 differs M2\n") - + if not debug: assert ent_m1.dn == ent_m2.dn - + if ent_m1.dn != ent_m2.dn: topology.master1.log.info(" M1[dn] = %s\n M2[dn] = %s" % (ent_m1.dn, ent_m2.dn)) - + for attr1 in ent_m1.getAttrs(): if attr1 in ent_m2.getAttrs(): for val1 in ent_m1.getValues(attr1): @@ -324,14 +329,13 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True): if val1 == val2: found = True break - + if not debug: assert found - + if not found: topology.master1.log.info(" M1[%s] = %s" % (attr1, val1)) - - + for attr2 in ent_m2.getAttrs(): if attr2 in ent_m1.getAttrs(): for val2 in ent_m2.getValues(attr2): @@ -340,43 +344,46 @@ def _status_entry_both_server(topology, name=None, desc=None, debug=True): if val2 == val1: found = True break - + if not debug: assert found - + if not found: topology.master1.log.info(" M2[%s] = %s" % (attr2, val2)) - + + def _pause_RAs(topology): topology.master1.log.info("\n\n######################### Pause RA M1<->M2 ######################\n") ents = topology.master1.agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology.master1.agreement.pause(ents[0].dn) - + ents = topology.master2.agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology.master2.agreement.pause(ents[0].dn) - + + def _resume_RAs(topology): topology.master1.log.info("\n\n######################### resume RA M1<->M2 ######################\n") ents = topology.master1.agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology.master1.agreement.resume(ents[0].dn) - + ents = 
topology.master2.agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology.master2.agreement.resume(ents[0].dn) - + + def _find_tombstone(instance, base, attr, value): # # we can not use a filter with a (&(objeclass=nsTombstone)(sn=name)) because # tombstone are not index in 'sn' so 'sn=name' will return NULL # and even if tombstone are indexed for objectclass the '&' will set # the candidate list to NULL - # + # filt = '(objectclass=%s)' % REPLICA_OC_TOMBSTONE ents = instance.search_s(base, ldap.SCOPE_SUBTREE, filt) - found = False + #found = False for ent in ents: if ent.hasAttr(attr): for val in ent.getValues(attr): @@ -384,43 +391,44 @@ def _find_tombstone(instance, base, attr, value): instance.log.debug("tombstone found: %r" % ent) return ent return None - + def _delete_entry(instance, entry_dn, name): instance.log.info("\n\n######################### DELETE %s (M1) ######################\n" % name) - + # delete the entry instance.delete_s(entry_dn) - assert _find_tombstone(instance, SUFFIX, 'sn', name) != None - + assert _find_tombstone(instance, SUFFIX, 'sn', name) is not None def _mod_entry(instance, entry_dn, attr, value): instance.log.info("\n\n######################### MOD %s (M2) ######################\n" % entry_dn) - mod = [(ldap.MOD_REPLACE, attr, value)] + mod = [(ldap.MOD_REPLACE, attr, value)] instance.modify_s(entry_dn, mod) - + + def _modrdn_entry(instance=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): - assert instance != None - assert entry_dn != None - + assert instance is not None + assert entry_dn is not None + if not new_rdn: pattern = 'cn=(.*),(.*)' - rdnre = re.compile(pattern) - match = rdnre.match(entry_dn) + rdnre = re.compile(pattern) + match = rdnre.match(entry_dn) old_value = match.group(1) new_rdn_val = "%s_modrdn" % old_value new_rdn = "cn=%s" % new_rdn_val - - + instance.log.info("\n\n######################### MODRDN %s (M2) ######################\n" % new_rdn) if new_superior: instance.rename_s(entry_dn, new_rdn, newsuperior=new_superior, delold=del_old) else: instance.rename_s(entry_dn, new_rdn, delold=del_old) - + + def _check_entry_exists(instance, entry_dn): loop = 0 + ent = None while loop <= 10: try: ent = instance.getEntry(entry_dn, ldap.SCOPE_BASE, "(objectclass=*)") @@ -428,8 +436,10 @@ def _check_entry_exists(instance, entry_dn): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - assert loop <= 10 - + if ent is None: + assert False + + def _check_mod_received(instance, base, filt, attr, value): instance.log.info("\n\n######################### Check MOD replicated on %s ######################\n" % instance.serverid) loop = 0 @@ -440,43 +450,44 @@ def _check_mod_received(instance, base, filt, attr, value): time.sleep(1) loop += 1 assert loop <= 10 - + + def _check_replication(topology, entry_dn): # prepare the filter to retrieve the entry filt = entry_dn.split(',')[0] - + topology.master1.log.info("\n######################### Check replicat M1->M2 ######################\n") loop = 0 while loop <= 10: attr = 'description' value = 'test_value_%d' % loop - mod = [(ldap.MOD_REPLACE, attr, value)] + mod = [(ldap.MOD_REPLACE, attr, value)] topology.master1.modify_s(entry_dn, mod) _check_mod_received(topology.master2, SUFFIX, filt, attr, value) loop += 1 - + topology.master1.log.info("\n######################### Check replicat M2->M1 ######################\n") loop = 0 while loop <= 10: attr = 'description' value = 'test_value_%d' % loop - mod = [(ldap.MOD_REPLACE, attr, value)] + mod = [(ldap.MOD_REPLACE, attr, value)] 
topology.master2.modify_s(entry_dn, mod) _check_mod_received(topology.master1, SUFFIX, filt, attr, value) loop += 1 - - + + def test_ticket47787_init(topology): """ Creates - a staging DIT - a production DIT - add accounts in staging DIT - + """ - + topology.master1.log.info("\n\n######################### INITIALIZATION ######################\n") - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -484,31 +495,26 @@ def test_ticket47787_init(topology): 'sn': BIND_CN, 'cn': BIND_CN, 'userpassword': BIND_PW}))) - + # DIT for staging topology.master1.log.info("Add %s" % STAGING_DN) topology.master1.add_s(Entry((STAGING_DN, { 'objectclass': "top organizationalRole".split(), 'cn': STAGING_CN, 'description': "staging DIT"}))) - + # DIT for production topology.master1.log.info("Add %s" % PRODUCTION_DN) topology.master1.add_s(Entry((PRODUCTION_DN, { 'objectclass': "top organizationalRole".split(), 'cn': PRODUCTION_CN, 'description': "production DIT"}))) - - + # enable replication error logging mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '8192')] topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - - - - # add dummy entries in the staging DIT for cpt in range(MAX_ACCOUNTS): name = "%s%d" % (NEW_ACCOUNT, cpt) @@ -517,64 +523,63 @@ def test_ticket47787_init(topology): 'sn': name, 'cn': name}))) - + def test_ticket47787_2(topology): ''' Disable replication so that updates are not replicated - Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). + Delete an entry on M1. Modrdn it on M2 (chg rdn + delold=0 + same superior). update a test entry on M2 Reenable the RA. checks that entry was deleted on M2 (with the modified RDN) checks that test entry was replicated on M1 (replication M2->M1 not broken by modrdn) ''' - + _header(topology, "test_ticket47787_2") _bind_manager(topology.master1) _bind_manager(topology.master2) - + #entry to test the replication is still working - name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS -1) - test_rdn = "cn=%s" % (name) + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 1) + test_rdn = "cn=%s" % (name) testentry_dn = "%s,%s" % (test_rdn, STAGING_DN) - + name = "%s%d" % (NEW_ACCOUNT, MAX_ACCOUNTS - 2) - test2_rdn = "cn=%s" % (name) + test2_rdn = "cn=%s" % (name) testentry2_dn = "%s,%s" % (test2_rdn, STAGING_DN) - + # value of updates to test the replication both ways - attr = 'description' + attr = 'description' value = 'test_ticket47787_2' - + # entry for the modrdn name = "%s%d" % (NEW_ACCOUNT, 1) - rdn = "cn=%s" % (name) + rdn = "cn=%s" % (name) entry_dn = "%s,%s" % (rdn, STAGING_DN) - + # created on M1, wait the entry exists on M2 _check_entry_exists(topology.master2, entry_dn) _check_entry_exists(topology.master2, testentry_dn) - + _pause_RAs(topology) - + # Delete 'entry_dn' on M1. # dummy update is only have a first CSN before the DEL # else the DEL will be in min_csn RUV and make diagnostic a bit more complex _mod_entry(topology.master1, testentry2_dn, attr, 'dummy') _delete_entry(topology.master1, entry_dn, name) _mod_entry(topology.master1, testentry2_dn, attr, value) - + time.sleep(1) # important to have MOD.csn != DEL.csn - + # MOD 'entry_dn' on M1. 
# the dummy update is only to have a first CSN before the MOD of entry_dn # else the DEL will be in min_csn RUV and make diagnostics a bit more complex _mod_entry(topology.master2, testentry_dn, attr, 'dummy') _mod_entry(topology.master2, entry_dn, attr, value) _mod_entry(topology.master2, testentry_dn, attr, value) - - + _resume_RAs(topology) - + topology.master1.log.info("\n\n######################### Check DEL replicated on M2 ######################\n") loop = 0 while loop <= 10: @@ -592,9 +597,9 @@ #_check_mod_received(topology.master2, SUFFIX, "(%s)" % (test2_rdn), attr, value) # #_check_replication(topology, testentry_dn) - + _status_entry_both_server(topology, name=name, desc="DEL M1 - MOD M2", debug=DEBUG_FLAG) - + topology.master1.log.info("\n\n######################### Check MOD replicated on M1 ######################\n") loop = 0 while loop <= 10: @@ -605,18 +610,19 @@ loop += 1 assert loop <= 10 assert ent - assert ent.hasAttr(attr) + assert ent.hasAttr(attr) assert ent.getValue(attr) == value def test_ticket47787_final(topology): - topology.master1.stop(timeout=10) - topology.master2.stop(timeout=10) + topology.master1.delete() + topology.master2.delete() + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -625,16 +631,14 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) topo.master1.log.info("\n\n######################### Ticket 47787 ######################\n") test_ticket47787_init(topo) - + test_ticket47787_2(topo) test_ticket47787_final(topo) - - if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47808_test.py b/dirsrvtests/tickets/ticket47808_test.py index 5a16a1f..eecfd4a 100644 --- a/dirsrvtests/tickets/ticket47808_test.py +++ b/dirsrvtests/tickets/ticket47808_test.py @@ -19,7 +19,7 @@ installation_prefix = None ATTRIBUTE_UNIQUENESS_PLUGIN = 'cn=attribute uniqueness,cn=plugins,cn=config' ENTRY_NAME = 'test_entry' - + class TopologyStandalone(object): def __init__(self, standalone): @@ -33,7 +33,7 @@ def topology(request): This fixture is used to create a standalone topology for the 'module'. At the beginning, there may already be a standalone instance. There may also be a backup for the standalone instance.
- + Principle: If standalone instance exists: restart it @@ -52,60 +52,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=True) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) - + if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -114,7 +114,7 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -130,19 +130,19 @@ def test_ticket47808_run(topology): If the second add does not crash the server and the following search found none, the bug is fixed. 
""" - + # bind as directory manager topology.standalone.log.info("Bind as %s" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + topology.standalone.log.info("\n\n######################### SETUP ATTR UNIQ PLUGIN ######################\n") # enable attribute uniqueness plugin mod = [(ldap.MOD_REPLACE, 'nsslapd-pluginEnabled', 'on'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg0', 'sn'), (ldap.MOD_REPLACE, 'nsslapd-pluginarg1', SUFFIX)] topology.standalone.modify_s(ATTRIBUTE_UNIQUENESS_PLUGIN, mod) - + topology.standalone.log.info("\n\n######################### ADD USER 1 ######################\n") - + # Prepare entry 1 entry_name = '%s 1' % (ENTRY_NAME) entry_dn_1 = 'cn=%s, %s' % (entry_name, SUFFIX) @@ -156,10 +156,10 @@ def test_ticket47808_run(topology): topology.standalone.log.info("\n\n######################### Restart Server ######################\n") topology.standalone.stop(timeout=10) topology.standalone.start(timeout=10) - + topology.standalone.log.info("\n\n######################### ADD USER 2 ######################\n") - - # Prepare entry 2 having the same sn, which crashes the server + + # Prepare entry 2 having the same sn, which crashes the server entry_name = '%s 2' % (ENTRY_NAME) entry_dn_2 = 'cn=%s, %s' % (entry_name, SUFFIX) entry_2 = Entry(entry_dn_2) @@ -168,10 +168,10 @@ def test_ticket47808_run(topology): entry_2.setValues('cn', entry_name) topology.standalone.log.info("Try to add Add %s: %r" % (entry_2, entry_2)) try: - topology.standalone.add_s(entry_2) + topology.standalone.add_s(entry_2) except: - topology.standalone.log.warn("Adding %s failed" % entry_dn_2) - pass + topology.standalone.log.warn("Adding %s failed" % entry_dn_2) + pass topology.standalone.log.info("\n\n######################### IS SERVER UP? ######################\n") ents = topology.standalone.search_s(entry_dn_1, ldap.SCOPE_BASE, '(objectclass=*)') @@ -184,29 +184,31 @@ def test_ticket47808_run(topology): ents = topology.standalone.search_s(entry_dn_2, ldap.SCOPE_BASE, '(objectclass=*)') except ldap.NO_SUCH_OBJECT: topology.standalone.log.info("Found none") - + topology.standalone.log.info("\n\n######################### DELETE USER 1 ######################\n") - + topology.standalone.log.info("Try to delete %s " % entry_dn_1) topology.standalone.delete_s(entry_dn_1) - + + def test_ticket47808_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() + + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix installation_prefix = None - + topo = topology(True) test_ticket47808_run(topo) - + test_ticket47808_final(topo) diff --git a/dirsrvtests/tickets/ticket47815_test.py b/dirsrvtests/tickets/ticket47815_test.py index 9d09240..eaaf616 100644 --- a/dirsrvtests/tickets/ticket47815_test.py +++ b/dirsrvtests/tickets/ticket47815_test.py @@ -209,7 +209,7 @@ def test_ticket47815(topology): def test_ticket47815_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -225,6 +225,7 @@ def run_isolated(): topo = topology(True) test_ticket47815(topo) + test_ticket47815_final(topo) if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47819_test.py b/dirsrvtests/tickets/ticket47819_test.py index 7b6f2d5..ba5ebc4 100644 --- a/dirsrvtests/tickets/ticket47819_test.py +++ b/dirsrvtests/tickets/ticket47819_test.py @@ -327,7 +327,7 @@ def test_ticket47819(topology): def test_ticket47819_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -343,6 +343,7 @@ def run_isolated(): topo = topology(True) test_ticket47819(topo) + test_ticket47819_final(topo) if __name__ == '__main__': run_isolated() \ No newline at end of file diff --git a/dirsrvtests/tickets/ticket47823_test.py b/dirsrvtests/tickets/ticket47823_test.py index f4d3695..e237ccd 100644 --- a/dirsrvtests/tickets/ticket47823_test.py +++ b/dirsrvtests/tickets/ticket47823_test.py @@ -18,20 +18,20 @@ log = logging.getLogger(__name__) installation_prefix = None -PROVISIONING_CN = "provisioning" -PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) +PROVISIONING_CN = "provisioning" +PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX) -ACTIVE_CN = "accounts" -STAGE_CN = "staged users" -DELETE_CN = "deleted users" -ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) -STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) -DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) +ACTIVE_CN = "accounts" +STAGE_CN = "staged users" +DELETE_CN = "deleted users" +ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX) +STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN) +DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN) -STAGE_USER_CN = "stage guy" -STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) +STAGE_USER_CN = "stage guy" +STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN) -ACTIVE_USER_CN = "active guy" +ACTIVE_USER_CN = "active guy" ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN) ACTIVE_USER_1_CN = "test_1" @@ -44,9 +44,10 @@ STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN) STAGE_USER_2_CN = ACTIVE_USER_2_CN STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN) -ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', +ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2', 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees'] + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -150,6 +151,7 @@ def topology(request): # Time to return the topology return TopologyStandalone(standalone) + def _header(topology, label): topology.standalone.log.info("\n\n###############################################") topology.standalone.log.info("#######") @@ -157,11 +159,12 @@ def _header(topology, label): topology.standalone.log.info("#######") 
topology.standalone.log.info("###############################################") + def _uniqueness_config_entry(topology, name=None): if not name: return None - - ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, + + ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc', 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type', @@ -170,11 +173,12 @@ def _uniqueness_config_entry(topology, name=None): ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN) return ent + def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False): assert topology assert attr_name assert subtree_1 - + if type_config == 'old': # enable the 'cn' uniqueness on Active config = _uniqueness_config_entry(topology, attr_name) @@ -193,6 +197,7 @@ def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type config.setValue('uniqueness-across-all-subtrees', 'on') return config + def _active_container_invalid_cfg_add(topology): ''' Check uniqueness is not enforced with ADD (invalid config) @@ -206,28 +211,29 @@ def _active_container_invalid_cfg_add(topology): 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) - + topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(ACTIVE_USER_2_DN) + def _active_container_add(topology, type_config='old'): ''' Check uniqueness in a single container (Active) Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value - + ''' config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - + # remove the 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.restart(timeout=120) - + topology.standalone.log.info('Uniqueness not enforced: create the entries') - + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, @@ -237,14 +243,14 @@ def _active_container_add(topology, type_config='old'): 'objectclass': "top person".split(), 'sn': ACTIVE_USER_2_CN, 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]}))) - + topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(ACTIVE_USER_2_DN) - - + + topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected') - - # enable the 'cn' uniqueness on Active + + # enable the 'cn' uniqueness on Active topology.standalone.add_s(config) topology.standalone.restart(timeout=120) topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { @@ -260,26 +266,25 @@ def _active_container_add(topology, type_config='old'): except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) - def _active_container_mod(topology, type_config='old'): ''' Check uniqueness in a single container (active) Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value - + ''' - + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - + # enable the 'cn' uniqueness on Active topology.standalone.add_s(config) 
topology.standalone.restart(timeout=120) - + topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected') topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), @@ -296,33 +301,34 @@ def _active_container_mod(topology, type_config='old'): except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass - + topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected') try: topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])]) except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(ACTIVE_USER_2_DN) - + + def _active_container_modrdn(topology, type_config='old'): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - + ''' config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False) - + # enable the 'cn' uniqueness on Active topology.standalone.add_s(config) topology.standalone.restart(timeout=120) - + topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected') - + topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { 'objectclass': "top person".split(), 'sn': ACTIVE_USER_1_CN, @@ -338,23 +344,23 @@ def _active_container_modrdn(topology, type_config='old'): except ldap.CONSTRAINT_VIOLATION: # yes it is expected pass - - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(ACTIVE_USER_2_DN) + def _active_stage_containers_add(topology, type_config='old', across_subtrees=False): ''' Check uniqueness in several containers Add an entry on a container with a given 'cn' with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container - + ''' config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - + topology.standalone.add_s(config) topology.standalone.restart(timeout=120) topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, { @@ -370,20 +376,21 @@ def _active_stage_containers_add(topology, type_config='old', across_subtrees=Fa 'cn': ACTIVE_USER_1_CN}))) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(STAGE_USER_1_DN) - + + def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False): ''' Check uniqueness in a several containers Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container - + ''' config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - + topology.standalone.add_s(config) topology.standalone.restart(timeout=120) # adding an entry on active with a different 'cn' @@ -397,14 +404,14 @@ def _active_stage_containers_mod(topology, type_config='old', across_subtrees=Fa 'objectclass': "top person".split(), 'sn': STAGE_USER_1_CN, 'cn': STAGE_USER_1_CN}))) - + try: - + # modify add same value 
topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])]) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees - + topology.standalone.delete_s(STAGE_USER_1_DN) topology.standalone.add_s(Entry((STAGE_USER_1_DN, { 'objectclass': "top person".split(), @@ -415,21 +422,22 @@ def _active_stage_containers_mod(topology, type_config='old', across_subtrees=Fa topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])]) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) topology.standalone.delete_s(STAGE_USER_1_DN) - + + def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False): ''' Check uniqueness in a several containers Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container - + ''' - + config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False) - + # enable the 'cn' uniqueness on Active and Stage topology.standalone.add_s(config) topology.standalone.restart(timeout=120) @@ -443,11 +451,10 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees 'sn': STAGE_USER_1_CN, 'cn': STAGE_USER_1_CN}))) - try: - + topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0) - + # check stage entry has 'cn=dummy' stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn']) assert stage_ent.hasAttr('cn') @@ -456,7 +463,7 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees if value == 'dummy': found = True assert found - + # check active entry has 'cn=dummy' active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn']) assert active_ent.hasAttr('cn') @@ -465,18 +472,17 @@ def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees if value == 'dummy': found = True assert found - + topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN)) except ldap.CONSTRAINT_VIOLATION: assert across_subtrees topology.standalone.delete_s(STAGE_USER_1_DN) - - - + # cleanup the stuff now topology.standalone.delete_s(config.dn) topology.standalone.delete_s(ACTIVE_USER_1_DN) - + + def _config_file(topology, action='save'): dse_ldif = topology.standalone.confdir + '/dse.ldif' sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823' @@ -484,17 +490,18 @@ def _config_file(topology, action='save'): shutil.copy(dse_ldif, sav_file) else: shutil.copy(sav_file, dse_ldif) - + + def _pattern_errorlog(file, log_pattern): try: _pattern_errorlog.last_pos += 1 except AttributeError: _pattern_errorlog.last_pos = 0 - + found = None log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos) file.seek(_pattern_errorlog.last_pos) - + # Use a while true iteration because 'for line in file: hit a # python bug that break file.tell() while True: @@ -503,14 +510,15 @@ def _pattern_errorlog(file, log_pattern): found = log_pattern.search(line) if ((line == '') or (found)): break - + log.debug("_pattern_errorlog: end at offset %d" % file.tell()) _pattern_errorlog.last_pos = file.tell() return found - + + def test_ticket47823_init(topology): """ - + """ # Enabled the plugins @@ -518,13 +526,13 @@ def test_ticket47823_init(topology): topology.standalone.restart(timeout=120) 
topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(), - 'cn': PROVISIONING_CN}))) - topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), - 'cn': ACTIVE_CN}))) - topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), - 'cn': STAGE_CN}))) - topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), - 'cn': DELETE_CN}))) + 'cn': PROVISIONING_CN}))) + topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(), + 'cn': ACTIVE_CN}))) + topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(), + 'cn': STAGE_CN}))) + topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(), + 'cn': DELETE_CN}))) topology.standalone.errorlog_file = open(topology.standalone.errlog, "r") topology.standalone.stop(timeout=120) @@ -532,150 +540,155 @@ def test_ticket47823_init(topology): topology.standalone.start(timeout=120) time.sleep(3) - + def test_ticket47823_one_container_add(topology): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") _active_container_add(topology, type_config='old') - + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") - + _active_container_add(topology, type_config='new') - + + def test_ticket47823_one_container_mod(topology): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)") - + _active_container_mod(topology, type_config='old') - + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)") - + _active_container_mod(topology, type_config='new') - - - + + def test_ticket47823_one_container_modrdn(topology): ''' Check uniqueness in a single container Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") - + _active_container_modrdn(topology, type_config='old') - + _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)") - + _active_container_modrdn(topology, type_config='new') - + + def test_ticket47823_multi_containers_add(topology): ''' Check uniqueness in a several containers Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ") _active_stage_containers_add(topology, type_config='old', across_subtrees=False) - + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ") - + _active_stage_containers_add(topology, type_config='new', across_subtrees=False) - + + def test_ticket47823_multi_containers_mod(topology): ''' Check uniqueness in a several containers Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container") - - + topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on 
separated containers') _active_stage_containers_mod(topology, type_config='old', across_subtrees=False) - + _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container") - - + topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers') _active_stage_containers_mod(topology, type_config='new', across_subtrees=False) - + + def test_ticket47823_multi_containers_modrdn(topology): ''' Check uniqueness in a several containers Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container - + ''' _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers") - + topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False) - + topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers') _active_stage_containers_modrdn(topology, type_config='old') + def test_ticket47823_across_multi_containers_add(topology): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value - + ''' _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers") _active_stage_containers_add(topology, type_config='old', across_subtrees=True) - + + def test_ticket47823_across_multi_containers_mod(topology): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value - + ''' _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers") _active_stage_containers_mod(topology, type_config='old', across_subtrees=True) + def test_ticket47823_across_multi_containers_modrdn(topology): ''' Check uniqueness across several containers, uniquely with the new configuration Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value - + ''' _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers") _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True) - + + def test_ticket47823_invalid_config_1(topology): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg0 is missing ''' _header(topology, "Invalid config (old): arg0 is missing") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - + del config.data['nsslapd-pluginarg0'] # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -686,15 +699,15 @@ def test_ticket47823_invalid_config_1(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: attribute name not defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -703,29 +716,30 @@ def test_ticket47823_invalid_config_1(topology): except ldap.NO_SUCH_OBJECT: pass + def test_ticket47823_invalid_config_2(topology): ''' Check that an invalid config is detected. No uniqueness enforced Using old config: arg1 is missing ''' _header(topology, "Invalid config (old): arg1 is missing") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - + del config.data['nsslapd-pluginarg1'] # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -736,15 +750,15 @@ def test_ticket47823_invalid_config_2(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: No valid subtree is defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -753,30 +767,31 @@ def test_ticket47823_invalid_config_2(topology): except ldap.NO_SUCH_OBJECT: pass + def test_ticket47823_invalid_config_3(topology): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg0 is missing ''' _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - + del config.data['nsslapd-pluginarg0'] config.data['uniqueness-attribute-name'] = 'cn' # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -787,15 +802,15 @@ def test_ticket47823_invalid_config_3(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: objectclass for subtree entries is not defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -803,31 +818,32 @@ def test_ticket47823_invalid_config_3(topology): topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass - + + def test_ticket47823_invalid_config_4(topology): ''' Check that an invalid config is detected. 
No uniqueness enforced Using old config: arg1 is missing ''' _header(topology, "Invalid config (old): arg1 is missing but new config exist") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False) - + del config.data['nsslapd-pluginarg1'] config.data['uniqueness-subtrees'] = ACTIVE_DN # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -838,15 +854,15 @@ def test_ticket47823_invalid_config_4(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: No valid subtree is defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -854,30 +870,31 @@ def test_ticket47823_invalid_config_4(topology): topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass - + + def test_ticket47823_invalid_config_5(topology): ''' Check that an invalid config is detected. No uniqueness enforced Using new config: uniqueness-attribute-name is missing ''' _header(topology, "Invalid config (new): uniqueness-attribute-name is missing") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) - + del config.data['uniqueness-attribute-name'] # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -888,15 +905,15 @@ def test_ticket47823_invalid_config_5(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: attribute name not defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -905,29 +922,30 @@ def test_ticket47823_invalid_config_5(topology): except ldap.NO_SUCH_OBJECT: pass + def test_ticket47823_invalid_config_6(topology): ''' Check that an invalid config is detected. 
No uniqueness enforced Using new config: uniqueness-subtrees is missing ''' _header(topology, "Invalid config (new): uniqueness-subtrees is missing") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False) - + del config.data['uniqueness-subtrees'] # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -938,15 +956,15 @@ def test_ticket47823_invalid_config_6(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: objectclass for subtree entries is not defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -954,29 +972,30 @@ def test_ticket47823_invalid_config_6(topology): topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass - + + def test_ticket47823_invalid_config_7(topology): ''' Check that an invalid config is detected. 
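_pattern_errorlog is defined earlier in this file; a minimal equivalent, shown here only to make the log check concrete and not necessarily the module's exact implementation, scans the instance error log for a compiled pattern:

    import re

    def pattern_errorlog(path, regex):
        '''Return the first matching line of the error log, or None.'''
        with open(path) as logfile:
            for line in logfile:
                if regex.search(line):
                    return line
        return None

    # e.g. pattern_errorlog(topology.standalone.errorlog_file,
    #                       re.compile('Config info: attribute name not defined'))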
No uniqueness enforced Using new config: uniqueness-subtrees is missing ''' _header(topology, "Invalid config (new): uniqueness-subtrees are invalid") - + _config_file(topology, action='save') - + # create an invalid config without arg0 config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False) - + # replace 'cn' uniqueness entry try: topology.standalone.delete_s(config.dn) - + except ldap.NO_SUCH_OBJECT: pass topology.standalone.add_s(config) - topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) - + topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) + # Check the server did not restart try: topology.standalone.restart(timeout=5) @@ -987,15 +1006,15 @@ def test_ticket47823_invalid_config_7(topology): assert not ent except ldap.SERVER_DOWN: pass - + # Check the expected error message regex = re.compile("Config info: No valid subtree is defined") - res =_pattern_errorlog(topology.standalone.errorlog_file, regex) + res = _pattern_errorlog(topology.standalone.errorlog_file, regex) if not res: # be sure to restore a valid config before assert - _config_file(topology, action='restore') + _config_file(topology, action='restore') assert res - + # Check we can restart the server _config_file(topology, action='restore') topology.standalone.start(timeout=5) @@ -1003,8 +1022,10 @@ def test_ticket47823_invalid_config_7(topology): topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS) except ldap.NO_SUCH_OBJECT: pass + + def test_ticket47823_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -1020,12 +1041,12 @@ def run_isolated(): topo = topology(True) test_ticket47823_init(topo) - + # run old/new config style that makes uniqueness checking on one subtree test_ticket47823_one_container_add(topo) test_ticket47823_one_container_mod(topo) test_ticket47823_one_container_modrdn(topo) - + # run old config style that makes uniqueness checking on each defined subtrees test_ticket47823_multi_containers_add(topo) test_ticket47823_multi_containers_mod(topo) @@ -1033,7 +1054,7 @@ def run_isolated(): test_ticket47823_across_multi_containers_add(topo) test_ticket47823_across_multi_containers_mod(topo) test_ticket47823_across_multi_containers_modrdn(topo) - + test_ticket47823_invalid_config_1(topo) test_ticket47823_invalid_config_2(topo) test_ticket47823_invalid_config_3(topo) @@ -1041,9 +1062,9 @@ def run_isolated(): test_ticket47823_invalid_config_5(topo) test_ticket47823_invalid_config_6(topo) test_ticket47823_invalid_config_7(topo) - + test_ticket47823_final(topo) - + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47824_test.py b/dirsrvtests/tickets/ticket47824_test.py index 98fca97..76e8471 100644 --- a/dirsrvtests/tickets/ticket47824_test.py +++ b/dirsrvtests/tickets/ticket47824_test.py @@ -38,6 +38,7 @@ _SUBLDIF2 = 'ticket47824_2.ldif' SEARCHFILTER = '(objectclass=*)' + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() @@ -50,7 +51,7 @@ def topology(request): This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. 
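Switching every *_final function from stop() to delete() is what lets a full run (jenkins, py.test) start each module from scratch. Under py.test the same guarantee can also be expressed directly in the fixture via a finalizer; a sketch of that alternative (not what this patch does), with _create_standalone_instance standing in for the allocation code shown in these fixtures:

    import pytest

    @pytest.fixture(scope="module")
    def topology(request):
        standalone = _create_standalone_instance()  # hypothetical setup helper

        def fin():
            standalone.delete()  # remove the instance even if a test fails
        request.addfinalizer(fin)

        return TopologyStandalone(standalone)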
- + Principle: If standalone instance exists: restart it @@ -69,60 +70,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) 
standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -131,7 +132,7 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init @@ -151,7 +152,7 @@ def test_ticket47824_run(topology): # bind as directory manager topology.standalone.log.info("Bind as %s" % DN_DM) topology.standalone.simple_bind_s(DN_DM, PASSWORD) - + topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47824.org ######################\n") topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE}) @@ -176,7 +177,7 @@ def test_ticket47824_run(topology): # get tmp dir mytmp = topology.standalone.getDir(__file__, TMP_DIR) - if mytmp == None: + if mytmp is None: mytmp = "/tmp" MYLDIF = '%s%s' % (mytmp, _MYLDIF) @@ -244,7 +245,7 @@ def test_ticket47824_run(topology): req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='') known_ldap_resp_ctrls = { - SimplePagedResultsControl.controlType:SimplePagedResultsControl, + SimplePagedResultsControl.controlType: SimplePagedResultsControl, } topology.standalone.log.info("Calling search_ext...") @@ -259,18 +260,18 @@ def test_ticket47824_run(topology): rtype, rdata, rmsgid, serverctrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls) topology.standalone.log.info("%d results" % len(rdata)) pageddncnt += len(rdata) - + topology.standalone.log.info("Results:") for dn, attrs in rdata: topology.standalone.log.info("dn: %s" % dn) - + pctrls = [ c for c in serverctrls if c.controlType == SimplePagedResultsControl.controlType ] if not pctrls: topology.standalone.log.info('Warning: Server ignores RFC 2696 control.') break - + if pctrls[0].cookie: req_ctrl.cookie = pctrls[0].cookie topology.standalone.log.info("cookie: %s" % req_ctrl.cookie) @@ -286,25 +287,27 @@ def test_ticket47824_run(topology): topology.standalone.log.info("Paged result search returned %d entries.\n", pageddncnt) assert dnnum == len(entries) - topology.standalone.log.info("ticket47824 was successfully verified."); + topology.standalone.log.info("ticket47824 was successfully verified.") + def test_ticket47824_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() + + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix installation_prefix = None - + topo = topology(True) test_ticket47824_run(topo) - + test_ticket47824_final(topo) diff --git a/dirsrvtests/tickets/ticket47829_test.py b/dirsrvtests/tickets/ticket47829_test.py index ab8be77..ffbab03 100644 --- a/dirsrvtests/tickets/ticket47829_test.py +++ b/dirsrvtests/tickets/ticket47829_test.py @@ -23,9 +23,6 @@ SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX) PROVISIONING_CN = "provisioning" PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN) - - - ACTIVE_CN = "accounts" STAGE_CN = "staged users" DELETE_CN = "deleted users" @@ -54,8 +51,6 @@ OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN) INDIRECT_ACTIVE_GROUP_CN = "indirect active group" INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN) - - log = logging.getLogger(__name__) installation_prefix = None @@ -67,14 +62,13 @@ class TopologyStandalone(object): self.standalone = standalone - @pytest.fixture(scope="module") def topology(request): ''' This fixture is used to standalone topology for the 'module'. At the beginning, It may exists a standalone instance. It may also exists a backup for the standalone instance. - + Principle: If standalone instance exists: restart it @@ -93,60 +87,60 @@ def topology(request): if installation_prefix: args_instance[SER_DEPLOYED_DIR] = installation_prefix - + standalone = DirSrv(verbose=False) - + # Args for the standalone instance args_instance[SER_HOST] = HOST_STANDALONE args_instance[SER_PORT] = PORT_STANDALONE args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE args_standalone = args_instance.copy() standalone.allocate(args_standalone) - + # Get the status of the backups backup_standalone = standalone.checkBackupFS() - + # Get the status of the instance and restart it if it exists - instance_standalone = standalone.exists() + instance_standalone = standalone.exists() if instance_standalone: # assuming the instance is already stopped, just wait 5 sec max standalone.stop(timeout=5) standalone.start(timeout=10) - + if backup_standalone: - # The backup exist, assuming it is correct + # The backup exist, assuming it is correct # we just re-init the instance with it if not instance_standalone: standalone.create() # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # restore standalone instance from backup standalone.stop(timeout=10) standalone.restoreFS(backup_standalone) standalone.start(timeout=10) - + else: # We should be here only in two conditions # - This is the first time a test involve standalone instance # - Something weird happened (instance/backup destroyed) # so we discard everything and recreate all - + # Remove the backup. So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) 
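The search loop in test_ticket47824_run above is the standard RFC 2696 simple paged results pattern from python-ldap. Reduced to its essentials (conn is an already-bound LDAPObject; the base and page size are illustrative):

    import ldap
    from ldap.controls import SimplePagedResultsControl

    def paged_search(conn, base, page_size=5):
        req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
        known = {SimplePagedResultsControl.controlType: SimplePagedResultsControl}
        entries = []
        while True:
            msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, '(objectclass=*)',
                                    serverctrls=[req_ctrl])
            _, rdata, _, serverctrls = conn.result3(msgid,
                                                    resp_ctrl_classes=known)
            entries.extend(rdata)
            pctrls = [c for c in serverctrls
                      if c.controlType == SimplePagedResultsControl.controlType]
            if not pctrls:
                break  # server ignores the RFC 2696 control
            if not pctrls[0].cookie:
                break  # empty cookie means this was the last page
            req_ctrl.cookie = pctrls[0].cookie
        return entries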
standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -155,20 +149,22 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init # Time to return the topology return TopologyStandalone(standalone) + def _header(topology, label): topology.standalone.log.info("\n\n###############################################") topology.standalone.log.info("#######") topology.standalone.log.info("####### %s" % label) topology.standalone.log.info("#######") topology.standalone.log.info("###############################################") - + + def _add_user(topology, type='active'): if type == 'active': topology.standalone.add_s(Entry((ACTIVE_USER_DN, { @@ -186,6 +182,7 @@ def _add_user(topology, type='active'): 'sn': OUT_USER_CN, 'cn': OUT_USER_CN}))) + def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): assert(topology) assert(user_dn) @@ -199,12 +196,13 @@ def _find_memberof(topology, user_dn=None, group_dn=None, find_result=True): if val == group_dn: found = True break - + if find_result: - assert(found) + assert(found) else: assert(not found) + def _find_member(topology, user_dn=None, group_dn=None, find_result=True): assert(topology) assert(user_dn) @@ -218,18 +216,18 @@ def _find_member(topology, user_dn=None, group_dn=None, find_result=True): if val == user_dn: found = True break - + if find_result: - assert(found) + assert(found) else: assert(not found) - + + def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_superior=None): - assert topology != None - assert entry_dn != None - assert new_rdn != None - - + assert topology is not None + assert entry_dn is not None + assert new_rdn is not None + topology.standalone.log.info("\n\n######################### MODRDN %s ######################\n" % new_rdn) try: if new_superior: @@ -245,19 +243,20 @@ def _modrdn_entry(topology=None, entry_dn=None, new_rdn=None, del_old=0, new_sup else: base = ','.join(entry_dn.split(",")[1:]) dn = "%s, %s" % (new_rdn, base) - filter = entry_dn.split(',')[0] - + myfilter = entry_dn.split(',')[0] + while attempt < 10: try: - ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, filter) + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) break except ldap.NO_SUCH_OBJECT: topology.standalone.log.info("Accept failure due to 47833: unable to find (base) a modrdn entry") attempt += 1 time.sleep(1) if attempt == 10: - ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, filter) - ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, filter) + ent = topology.standalone.getEntry(base, ldap.SCOPE_SUBTREE, myfilter) + ent = topology.standalone.getEntry(dn, ldap.SCOPE_BASE, myfilter) + def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, find_result=None): assert(topology) @@ -271,12 +270,10 @@ def _check_memberof(topology=None, action=None, user_dn=None, group_dn=None, fin txt = 'replace' topology.standalone.log.info('\n%s entry %s' % (txt, user_dn)) topology.standalone.log.info('to group %s' % group_dn) - + topology.standalone.modify_s(group_dn, [(action, 'member', user_dn)]) time.sleep(1) _find_memberof(topology, user_dn=user_dn, group_dn=group_dn, find_result=find_result) - - def test_ticket47829_init(topology): @@ -298,7 +295,7 @@ def test_ticket47829_init(topology): topology.standalone.add_s(Entry((DELETE_DN, { 
'objectclass': "top nscontainer".split(), 'cn': DELETE_CN}))) - + # add groups topology.standalone.add_s(Entry((ACTIVE_GROUP_DN, { 'objectclass': "top groupOfNames inetuser".split(), @@ -312,374 +309,372 @@ def test_ticket47829_init(topology): topology.standalone.add_s(Entry((INDIRECT_ACTIVE_GROUP_DN, { 'objectclass': "top groupOfNames".split(), 'cn': INDIRECT_ACTIVE_GROUP_CN}))) - + # add users _add_user(topology, 'active') _add_user(topology, 'stage') _add_user(topology, 'out') - - # enable memberof of with scope IN except provisioning topology.standalone.plugins.enable(name=PLUGIN_MEMBER_OF) dn = "cn=%s,%s" % (PLUGIN_MEMBER_OF, DN_PLUGIN) topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN)]) topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree', PROVISIONING_DN)]) - + # enable RI with scope IN except provisioning topology.standalone.plugins.enable(name=PLUGIN_REFER_INTEGRITY) dn = "cn=%s,%s" % (PLUGIN_REFER_INTEGRITY, DN_PLUGIN) topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginentryscope', SCOPE_IN_DN)]) topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-plugincontainerscope', SCOPE_IN_DN)]) topology.standalone.modify_s(dn, [(ldap.MOD_REPLACE, 'nsslapd-pluginExcludeEntryScope', PROVISIONING_DN)]) - - + topology.standalone.restart(timeout=10) + def test_ticket47829_mod_active_user_1(topology): _header(topology, 'MOD: add an active user to an active group') - + # add active user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # remove active user to active group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + + def test_ticket47829_mod_active_user_2(topology): _header(topology, 'MOD: add an Active user to a Stage group') - + # add active user to stage group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + # remove active user to stage group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - + + def test_ticket47829_mod_active_user_3(topology): _header(topology, 'MOD: add an Active user to a out of scope group') - + # add active user to out of scope group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + # remove active user to out of scope group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - + def test_ticket47829_mod_stage_user_1(topology): _header(topology, 'MOD: add an Stage user to a Active group') - + # add stage user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, 
group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # remove stage user to active group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + + def test_ticket47829_mod_stage_user_2(topology): _header(topology, 'MOD: add an Stage user to a Stage group') - + # add stage user to stage group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + # remove stage user to stage group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - + def test_ticket47829_mod_stage_user_3(topology): _header(topology, 'MOD: add an Stage user to a out of scope group') - + # add stage user to an out of scope group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + # remove stage user to out of scope group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=STAGE_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - def test_ticket47829_mod_out_user_1(topology): _header(topology, 'MOD: add an out of scope user to an active group') - + # add out of scope user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # remove out of scope user to active group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + def test_ticket47829_mod_out_user_2(topology): _header(topology, 'MOD: add an out of scope user to a Stage group') - + # add out of scope user to stage group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=True) + # remove out of scope user to stage group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=STAGE_GROUP_DN, find_result=False) - + def test_ticket47829_mod_out_user_3(topology): _header(topology, 'MOD: add an out of scope user to an out of scope group') - + # add out of scope user to stage group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) - _find_member (topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=True) + # remove out of scope user to stage group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=OUT_USER_DN, group_dn=OUT_GROUP_DN, find_result=False) def test_ticket47829_mod_active_user_modrdn_active_user_1(topology): _header(topology, 'add an Active user to a Active group. 
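All of the member/memberOf expectations in the ticket 47829 cases follow from the plugin scoping applied in test_ticket47829_init above: both MemberOf and Referential Integrity operate under SCOPE_IN_DN but exclude the provisioning subtree. The memberOf half, reduced to its modify_s calls (plugin DN and subtrees illustrative):

    import ldap

    SCOPE_IN_DN = 'cn=accounts,dc=example,dc=com'       # illustrative
    PROVISIONING_DN = 'cn=provisioning,' + SCOPE_IN_DN  # illustrative
    MEMBEROF_DN = 'cn=MemberOf Plugin,cn=plugins,cn=config'

    def scope_memberof(conn):
        # memberOf fixups apply under SCOPE_IN_DN, except the provisioning tree
        conn.modify_s(MEMBEROF_DN, [
            (ldap.MOD_REPLACE, 'memberOfEntryScope', SCOPE_IN_DN),
            (ldap.MOD_REPLACE, 'memberOfEntryScopeExcludeSubtree',
             PROVISIONING_DN),
        ])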
Then move Active user to Active') - + # add Active user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to active, expect 'member' and 'memberof' _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=x%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=x%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=x%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to active, expect 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=x%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - + _modrdn_entry(topology, entry_dn="cn=x%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + # remove active user to active group _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - - - - def test_ticket47829_mod_active_user_modrdn_stage_user_1(topology): _header(topology, 'add an Active user to a Active group. 
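_modrdn_entry (defined earlier in this file) wraps python-ldap's rename_s plus a retry loop that tolerates ticket 47833; the underlying move between containers is just:

    import ldap

    def move_entry(conn, dn, new_rdn, new_parent):
        # delold=0 keeps the old RDN value as an ordinary attribute value
        conn.rename_s(dn, new_rdn, newsuperior=new_parent, delold=0)

    # e.g. move_entry(conn, 'cn=user,cn=staged users,...', 'cn=user',
    #                 'cn=accounts,...')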
Then move Active user to Stage') - + # add Active user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to stage, expect no 'member' and 'memberof' _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + # move the Active entry to Stage, expect 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + def test_ticket47829_mod_active_user_modrdn_out_user_1(topology): _header(topology, 'add an Active user to a Active group. 
Then move Active user to out of scope') - + # add Active user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to out of scope, expect no 'member' and no 'memberof' _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=OUT_GROUP_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + # move the Active entry to out of scope, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" %(ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, OUT_GROUP_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + - def test_ticket47829_mod_modrdn_1(topology): _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Active') - + # add Stage user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Stage entry to active, expect 'member' and 'memberof' _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member(topology, user_dn="cn=%s,%s" %(STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry(topology, entry_dn="cn=%s,%s" %(STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) - _find_memberof(topology, user_dn="cn=%s,%s" %(STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - - - - + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) + _find_memberof(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + + def test_ticket47829_mod_stage_user_modrdn_active_user_1(topology): _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Active') - + stage_user_dn = STAGE_USER_DN stage_user_rdn = "cn=%s" % STAGE_USER_CN active_user_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) - + # add Stage user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Stage entry to Actve, expect 'member' and 'memberof' - _modrdn_entry (topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) + _modrdn_entry(topology, entry_dn=stage_user_dn, new_rdn=stage_user_rdn, new_superior=ACTIVE_DN) _find_memberof(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=active_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Active entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry (topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=stage_user_dn , group_dn=ACTIVE_GROUP_DN, find_result=False) - - + _modrdn_entry(topology, entry_dn=active_user_dn, new_rdn=stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + def test_ticket47829_mod_stage_user_modrdn_stage_user_1(topology): _header(topology, 'add an Stage user to a Active group. 
Then move Stage user to Stage') - + _header(topology, 'Return because it requires a fix for 47833') return - - old_stage_user_dn = STAGE_USER_DN + + old_stage_user_dn = STAGE_USER_DN old_stage_user_rdn = "cn=%s" % STAGE_USER_CN new_stage_user_rdn = "cn=x%s" % STAGE_USER_CN new_stage_user_dn = "%s,%s" % (new_stage_user_rdn, STAGE_DN) - + # add Stage user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move the Stage entry to Stage, expect no 'member' and 'memberof' - _modrdn_entry (topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) + _modrdn_entry(topology, entry_dn=old_stage_user_dn, new_rdn=new_stage_user_rdn, new_superior=STAGE_DN) _find_memberof(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - + _find_member(topology, user_dn=new_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + # move the Stage entry to Stage, expect no 'member' and no 'memberof' - _modrdn_entry (topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) - _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=old_stage_user_dn , group_dn=ACTIVE_GROUP_DN, find_result=False) - + _modrdn_entry(topology, entry_dn=new_stage_user_dn, new_rdn=old_stage_user_rdn, new_superior=STAGE_DN) + _find_memberof(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=old_stage_user_dn, group_dn=ACTIVE_GROUP_DN, find_result=False) + + def test_ticket47829_indirect_active_group_1(topology): _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1') - + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - + # add an active user to G1. Checks that user is memberof G1 _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + # remove G1 from G0 topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # remove active user from G1 _check_memberof(topology, action=ldap.MOD_DELETE, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + + def test_ticket47829_indirect_active_group_2(topology): _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. 
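The indirect-group cases nest one group inside another through an ordinary member value, so memberOf values propagate through the nesting while it is in place and disappear when it is undone. The nesting itself (DNs illustrative):

    import ldap

    def nest_group(conn, outer_group_dn, inner_group_dn):
        conn.modify_s(outer_group_dn,
                      [(ldap.MOD_ADD, 'member', inner_group_dn)])

    def unnest_group(conn, outer_group_dn, inner_group_dn):
        conn.modify_s(outer_group_dn,
                      [(ldap.MOD_DELETE, 'member', inner_group_dn)])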
Then move active user to stage') - + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - + # add an active user to G1. Checks that user is memberof G1 _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + # remove G1 from G0 topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move active user to stage _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=STAGE_DN) - + # stage user is no long member of active group and indirect active group - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + # active group and indirect active group do no longer have stage user as member - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + # return back the entry to active. It remains not member - _modrdn_entry(topology, entry_dn="cn=%s,%s" %(ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, STAGE_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + + def test_ticket47829_indirect_active_group_3(topology): _header(topology, 'add an Active group (G1) to an active group (G0). Then add active user to G1. Then move active user to out of the scope') - + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - + # add an active user to G1. 
Checks that user is memberof G1 _check_memberof(topology, action=ldap.MOD_ADD, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) + # remove G1 from G0 topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_DELETE, 'member', ACTIVE_GROUP_DN)]) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn=ACTIVE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + # move active user to out of the scope _modrdn_entry(topology, entry_dn=ACTIVE_USER_DN, new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=SCOPE_OUT_DN) - + # stage user is no long member of active group and indirect active group - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_memberof(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_memberof(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + # active group and indirect active group do no longer have stage user as member - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) - + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + # return back the entry to active. It remains not member - _modrdn_entry(topology, entry_dn="cn=%s,%s" %(ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member(topology, user_dn="cn=%s,%s" %(ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _modrdn_entry(topology, entry_dn="cn=%s,%s" % (ACTIVE_USER_CN, SCOPE_OUT_DN), new_rdn="cn=%s" % ACTIVE_USER_CN, new_superior=ACTIVE_DN) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn="cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN), group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + def test_ticket47829_indirect_active_group_4(topology): _header(topology, 'add an Active group (G1) to an active group (G0). Then add stage user to G1. Then move user to active. 
Then move it back') - + topology.standalone.modify_s(INDIRECT_ACTIVE_GROUP_DN, [(ldap.MOD_ADD, 'member', ACTIVE_GROUP_DN)]) - + # add stage user to active group _check_memberof(topology, action=ldap.MOD_ADD, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + # move stage user to active _modrdn_entry(topology, entry_dn=STAGE_USER_DN, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=ACTIVE_DN) - renamed_stage_dn = "cn=%s,%s" %(STAGE_USER_CN, ACTIVE_DN) - _find_member (topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - _find_member (topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + renamed_stage_dn = "cn=%s,%s" % (STAGE_USER_CN, ACTIVE_DN) + _find_member(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) + _find_member(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=True) _find_memberof(topology, user_dn=renamed_stage_dn, group_dn=ACTIVE_GROUP_DN, find_result=True) - + # move back active to stage - _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - _find_member (topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) + _modrdn_entry(topology, entry_dn=renamed_stage_dn, new_rdn="cn=%s" % STAGE_USER_CN, new_superior=STAGE_DN) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) + _find_member(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=INDIRECT_ACTIVE_GROUP_DN, find_result=False) _find_memberof(topology, user_dn=STAGE_USER_DN, group_dn=ACTIVE_GROUP_DN, find_result=False) - + + def test_ticket47829_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47829_init(topo) @@ -692,21 +687,22 @@ def run_isolated(): test_ticket47829_mod_out_user_1(topo) test_ticket47829_mod_out_user_2(topo) test_ticket47829_mod_out_user_3(topo) - + test_ticket47829_mod_active_user_modrdn_active_user_1(topo) test_ticket47829_mod_active_user_modrdn_stage_user_1(topo) test_ticket47829_mod_active_user_modrdn_out_user_1(topo) - + test_ticket47829_mod_stage_user_modrdn_active_user_1(topo) test_ticket47829_mod_stage_user_modrdn_stage_user_1(topo) - + test_ticket47829_indirect_active_group_1(topo) test_ticket47829_indirect_active_group_2(topo) test_ticket47829_indirect_active_group_3(topo) test_ticket47829_indirect_active_group_4(topo) - + test_ticket47829_final(topo) + if __name__ == '__main__': run_isolated() diff --git a/dirsrvtests/tickets/ticket47838_test.py b/dirsrvtests/tickets/ticket47838_test.py index 50a4b50..4a59e14 100644 --- a/dirsrvtests/tickets/ticket47838_test.py +++ b/dirsrvtests/tickets/ticket47838_test.py @@ -28,11 +28,13 @@ plus_all_dcount = 0 plus_all_ecount_noweak = 0 plus_all_dcount_noweak = 0 + class TopologyStandalone(object): def __init__(self, standalone): standalone.open() self.standalone = standalone + @pytest.fixture(scope="module") def topology(request): ''' @@ -130,6 +132,7 @@ def topology(request): # Time to return the topology return TopologyStandalone(standalone) + def _header(topology, label): topology.standalone.log.info("\n\n###############################################") topology.standalone.log.info("#######") @@ -137,6 +140,7 @@ def _header(topology, label): topology.standalone.log.info("#######") topology.standalone.log.info("###############################################") + def test_ticket47838_init(topology): """ Generate self signed cert and import it to the DS cert db. 
@@ -217,6 +221,7 @@ def test_ticket47838_init(topology): 'nsSSLToken': 'internal (software)', 'nsSSLActivation': 'on'}))) + def comp_nsSSLEnableCipherCount(topology, ecount): """ Check nsSSLEnabledCipher count with ecount @@ -230,11 +235,12 @@ def comp_nsSSLEnableCipherCount(topology, ecount): topology.standalone.log.info("Results:") for dn, attrs in rdata: topology.standalone.log.info("dn: %s" % dn) - if attrs.has_key('nsSSLEnabledCiphers'): + if 'nsSSLEnabledCiphers' in attrs: enabledciphercnt = len(attrs['nsSSLEnabledCiphers']) topology.standalone.log.info("enabledCipherCount: %d" % enabledciphercnt) assert ecount == enabledciphercnt + def test_ticket47838_run_0(topology): """ Check nsSSL3Ciphers: +all @@ -269,6 +275,7 @@ def test_ticket47838_run_0(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_1(topology): """ Check nsSSL3Ciphers: +all @@ -310,6 +317,7 @@ def test_ticket47838_run_1(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_2(topology): """ Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha @@ -341,6 +349,7 @@ def test_ticket47838_run_2(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_3(topology): """ Check nsSSL3Ciphers: -all @@ -371,6 +380,7 @@ def test_ticket47838_run_3(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_4(topology): """ Check no nsSSL3Ciphers @@ -406,6 +416,7 @@ def test_ticket47838_run_4(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_5(topology): """ Check nsSSL3Ciphers: default @@ -441,6 +452,7 @@ def test_ticket47838_run_5(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_6(topology): """ Check nssSSL3Chiphers: +all,-rsa_rc4_128_md5 @@ -474,6 +486,7 @@ def test_ticket47838_run_6(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_7(topology): """ Check nssSSL3Chiphers: -all,+rsa_rc4_128_md5 @@ -505,6 +518,7 @@ def test_ticket47838_run_7(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_8(topology): """ Check nsSSL3Ciphers: default + allowWeakCipher: off @@ -540,6 +554,7 @@ def test_ticket47838_run_8(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_9(topology): """ Check no nsSSL3Ciphers @@ -576,6 +591,7 @@ def test_ticket47838_run_9(topology): comp_nsSSLEnableCipherCount(topology, ecount) + def test_ticket47838_run_10(topology): """ Check nssSSL3Chiphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5, @@ -592,7 +608,7 @@ def test_ticket47838_run_10(topology): _header(topology, 'Test Case 11 - Check nssSSL3Chiphers: long list using the NSS Cipher Suite name with allowWeakCipher on') topology.standalone.simple_bind_s(DN_DM, PASSWORD) - topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', + topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', '-TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,+TLS_RSA_EXPORT_WITH_RC4_40_MD5,+TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,+TLS_DHE_RSA_WITH_DES_CBC_SHA,+SSL_RSA_FIPS_WITH_DES_CBC_SHA,+TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,+SSL_RSA_FIPS_WITH_3DES_EDE_CBC_SHA,+TLS_RSA_EXPORT1024_WITH_RC4_56_SHA,+TLS_RSA_EXPORT1024_WITH_DES_CBC_SHA,-SSL_CK_RC4_128_WITH_MD5,-SSL_CK_RC4_128_EXPORT40_WITH_MD5,-SSL_CK_RC2_128_CBC_WITH_MD5,-SSL_CK_RC2_128_CBC_EXPORT40_WITH_MD5,-SSL_CK_DES_64_CBC_WITH_MD5,-SSL_CK_DES_192_EDE3_CBC_WITH_MD5')]) log.info("\n######################### 
@@ -616,10 +632,11 @@ def test_ticket47838_run_10(topology):
         wcount = int(weak.readline().rstrip())
         log.info("Weak ciphers in the default setting: %d" % wcount)
 
-    topology.standalone.log.info("ticket47838 was successfully verified.");
+    topology.standalone.log.info("ticket47838 was successfully verified.")
 
     comp_nsSSLEnableCipherCount(topology, ecount)
 
+
 def test_ticket47838_run_11(topology):
     """
     Check nsSSL3Ciphers: +fortezza
@@ -646,6 +663,7 @@ def test_ticket47838_run_11(topology):
 
     comp_nsSSLEnableCipherCount(topology, 0)
 
+
 def test_ticket47928_run_0(topology):
     """
     No SSL version config parameters.
@@ -679,6 +697,7 @@ def test_ticket47928_run_0(topology):
         log.info("Expected message was not found")
         assert False
 
+
 def test_ticket47928_run_1(topology):
     """
     No nsSSL3, nsTLS1; sslVersionMin > sslVersionMax
@@ -712,6 +731,7 @@ def test_ticket47928_run_1(topology):
         log.info("Expected message was not found")
         assert False
 
+
 def test_ticket47928_run_2(topology):
     """
     nsSSL3: on; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2
@@ -754,6 +774,7 @@ def test_ticket47928_run_2(topology):
         log.info("Expected message was not found")
         assert False
 
+
 def test_ticket47928_run_3(topology):
     """
     nsSSL3: on; nsTLS1: off; sslVersionMin: TLS1.1; sslVersionMax: TLS1.2
@@ -797,6 +818,7 @@ def test_ticket47928_run_3(topology):
         log.info("Expected message was not found")
         assert False
 
+
 def test_ticket47838_run_last(topology):
     """
     Check nsSSL3Ciphers: all <== invalid value
@@ -824,14 +846,12 @@ def test_ticket47838_run_last(topology):
 
     comp_nsSSLEnableCipherCount(topology, 0)
 
-    topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.");
+    topology.standalone.log.info("ticket47838, 47880, 47908, 47928 were successfully verified.")
+
 
 def test_ticket47838_final(topology):
-    topology.standalone.simple_bind_s(DN_DM, PASSWORD)
-    topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', None)])
-    topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_REPLACE, 'nsSSL3Ciphers', 'default'),
-                                                 (ldap.MOD_REPLACE, 'allowWeakCipher', 'on')])
-    topology.standalone.stop(timeout=10)
+    topology.standalone.delete()
+
 
 def run_isolated():
     '''
@@ -846,7 +866,7 @@ def run_isolated():
 
     topo = topology(True)
     test_ticket47838_init(topo)
-    
+
     test_ticket47838_run_0(topo)
     test_ticket47838_run_1(topo)
     test_ticket47838_run_2(topo)
@@ -865,8 +885,9 @@ def run_isolated():
     test_ticket47928_run_3(topo)
 
     test_ticket47838_run_last(topo)
-    
+
     test_ticket47838_final(topo)
 
+
 if __name__ == '__main__':
     run_isolated()
diff --git a/dirsrvtests/tickets/ticket47869MMR_test.py b/dirsrvtests/tickets/ticket47869MMR_test.py
index 47ac5b2..3e21aed 100644
--- a/dirsrvtests/tickets/ticket47869MMR_test.py
+++ b/dirsrvtests/tickets/ticket47869MMR_test.py
@@ -32,14 +32,16 @@ BIND_NAME = 'bind_entry'
 BIND_DN = 'cn=%s, %s' % (BIND_NAME, SUFFIX)
 BIND_PW = 'password'
 
+
 class TopologyMaster1Master2(object):
     def __init__(self, master1, master2):
         master1.open()
         self.master1 = master1
-        
+
         master2.open()
         self.master2 = master2
 
+
 @pytest.fixture(scope="module")
 def topology(request):
     '''
@@ -47,7 +49,7 @@ def topology(request):
         The replicated topology is MASTER1 <-> Master2.
         At the beginning, there may already be a master1 instance and/or a master2 instance.
         There may also be a backup for master1 and/or master2.
-        
+
         Principle:
             If master1 instance exists:
                 restart it
@@ -74,19 +76,19 @@ def topology(request):
     master1 = DirSrv(verbose=False)
     if installation1_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-    
+
     # Args for the master1 instance
     args_instance[SER_HOST] = HOST_MASTER_1
     args_instance[SER_PORT] = PORT_MASTER_1
     args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
     args_master = args_instance.copy()
     master1.allocate(args_master)
-    
+
     # allocate master2 on a given deployment
     master2 = DirSrv(verbose=False)
     if installation2_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation2_prefix
-    
+
     # Args for the master2 instance
     args_instance[SER_HOST] = HOST_MASTER_2
     args_instance[SER_PORT] = PORT_MASTER_2
@@ -94,40 +96,39 @@ def topology(request):
     args_master = args_instance.copy()
     master2.allocate(args_master)
 
-    
     # Get the status of the backups
     backup_master1 = master1.checkBackupFS()
     backup_master2 = master2.checkBackupFS()
-    
+
     # Get the status of the instance and restart it if it exists
     instance_master1 = master1.exists()
     if instance_master1:
         master1.stop(timeout=10)
         master1.start(timeout=10)
-    
+
     instance_master2 = master2.exists()
     if instance_master2:
         master2.stop(timeout=10)
         master2.start(timeout=10)
-    
+
     if backup_master1 and backup_master2:
-        # The backups exist, assuming they are correct 
+        # The backups exist, assuming they are correct
         # we just re-init the instances with them
         if not instance_master1:
             master1.create()
             # Used to retrieve configuration information (dbdir, confdir...)
             master1.open()
-        
+
         if not instance_master2:
             master2.create()
             # Used to retrieve configuration information (dbdir, confdir...)
             master2.open()
-        
+
         # restore master1 from backup
         master1.stop(timeout=10)
        master1.restoreFS(backup_master1)
        master1.start(timeout=10)
-        
+
        # restore master2 from backup
        master2.stop(timeout=10)
        master2.restoreFS(backup_master2)
@@ -138,48 +139,48 @@ def topology(request):
         #     so we need to create everything
         # - Something weird happened (instance/backup destroyed)
         #     so we discard everything and recreate all
-        
+
         # Remove all the backups. So even if we have a specific backup file
        # (e.g backup_master) we clear all backups that an instance may have created
         if backup_master1:
             master1.clearBackupFS()
         if backup_master2:
             master2.clearBackupFS()
-        
+
         # Remove all the instances
         if instance_master1:
             master1.delete()
         if instance_master2:
             master2.delete()
-        
+
         # Create the instances
         master1.create()
         master1.open()
         master2.create()
         master2.open()
-    
-    # 
+
+    #
     # Now prepare the Master-Consumer topology
     #
     # First Enable replication
     master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
     master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-    
+
     # Initialize the supplier->consumer
-    
+
     properties = {RA_NAME: r'meTo_$host:$port',
                   RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                   RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                   RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                   RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
     repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
-    
+
     if not repl_agreement:
         log.fatal("Fail to create a replica agreement")
         sys.exit(1)
-    
+
     log.debug("%s created" % repl_agreement)
-    
+
     properties = {RA_NAME: r'meTo_$host:$port',
                   RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                   RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
@@ -189,12 +190,13 @@ def topology(request):
     master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
     master1.waitForReplInit(repl_agreement)
-    
+
     # Check replication is working fine
     master1.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person".split(),
                                         'sn': 'test_repl',
                                         'cn': 'test_repl'})))
     loop = 0
+    ent = None
     while loop <= 10:
         try:
             ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
@@ -202,12 +204,14 @@ def topology(request):
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
             loop += 1
-    
+
+    if ent is None:
+        assert False
+
     # Time to create the backups
     master1.stop(timeout=10)
     master1.backupfile = master1.backupFS()
     master1.start(timeout=10)
-    
+
     master2.stop(timeout=10)
     master2.backupfile = master2.backupFS()
     master2.start(timeout=10)
@@ -215,24 +219,25 @@ def topology(request):
 
     # clear the tmp directory
     master1.clearTmpDir(__file__)
 
-    # 
+    #
     # Here we have two instances master and consumer
     # with replication working.
Either coming from a backup recovery # or from a fresh (re)init # Time to return the topology return TopologyMaster1Master2(master1, master2) + def test_ticket47869_init(topology): """ It adds an entry ('bind_entry') and 10 test entries It sets the anonymous aci - + """ # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - + # entry used to bind with topology.master1.log.info("Add %s" % BIND_DN) topology.master1.add_s(Entry((BIND_DN, { @@ -241,6 +246,7 @@ def test_ticket47869_init(topology): 'cn': BIND_NAME, 'userpassword': BIND_PW}))) loop = 0 + ent = None while loop <= 10: try: ent = topology.master2.getEntry(BIND_DN, ldap.SCOPE_BASE, "(objectclass=*)") @@ -248,13 +254,15 @@ def test_ticket47869_init(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 - + if ent is None: + assert False + # keep anonymous ACI for use 'read-search' aci in SEARCH test ACI_ANONYMOUS = "(targetattr!=\"userPassword\")(version 3.0; acl \"Enable anonymous access\"; allow (read, search, compare) userdn=\"ldap:///anyone\";)" mod = [(ldap.MOD_REPLACE, 'aci', ACI_ANONYMOUS)] topology.master1.modify_s(SUFFIX, mod) topology.master2.modify_s(SUFFIX, mod) - + # add entries for cpt in range(MAX_ENTRIES): name = "%s%d" % (ENTRY_NAME, cpt) @@ -264,6 +272,7 @@ def test_ticket47869_init(topology): 'sn': name, 'cn': name}))) loop = 0 + ent = None while loop <= 10: try: ent = topology.master2.getEntry(mydn, ldap.SCOPE_BASE, "(objectclass=*)") @@ -271,6 +280,9 @@ def test_ticket47869_init(topology): except ldap.NO_SUCH_OBJECT: time.sleep(1) loop += 1 + if ent is None: + assert False + def test_ticket47869_check(topology): ''' @@ -288,7 +300,7 @@ def test_ticket47869_check(topology): Check nscpEntryWsi is not returned. 
''' topology.master1.log.info("\n\n######################### CHECK nscpentrywsi ######################\n") - + topology.master1.log.info("##### Master1: Bind as %s #####" % DN_DM) topology.master1.simple_bind_s(DN_DM, PASSWORD) @@ -297,12 +309,13 @@ def test_ticket47869_check(topology): nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master1.result2(msgid) topology.master1.log.info("%d results" % len(rdata)) - + topology.master1.log.info("Results:") for dn, attrs in rdata: topology.master1.log.info("dn: %s" % dn) - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 + topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) topology.master2.log.info("##### Master2: Bind as %s #####" % DN_DM) @@ -313,26 +326,27 @@ def test_ticket47869_check(topology): nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master2.result2(msgid) topology.master2.log.info("%d results" % len(rdata)) - + topology.master2.log.info("Results:") for dn, attrs in rdata: topology.master2.log.info("dn: %s" % dn) - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 + topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) # bind as bind_entry topology.master1.log.info("##### Master1: Bind as %s #####" % BIND_DN) topology.master1.simple_bind_s(BIND_DN, BIND_PW) - + topology.master1.log.info("Master1: Calling search_ext...") msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master1.result2(msgid) topology.master1.log.info("%d results" % len(rdata)) - + for dn, attrs in rdata: - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) @@ -346,9 +360,9 @@ def test_ticket47869_check(topology): nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master2.result2(msgid) topology.master2.log.info("%d results" % len(rdata)) - + for dn, attrs in rdata: - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) @@ -356,15 +370,15 @@ def test_ticket47869_check(topology): # bind as anonymous topology.master1.log.info("##### Master1: Bind as anonymous #####") topology.master1.simple_bind_s("", "") - + topology.master1.log.info("Master1: Calling search_ext...") msgid = topology.master1.search_ext(SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=*', ['nscpentrywsi']) nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master1.result2(msgid) topology.master1.log.info("%d results" % len(rdata)) - + for dn, attrs in rdata: - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology.master1.log.info("Master1: count of nscpentrywsi: %d" % nscpentrywsicnt) @@ -378,23 +392,25 @@ def test_ticket47869_check(topology): nscpentrywsicnt = 0 rtype, rdata, rmsgid = topology.master2.result2(msgid) topology.master2.log.info("%d results" % len(rdata)) - + for dn, attrs in rdata: - if attrs.has_key('nscpentrywsi'): + if 'nscpentrywsi' in attrs: nscpentrywsicnt += 1 assert nscpentrywsicnt == 0 topology.master2.log.info("Master2: count of nscpentrywsi: %d" % nscpentrywsicnt) - topology.master1.log.info("##### ticket47869 was successfully verified. 
#####");
+    topology.master1.log.info("##### ticket47869 was successfully verified. #####")
+
 
 def test_ticket47869_final(topology):
-    topology.master1.stop(timeout=10)
-    topology.master2.stop(timeout=10)
+    topology.master1.delete()
+    topology.master2.delete()
+
 
 def run_isolated():
     '''
         run_isolated is used to run these test cases independently of
         a test scheduler (xunit, py.test..)
-        To run isolated without py.test, you need to 
+        To run isolated without py.test, you need to
            - edit this file and comment '@pytest.fixture' line before 'topology' function.
            - set the installation prefix
            - run this program
@@ -403,14 +419,15 @@ def run_isolated():
     global installation2_prefix
     installation1_prefix = None
     installation2_prefix = None
-    
+
     topo = topology(True)
     test_ticket47869_init(topo)
-    
+
     test_ticket47869_check(topo)
-    
+
     test_ticket47869_final(topo)
 
+
 if __name__ == '__main__':
     run_isolated()
diff --git a/dirsrvtests/tickets/ticket47871_test.py b/dirsrvtests/tickets/ticket47871_test.py
index 1d1351b..5ddf315 100644
--- a/dirsrvtests/tickets/ticket47871_test.py
+++ b/dirsrvtests/tickets/ticket47871_test.py
@@ -30,13 +30,14 @@ ENTRY_DN = "cn=test_entry, %s" % SUFFIX
 OTHER_NAME = 'other_entry'
 MAX_OTHERS = 10
 
-ATTRIBUTES = [ 'street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber' ]
+ATTRIBUTES = ['street', 'countryName', 'description', 'postalAddress', 'postalCode', 'title', 'l', 'roomNumber']
+
 
 class TopologyMasterConsumer(object):
     def __init__(self, master, consumer):
         master.open()
         self.master = master
-        
+
         consumer.open()
         self.consumer = consumer
 
@@ -51,7 +52,7 @@ def topology(request):
         The replicated topology is MASTER -> Consumer.
         At the beginning, there may already be a master instance and/or a consumer instance.
         There may also be a backup for the master and/or the consumer.
-        
+
         Principle:
             If master instance exists:
                 restart it
@@ -75,17 +76,17 @@ def topology(request):
     if installation_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation_prefix
-    
+
     master = DirSrv(verbose=False)
     consumer = DirSrv(verbose=False)
-    
+
     # Args for the master instance
     args_instance[SER_HOST] = HOST_MASTER
     args_instance[SER_PORT] = PORT_MASTER
     args_instance[SER_SERVERID_PROP] = SERVERID_MASTER
     args_master = args_instance.copy()
     master.allocate(args_master)
-    
+
     # Args for the consumer instance
     args_instance[SER_HOST] = HOST_CONSUMER
     args_instance[SER_PORT] = PORT_CONSUMER
@@ -93,40 +94,40 @@ def topology(request):
     args_consumer = args_instance.copy()
     consumer.allocate(args_consumer)
 
-    
     # Get the status of the backups
     backup_master = master.checkBackupFS()
     backup_consumer = consumer.checkBackupFS()
-    
+
     # Get the status of the instance and restart it if it exists
-    instance_master = master.exists() 
+    instance_master = master.exists()
     if instance_master:
         master.stop(timeout=10)
         master.start(timeout=10)
-    
+
     instance_consumer = consumer.exists()
     if instance_consumer:
         consumer.stop(timeout=10)
         consumer.start(timeout=10)
-    
+
     if backup_master and backup_consumer:
-        # The backups exist, assuming they are correct 
+        # The backups exist, assuming they are correct
         # we just re-init the instances with them
         if not instance_master:
             master.create()
             # Used to retrieve configuration information (dbdir, confdir...)
             master.open()
-        
+
         if not instance_consumer:
             consumer.create()
             # Used to retrieve configuration information (dbdir, confdir...)
            consumer.open()
-        
+
         # restore master from backup
         master.stop(timeout=10)
         master.restoreFS(backup_master)
         master.start(timeout=10)
-        
+
         # restore consumer from backup
         consumer.stop(timeout=10)
         consumer.restoreFS(backup_consumer)
@@ -137,56 +138,57 @@ def topology(request):
         #     so we need to create everything
         # - Something weird happened (instance/backup destroyed)
         #     so we discard everything and recreate all
-        
+
         # Remove all the backups. So even if we have a specific backup file
         # (e.g backup_master) we clear all backups that an instance may have created
         if backup_master:
             master.clearBackupFS()
         if backup_consumer:
             consumer.clearBackupFS()
-        
+
         # Remove all the instances
         if instance_master:
             master.delete()
         if instance_consumer:
             consumer.delete()
-        
+
         # Create the instances
         master.create()
         master.open()
         consumer.create()
         consumer.open()
-    
-    # 
+
+    #
     # Now prepare the Master-Consumer topology
     #
     # First Enable replication
     master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER)
     consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER)
-    
+
     # Initialize the supplier->consumer
-    
+
     properties = {RA_NAME: r'meTo_$host:$port',
                   RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                   RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                   RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                   RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
     repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties)
-    
+
     if not repl_agreement:
         log.fatal("Fail to create a replica agreement")
         sys.exit(1)
-    
+
     log.debug("%s created" % repl_agreement)
     master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER)
     master.waitForReplInit(repl_agreement)
-    
+
     # Check replication is working fine
     master.add_s(Entry((TEST_REPL_DN, {
                                         'objectclass': "top person".split(),
                                         'sn': 'test_repl',
                                         'cn': 'test_repl'})))
     loop = 0
+    ent = None
     while loop <= 10:
         try:
             ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)")
@@ -194,12 +196,14 @@ def topology(request):
         except ldap.NO_SUCH_OBJECT:
             time.sleep(1)
             loop += 1
-    
+
+    if ent is None:
+        assert False
+
     # Time to create the backups
     master.stop(timeout=10)
     master.backupfile = master.backupFS()
     master.start(timeout=10)
-    
+
     consumer.stop(timeout=10)
     consumer.backupfile = consumer.backupFS()
     consumer.start(timeout=10)
@@ -207,7 +211,7 @@ def topology(request):
 
     # clear the tmp directory
     master.clearTmpDir(__file__)
 
-    # 
+    #
     # Here we have two instances master and consumer
     # with replication working. Either coming from a backup recovery
    # or from a fresh (re)init
@@ -216,24 +220,24 @@ def topology(request):
 
 def test_ticket47871_init(topology):
-    """ 
+    """
         Initialize the test environment
     """
     topology.master.plugins.enable(name=PLUGIN_RETRO_CHANGELOG)
-    mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"), # 10 second triming
-           (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")]
+    mod = [(ldap.MOD_REPLACE, 'nsslapd-changelogmaxage', "10s"),  # 10 second trimming
+           (ldap.MOD_REPLACE, 'nsslapd-changelog-trim-interval', "5s")]
     topology.master.modify_s("cn=%s,%s" % (PLUGIN_RETRO_CHANGELOG, DN_PLUGIN), mod)
     #topology.master.plugins.enable(name=PLUGIN_MEMBER_OF)
     #topology.master.plugins.enable(name=PLUGIN_REFER_INTEGRITY)
     topology.master.stop(timeout=10)
     topology.master.start(timeout=10)
-    
+
     topology.master.log.info("test_ticket47871_init topology %r" % (topology))
-    # the test case will check if a warning message is logged in the 
+    # the test case will check if a warning message is logged in the
     # error log of the supplier
     topology.master.errorlog_file = open(topology.master.errlog, "r")
-    
-    
+
+
 def test_ticket47871_1(topology):
     '''
     ADD entries and check they are all in the retrocl
@@ -245,9 +249,9 @@ def test_ticket47871_1(topology):
                                          'objectclass': "top person".split(),
                                          'sn': name,
                                          'cn': name})))
-    
-    topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS-1))
-    
+
+    topology.master.log.info("test_ticket47871_init: %d entries ADDed %s[0..%d]" % (MAX_OTHERS, OTHER_NAME, MAX_OTHERS - 1))
+
     # Check the number of entries in the retro changelog
     time.sleep(1)
     ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
@@ -256,6 +260,7 @@ def test_ticket47871_1(topology):
     for ent in ents:
         topology.master.log.info("%s" % ent.dn)
 
+
 def test_ticket47871_2(topology):
     '''
     Wait until only the last entry remains
     '''
     MAX_TRIES = 10
     TRY_NO = 1
     while TRY_NO <= MAX_TRIES:
-        time.sleep(6) # at least 1 trimming occurred
+        time.sleep(6)  # at least 1 trimming occurred
         ents = topology.master.search_s(RETROCL_SUFFIX, ldap.SCOPE_ONELEVEL, "(objectclass=*)")
         assert len(ents) <= MAX_OTHERS
         topology.master.log.info("\nTry no %d it remains %d entries" % (TRY_NO, len(ents)))
@@ -275,24 +280,24 @@ def test_ticket47871_2(topology):
             break
     assert TRY_NO <= MAX_TRIES
     assert len(ents) <= 1
-    
-    
+
+
 def test_ticket47871_final(topology):
-    topology.master.stop(timeout=10)
-    topology.consumer.stop(timeout=10)
+    topology.master.delete()
+    topology.consumer.delete()
+
 
 def run_isolated():
     '''
         run_isolated is used to run these test cases independently of
         a test scheduler (xunit, py.test..)
-        To run isolated without py.test, you need to 
+        To run isolated without py.test, you need to
            - edit this file and comment '@pytest.fixture' line before 'topology' function.
           - set the installation prefix
           - run this program
     '''
     global installation_prefix
-    installation_prefix = None
-    
+    installation_prefix = None
+
     topo = topology(True)
     test_ticket47871_init(topo)
     test_ticket47871_1(topo)
@@ -303,4 +308,3 @@ def run_isolated():
 
 if __name__ == '__main__':
     run_isolated()
-
diff --git a/dirsrvtests/tickets/ticket47900_test.py b/dirsrvtests/tickets/ticket47900_test.py
index 4e6f5d7..2200eba 100644
--- a/dirsrvtests/tickets/ticket47900_test.py
+++ b/dirsrvtests/tickets/ticket47900_test.py
@@ -23,6 +23,7 @@ ENTRY_NAME = 'Joe Schmo'
 ENTRY_DN = 'cn=%s,%s' % (ENTRY_NAME, SUFFIX)
 INVALID_PWDS = ('2_Short', 'No_Number', 'N0Special', '{SSHA}bBy8UdtPZwu8uZna9QOYG3Pr41RpIRVDl8wddw==')
 
+
 class TopologyStandalone(object):
     def __init__(self, standalone):
         standalone.open()
@@ -131,10 +132,10 @@ def test_ticket47900(topology):
     We need to test how passwords are modified in existing
     entries, and when adding new entries.
-    
+
     Create the Password Admin entry, but do not set
     it as an admin yet. Use the entry to verify invalid
-    passwords are caught. Then activate the password 
+    passwords are caught. Then activate the password
     admin and make sure it can bypass password policy.
     """
 
@@ -224,27 +225,26 @@ def test_ticket47900(topology):
                                                 "with an invalid password (%s)" % (passwd))
             assert False
 
-
     #
     # Now activate a password administrator, bind as root dn to do the config
     # update, then rebind as the password admin
     #
     topology.standalone.log.info("Activate the Password Administrator...")
-    
+
     # Bind as Root DN
     try:
         topology.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
         topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
-        assert False 
-    
+        assert False
+
     # Update config
     try:
         topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
     except ldap.LDAPError, e:
         topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
         assert False
-    
+
     # Bind as Password Admin
     try:
         topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
@@ -274,7 +274,6 @@ def test_ticket47900(topology):
         topology.standalone.log.error('Failed to delete entry: %s' % (e.message['desc']))
         assert False
 
-
     #
     # Add the entry for the next round of testing (modify password)
     #
@@ -290,13 +289,13 @@ def test_ticket47900(topology):
     # Deactivate the password admin and make sure invalid password updates fail
     #
     topology.standalone.log.info("Deactivate Password Administrator and try invalid password updates...")
-    
+
     # Bind as root DN
     try:
         topology.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
         topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
-        assert False 
+        assert False
 
     # Update config
     try:
@@ -311,7 +310,7 @@ def test_ticket47900(topology):
     except ldap.LDAPError, e:
         topology.standalone.log.error('Failed to bind as the Password Admin: ' + e.message['desc'])
         assert False
-    
+
     #
     # Make invalid password updates that should fail
     #
@@ -335,13 +334,13 @@ def test_ticket47900(topology):
     # Now activate a password administrator
     #
     topology.standalone.log.info("Activate Password Administrator and try updates again...")
-    
+
     # Bind as root DN
     try:
         topology.standalone.simple_bind_s(DN_DM, PASSWORD)
     except ldap.LDAPError, e:
         topology.standalone.log.error('Root DN failed to authenticate: ' + e.message['desc'])
-        assert False 
+        assert False
 
     # Update config
     try:
@@ -349,7 +348,7 @@ def test_ticket47900(topology):
         topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'passwordAdminDN', ADMIN_DN)])
     except ldap.LDAPError, e:
         topology.standalone.log.error('Failed to add password admin to config: ' + e.message['desc'])
        assert False
-    
+
     # Bind as Password Admin
     try:
         topology.standalone.simple_bind_s(ADMIN_DN, ADMIN_PWD)
@@ -368,7 +367,7 @@ def test_ticket47900(topology):
             topology.standalone.log.error('Password update failed unexpectedly: password (%s) result (%s)'
                                           % (passwd, e.message['desc']))
             assert False
-        topology.standalone.log.info('Password update succeeded (%s)' % passwd) 
+        topology.standalone.log.info('Password update succeeded (%s)' % passwd)
     #
     # Test passed
     #
@@ -376,7 +375,7 @@ def test_ticket47900(topology):
 
 
 def test_ticket47900_final(topology):
-    topology.standalone.stop(timeout=10)
+    topology.standalone.delete()
 
 
 def run_isolated():
@@ -392,7 +391,7 @@ def run_isolated():
 
     topo = topology(True)
     test_ticket47900(topo)
+
     test_ticket47900_final(topo)
 
 if __name__ == '__main__':
     run_isolated()
-
diff --git a/dirsrvtests/tickets/ticket47920_test.py b/dirsrvtests/tickets/ticket47920_test.py
index 1e04626..1b6455d 100644
--- a/dirsrvtests/tickets/ticket47920_test.py
+++ b/dirsrvtests/tickets/ticket47920_test.py
@@ -24,9 +24,6 @@ SCOPE_OUT_DN = 'cn=%s,%s' % (SCOPE_OUT_CN, SUFFIX)
 PROVISIONING_CN = "provisioning"
 PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SCOPE_IN_DN)
 
-
-
-
 ACTIVE_CN = "accounts"
 STAGE_CN = "staged users"
 DELETE_CN = "deleted users"
@@ -55,10 +52,8 @@ OUT_GROUP_DN = "cn=%s,%s" % (OUT_GROUP_CN, SCOPE_OUT_DN)
 INDIRECT_ACTIVE_GROUP_CN = "indirect active group"
 INDIRECT_ACTIVE_GROUP_DN = "cn=%s,%s" % (INDIRECT_ACTIVE_GROUP_CN, ACTIVE_DN)
 
-INITIAL_DESC="inital description"
-FINAL_DESC ="final description"
-
-
+INITIAL_DESC = "initial description"
+FINAL_DESC = "final description"
 
 log = logging.getLogger(__name__)
 
@@ -71,14 +66,13 @@ class TopologyStandalone(object):
         self.standalone = standalone
 
 
-
 @pytest.fixture(scope="module")
 def topology(request):
     '''
         This fixture is used to create a standalone topology for the 'module'.
         At the beginning, there may already be a standalone instance.
         There may also be a backup for the standalone instance.
-        
+
         Principle:
             If standalone instance exists:
                 restart it
@@ -97,60 +91,60 @@ def topology(request):
     if installation_prefix:
         args_instance[SER_DEPLOYED_DIR] = installation_prefix
-    
+
     standalone = DirSrv(verbose=False)
-    
+
     # Args for the standalone instance
     args_instance[SER_HOST] = HOST_STANDALONE
     args_instance[SER_PORT] = PORT_STANDALONE
     args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
     args_standalone = args_instance.copy()
     standalone.allocate(args_standalone)
-    
+
     # Get the status of the backups
     backup_standalone = standalone.checkBackupFS()
-    
+
     # Get the status of the instance and restart it if it exists
-    instance_standalone = standalone.exists() 
+    instance_standalone = standalone.exists()
     if instance_standalone:
         # assuming the instance is already stopped, just wait 5 sec max
         standalone.stop(timeout=5)
         standalone.start(timeout=10)
-    
+
     if backup_standalone:
-        # The backup exist, assuming it is correct 
+        # The backup exists, assuming it is correct
         # we just re-init the instance with it
         if not instance_standalone:
             standalone.create()
             # Used to retrieve configuration information (dbdir, confdir...)
             standalone.open()
-        
+
        # restore standalone instance from backup
        standalone.stop(timeout=10)
        standalone.restoreFS(backup_standalone)
        standalone.start(timeout=10)
-        
+
     else:
         # We should be here only in two conditions
         # - This is the first time a test involves a standalone instance
         # - Something weird happened (instance/backup destroyed)
         #   so we discard everything and recreate all
-        
+
         # Remove the backup.
So even if we have a specific backup file # (e.g backup_standalone) we clear backup that an instance may have created if backup_standalone: standalone.clearBackupFS() - + # Remove the instance if instance_standalone: standalone.delete() - + # Create the instance standalone.create() - + # Used to retrieve configuration information (dbdir, confdir...) standalone.open() - + # Time to create the backups standalone.stop(timeout=10) standalone.backupfile = standalone.backupFS() @@ -159,20 +153,22 @@ def topology(request): # clear the tmp directory standalone.clearTmpDir(__file__) - # + # # Here we have standalone instance up and running # Either coming from a backup recovery # or from a fresh (re)init # Time to return the topology return TopologyStandalone(standalone) + def _header(topology, label): topology.standalone.log.info("\n\n###############################################") topology.standalone.log.info("#######") topology.standalone.log.info("####### %s" % label) topology.standalone.log.info("#######") topology.standalone.log.info("###############################################") - + + def _add_user(topology, type='active'): if type == 'active': topology.standalone.add_s(Entry((ACTIVE_USER_DN, { @@ -191,6 +187,7 @@ def _add_user(topology, type='active'): 'sn': OUT_USER_CN, 'cn': OUT_USER_CN}))) + def test_ticket47920_init(topology): topology.standalone.add_s(Entry((SCOPE_IN_DN, { 'objectclass': "top nscontainer".split(), @@ -205,47 +202,47 @@ def test_ticket47920_init(topology): def test_ticket47920_mod_readentry_ctrl(topology): _header(topology, 'MOD: with a readentry control') - + topology.standalone.log.info("Check the initial value of the entry") ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ent.hasAttr('description') assert ent.getValue('description') == INITIAL_DESC - pr = PostReadControl(criticality=True,attrList=['cn', 'description']) - _,_,_,resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls= [pr]) + pr = PostReadControl(criticality=True, attrList=['cn', 'description']) + _, _, _, resp_ctrls = topology.standalone.modify_ext_s(ACTIVE_USER_DN, [(ldap.MOD_REPLACE, 'description', [FINAL_DESC])], serverctrls=[pr]) assert resp_ctrls[0].dn == ACTIVE_USER_DN - assert resp_ctrls[0].entry.has_key('description') - assert resp_ctrls[0].entry.has_key('cn') + assert 'description' in resp_ctrls[0].entry + assert 'cn' in resp_ctrls[0].entry print resp_ctrls[0].entry['description'] ent = topology.standalone.getEntry(ACTIVE_USER_DN, ldap.SCOPE_BASE, "(objectclass=*)", ['description']) assert ent.hasAttr('description') assert ent.getValue('description') == FINAL_DESC - + + def test_ticket47920_final(topology): - topology.standalone.stop(timeout=10) - + topology.standalone.delete() def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. 
- set the installation prefix - run this program ''' global installation_prefix - installation_prefix = None - + installation_prefix = None + topo = topology(True) test_ticket47920_init(topo) test_ticket47920_mod_readentry_ctrl(topo) - + test_ticket47920_final(topo) + if __name__ == '__main__': run_isolated() - diff --git a/dirsrvtests/tickets/ticket47937_test.py b/dirsrvtests/tickets/ticket47937_test.py index 09ee714..3554a71 100644 --- a/dirsrvtests/tickets/ticket47937_test.py +++ b/dirsrvtests/tickets/ticket47937_test.py @@ -216,7 +216,7 @@ def test_ticket47937(topology): def test_ticket47937_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -232,6 +232,8 @@ def run_isolated(): topo = topology(True) test_ticket47937(topo) + test_ticket47937_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47950_test.py b/dirsrvtests/tickets/ticket47950_test.py index 976f964..81ce0f7 100644 --- a/dirsrvtests/tickets/ticket47950_test.py +++ b/dirsrvtests/tickets/ticket47950_test.py @@ -252,7 +252,7 @@ def test_ticket47950(topology): def test_ticket47953_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -268,6 +268,8 @@ def run_isolated(): topo = topology(True) test_ticket47950(topo) + test_ticket47953_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47953_test.py b/dirsrvtests/tickets/ticket47953_test.py index 5a1241b..b9d0670 100644 --- a/dirsrvtests/tickets/ticket47953_test.py +++ b/dirsrvtests/tickets/ticket47953_test.py @@ -157,7 +157,7 @@ def test_ticket47953(topology): def test_ticket47953_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -173,6 +173,8 @@ def run_isolated(): topo = topology(True) test_ticket47953(topo) + test_ticket47953_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py index 49d505a..54895eb 100644 --- a/dirsrvtests/tickets/ticket47970_test.py +++ b/dirsrvtests/tickets/ticket47970_test.py @@ -185,7 +185,7 @@ def test_ticket47970(topology): def test_ticket47970_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -201,6 +201,8 @@ def run_isolated(): topo = topology(True) test_ticket47970(topo) + test_ticket47970_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47973_test.py b/dirsrvtests/tickets/ticket47973_test.py index 11b1ac8..259d9ac 100644 --- a/dirsrvtests/tickets/ticket47973_test.py +++ b/dirsrvtests/tickets/ticket47973_test.py @@ -214,7 +214,7 @@ def test_ticket47973(topology): def test_ticket47973_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -230,6 +230,8 @@ def run_isolated(): topo = topology(True) test_ticket47973(topo) + test_ticket47973_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47980_test.py b/dirsrvtests/tickets/ticket47980_test.py index 406d72d..51e37dc 100644 --- a/dirsrvtests/tickets/ticket47980_test.py +++ b/dirsrvtests/tickets/ticket47980_test.py @@ 
-689,7 +689,7 @@ def test_ticket47980(topology): def test_ticket47980_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -705,6 +705,8 @@ def run_isolated(): topo = topology(True) test_ticket47980(topo) + test_ticket47980_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated() diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py index 2a16ce6..26de221 100644 --- a/dirsrvtests/tickets/ticket47981_test.py +++ b/dirsrvtests/tickets/ticket47981_test.py @@ -324,7 +324,7 @@ def test_ticket47981(topology): def test_ticket47981_final(topology): - topology.standalone.stop(timeout=10) + topology.standalone.delete() def run_isolated(): @@ -340,6 +340,8 @@ def run_isolated(): topo = topology(True) test_ticket47981(topo) + test_ticket47981_final(topo) + if __name__ == '__main__': - run_isolated() \ No newline at end of file + run_isolated()
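
--
Note on the pattern: every module touched above now ends the same way. The
"final" function deletes the instance(s) it created instead of merely stopping
them, and run_isolated() finishes by calling it. A minimal sketch of that
shape (illustrative only: the ticket number and test names below are
placeholders; the topology/DirSrv calls are the ones used throughout this
patch):

    def test_ticketXXXXX_final(topology):
        # Delete the instance rather than just stop it, so the next test
        # module (under py.test or jenkins) truly starts fresh.
        topology.standalone.delete()


    def run_isolated():
        global installation_prefix
        installation_prefix = os.environ.get('PREFIX')

        topo = topology(True)
        test_ticketXXXXX(topo)
        # Always finish with the final function so no instance is left behind.
        test_ticketXXXXX_final(topo)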