From d602e30c8712a71b663f2db8a78ee7904443bc6d Mon Sep 17 00:00:00 2001 From: Mark Reynolds Date: Feb 03 2015 13:09:09 +0000 Subject: Ticket 48019 - Remove refs to constants.py and backup/restore from lib389 tests Bug Description: The backup and restore process did not work well with mutliple tests that use replication and the same server identified. The backup and restore process takes just as long as installing a fresh instance. constants.py is now deprecated as its contents were moved into the core lib389 module(_constants.py). Fix Description: Remove the backup and restore logic from the topology functions. Cleaned up the imports which included removing the references to constants.py. Added a consistent "success" message to each test. https://fedorahosted.org/389/ticket/48019 Reviewed by: nhosoi(Thanks!) --- diff --git a/dirsrvtests/suites/dynamic-plugins/constants.py b/dirsrvtests/suites/dynamic-plugins/constants.py deleted file mode 100644 index cbc310e..0000000 --- a/dirsrvtests/suites/dynamic-plugins/constants.py +++ /dev/null @@ -1,33 +0,0 @@ -''' -Created on Dec 09, 2014 - -@author: mreynolds -''' -import os -from lib389 import DN_DM -from lib389._constants import * -from lib389.properties import * - -SUFFIX = 'dc=example,dc=com' -PASSWORD = 'password' - -# Used for standalone topology -HOST_STANDALONE = LOCALHOST -PORT_STANDALONE = 33389 -SERVERID_STANDALONE = 'dynamic-plugins' - -# Each defined instance above must be added in that list -ALL_INSTANCES = [ {SER_HOST: HOST_STANDALONE, SER_PORT: PORT_STANDALONE, SER_SERVERID_PROP: SERVERID_STANDALONE}, - ] -# This is a template -args_instance = { - SER_DEPLOYED_DIR: os.environ.get('PREFIX', None), - SER_BACKUP_INST_DIR: os.environ.get('BACKUPDIR', DEFAULT_BACKUPDIR), - SER_ROOT_DN: DN_DM, - SER_ROOT_PW: PASSWORD, - SER_HOST: LOCALHOST, - SER_PORT: DEFAULT_PORT, - SER_SERVERID_PROP: "template", - SER_CREATION_SUFFIX: DEFAULT_SUFFIX} - - diff --git a/dirsrvtests/suites/dynamic-plugins/finalizer.py b/dirsrvtests/suites/dynamic-plugins/finalizer.py deleted file mode 100644 index eb02332..0000000 --- a/dirsrvtests/suites/dynamic-plugins/finalizer.py +++ /dev/null @@ -1,57 +0,0 @@ -''' -Created on Nov 5, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import socket -import time -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import DN_DM -from lib389.properties import * -from constants import * - -log = logging.getLogger(__name__) - -global installation_prefix -installation_prefix=os.getenv('PREFIX') - -def test_finalizer(): - global installation_prefix - - # for each defined instance, remove it - for args_instance in ALL_INSTANCES: - if installation_prefix: - # overwrite the environment setting - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - instance = DirSrv(verbose=True) - instance.allocate(args_instance) - if instance.exists(): - instance.delete() - - # remove any existing backup for this instance - instance.clearBackupFS() - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - set the installation prefix - - run this program - ''' - global installation_prefix - installation_prefix = None - - test_finalizer() - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py index e147be5..315547b 100644 --- a/dirsrvtests/suites/dynamic-plugins/plugin_tests.py +++ b/dirsrvtests/suites/dynamic-plugins/plugin_tests.py @@ -7,16 +7,13 @@ import os import sys import time import ldap -import time import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) diff --git a/dirsrvtests/suites/dynamic-plugins/stress_tests.py b/dirsrvtests/suites/dynamic-plugins/stress_tests.py index f1a34b4..ff830dd 100644 --- a/dirsrvtests/suites/dynamic-plugins/stress_tests.py +++ b/dirsrvtests/suites/dynamic-plugins/stress_tests.py @@ -7,16 +7,13 @@ import os import sys import time import ldap -import time import logging -import socket import pytest import threading from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) diff --git a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py index 2460ecc..6724c82 100644 --- a/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py +++ b/dirsrvtests/suites/dynamic-plugins/test_dynamic_plugins.py @@ -9,7 +9,6 @@ import time import ldap import ldap.sasl import logging -import socket import pytest import plugin_tests import stress_tests @@ -18,7 +17,6 @@ from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -41,22 +39,6 @@ def repl_fail(replica): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -72,60 +54,20 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) diff --git a/dirsrvtests/suites/schema/constants.py b/dirsrvtests/suites/schema/constants.py deleted file mode 100644 index 5751d97..0000000 --- a/dirsrvtests/suites/schema/constants.py +++ /dev/null @@ -1,33 +0,0 @@ -''' -Created on Dec 18, 2013 - -@author: rmeggins -''' -import os -from lib389 import DN_DM -from lib389._constants import * -from lib389.properties import * - -SUFFIX = 'dc=example,dc=com' -PASSWORD = 'password' - -# Used for standalone topology -HOST_STANDALONE = LOCALHOST -PORT_STANDALONE = 33389 -SERVERID_STANDALONE = 'schematest' - -# Each defined instance above must be added in that list -ALL_INSTANCES = [ {SER_HOST: HOST_STANDALONE, SER_PORT: PORT_STANDALONE, SER_SERVERID_PROP: SERVERID_STANDALONE}, - ] -# This is a template -args_instance = { - SER_DEPLOYED_DIR: os.environ.get('PREFIX', None), - SER_BACKUP_INST_DIR: os.environ.get('BACKUPDIR', DEFAULT_BACKUPDIR), - SER_ROOT_DN: DN_DM, - SER_ROOT_PW: PASSWORD, - SER_HOST: LOCALHOST, - SER_PORT: DEFAULT_PORT, - SER_SERVERID_PROP: "template", - SER_CREATION_SUFFIX: DEFAULT_SUFFIX} - - diff --git a/dirsrvtests/suites/schema/finalizer.py b/dirsrvtests/suites/schema/finalizer.py deleted file mode 100644 index ff256f6..0000000 --- a/dirsrvtests/suites/schema/finalizer.py +++ /dev/null @@ -1,51 +0,0 @@ -''' -Created on Nov 5, 2013 - -@author: tbordaz -''' -import os -import sys -import time -import ldap -import logging -import socket -import time -import logging -import pytest -from lib389 import DirSrv, Entry, tools -from lib389.tools import DirSrvTools -from lib389._constants import DN_DM -from lib389.properties import * -from constants import * - -log = logging.getLogger(__name__) - -global installation_prefix -installation_prefix=os.getenv('PREFIX') - -def test_finalizer(): - global installation_prefix - - # for each defined instance, remove it - for args_instance in ALL_INSTANCES: - if installation_prefix: - # overwrite the environment setting - args_instance[SER_DEPLOYED_DIR] = installation_prefix - - instance = DirSrv(verbose=True) - instance.allocate(args_instance) - if instance.exists(): - instance.delete() - -def run_isolated(): - ''' - run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to - - set the installation prefix - - run this program - ''' - test_finalizer() - -if __name__ == '__main__': - run_isolated() - diff --git a/dirsrvtests/suites/schema/test_schema.py b/dirsrvtests/suites/schema/test_schema.py index 4629cc6..a1b3791 100644 --- a/dirsrvtests/suites/schema/test_schema.py +++ b/dirsrvtests/suites/schema/test_schema.py @@ -10,16 +10,12 @@ import ldap from ldap.cidict import cidict from ldap.schema import SubSchema import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * + logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -37,21 +33,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to create a DirSrv instance for the 'module'. - At the beginning, there may already be an instance. - There may also be a backup for the instance. - - Principle: - If instance exists: - restart it - If backup exists: - create or rebind to instance - restore instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -65,51 +46,14 @@ def topology(request): args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE schemainst.allocate(args_instance) - # Get the status of the backups - backup = schemainst.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Remove all the instance if schemainst.exists(): - schemainst.stop(timeout=10) - schemainst.start(timeout=10) - - if backup: - # The backup exists, assuming it is correct - # we just re-init the instance with it - if not schemainst.exists(): - schemainst.create() - # Used to retrieve configuration information (dbdir, confdir...) - schemainst.open() - - # restore from backup - schemainst.stop(timeout=10) - schemainst.restoreFS(backup) - schemainst.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first test - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup) we clear all backups that an instance may have created - if backup: - schemainst.clearBackupFS() - - # Remove all the instances - if schemainst.exists(): - schemainst.delete() - - # Create the instances - schemainst.create() - schemainst.open() - - # Time to create the backup - schemainst.stop(timeout=10) - schemainst.backupfile = schemainst.backupFS() - schemainst.start(timeout=10) - # + schemainst.delete() + + # Create the instance + schemainst.create() + schemainst.open() + return TopologyStandalone(schemainst) attrclass = ldap.schema.models.AttributeType diff --git a/dirsrvtests/tickets/constants.py b/dirsrvtests/tickets/constants.py deleted file mode 100644 index f4fe8fc..0000000 --- a/dirsrvtests/tickets/constants.py +++ /dev/null @@ -1,69 +0,0 @@ -''' -Created on Oct 31, 2013 - -@author: tbordaz -''' -import os -from lib389 import DN_DM -from lib389._constants import * -from lib389.properties import * - -SUFFIX = 'dc=example,dc=com' -PASSWORD = 'password' - - -# Used for standalone topology -HOST_STANDALONE = LOCALHOST -PORT_STANDALONE = 33389 -SERVERID_STANDALONE = 'standalone' - -# Used for One master / One consumer topology -HOST_MASTER = LOCALHOST -PORT_MASTER = 40389 -SERVERID_MASTER = 'master' -REPLICAID_MASTER = 1 - -HOST_CONSUMER = LOCALHOST -PORT_CONSUMER = 50389 -SERVERID_CONSUMER = 'consumer' - -# Used for two masters / two consumers toplogy -HOST_MASTER_1 = LOCALHOST -PORT_MASTER_1 = 44389 -SERVERID_MASTER_1 = 'master_1' -REPLICAID_MASTER_1 = 1 - -HOST_MASTER_2 = LOCALHOST -PORT_MASTER_2 = 45389 -SERVERID_MASTER_2 = 'master_2' -REPLICAID_MASTER_2 = 2 - -HOST_CONSUMER_1 = LOCALHOST -PORT_CONSUMER_1 = 54389 -SERVERID_CONSUMER_1 = 'consumer_1' - -HOST_CONSUMER_2 = LOCALHOST -PORT_CONSUMER_2 = 55389 -SERVERID_CONSUMER_2 = 'consumer_2' - -# Each defined instance above must be added in that list -ALL_INSTANCES = [ {SER_HOST: HOST_STANDALONE, SER_PORT: PORT_STANDALONE, SER_SERVERID_PROP: SERVERID_STANDALONE}, - {SER_HOST: HOST_MASTER, SER_PORT: PORT_MASTER, SER_SERVERID_PROP: SERVERID_MASTER}, - {SER_HOST: HOST_CONSUMER, SER_PORT: PORT_CONSUMER, SER_SERVERID_PROP: SERVERID_CONSUMER}, - {SER_HOST: HOST_MASTER_1, SER_PORT: PORT_MASTER_1, SER_SERVERID_PROP: SERVERID_MASTER_1}, - {SER_HOST: HOST_MASTER_2, SER_PORT: PORT_MASTER_2, SER_SERVERID_PROP: SERVERID_MASTER_2}, - {SER_HOST: HOST_CONSUMER_1, SER_PORT: PORT_CONSUMER_1, SER_SERVERID_PROP: SERVERID_CONSUMER_1}, - {SER_HOST: HOST_CONSUMER_2, SER_PORT: PORT_CONSUMER_2, SER_SERVERID_PROP: SERVERID_CONSUMER_2}, - ] -# This is a template -args_instance = { - SER_DEPLOYED_DIR: os.environ.get('PREFIX', None), - SER_BACKUP_INST_DIR: os.environ.get('BACKUPDIR', DEFAULT_BACKUPDIR), - SER_ROOT_DN: DN_DM, - SER_ROOT_PW: PASSWORD, - SER_HOST: LOCALHOST, - SER_PORT: DEFAULT_PORT, - SER_SERVERID_PROP: "template", - SER_CREATION_SUFFIX: DEFAULT_SUFFIX} - - diff --git a/dirsrvtests/tickets/create_testcase.py b/dirsrvtests/tickets/create_testcase.py index 6697823..092c56d 100644 --- a/dirsrvtests/tickets/create_testcase.py +++ b/dirsrvtests/tickets/create_testcase.py @@ -98,8 +98,7 @@ if len(sys.argv) > 0: # # Write the imports # - TEST.write('import os\nimport sys\nimport time\nimport ldap\nimport ldap.sasl\n' + - 'import logging\nimport socket\nimport pytest\n') + TEST.write('import os\nimport sys\nimport time\nimport ldap\nimport logging\nimport pytest\n') TEST.write('from lib389 import DirSrv, Entry, tools, tasks\nfrom lib389.tools import DirSrvTools\n' + 'from lib389._constants 
import *\nfrom lib389.properties import *\nfrom lib389.tasks import *\n\n') @@ -185,6 +184,7 @@ if len(sys.argv) > 0: TEST.write(' args_instance[SER_HOST] = HOST_MASTER_' + idx + '\n') TEST.write(' args_instance[SER_PORT] = PORT_MASTER_' + idx + '\n') TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_' + idx + '\n') + TEST.write(' args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n') TEST.write(' args_master = args_instance.copy()\n') TEST.write(' master' + idx + '.allocate(args_master)\n') TEST.write(' instance_master' + idx + ' = master' + idx + '.exists()\n') @@ -203,6 +203,7 @@ if len(sys.argv) > 0: TEST.write(' args_instance[SER_HOST] = HOST_HUB_' + idx + '\n') TEST.write(' args_instance[SER_PORT] = PORT_HUB_' + idx + '\n') TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_HUB_' + idx + '\n') + TEST.write(' args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n') TEST.write(' args_hub = args_instance.copy()\n') TEST.write(' hub' + idx + '.allocate(args_hub)\n') TEST.write(' instance_hub' + idx + ' = hub' + idx + '.exists()\n') @@ -221,6 +222,7 @@ if len(sys.argv) > 0: TEST.write(' args_instance[SER_HOST] = HOST_CONSUMER_' + idx + '\n') TEST.write(' args_instance[SER_PORT] = PORT_CONSUMER_' + idx + '\n') TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_' + idx + '\n') + TEST.write(' args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n') TEST.write(' args_consumer = args_instance.copy()\n') TEST.write(' consumer' + idx + '.allocate(args_consumer)\n') TEST.write(' instance_consumer' + idx + ' = consumer' + idx + '.exists()\n') @@ -450,6 +452,7 @@ if len(sys.argv) > 0: TEST.write(' args_instance[SER_HOST] = HOST_STANDALONE' + idx + '\n') TEST.write(' args_instance[SER_PORT] = PORT_STANDALONE' + idx + '\n') TEST.write(' args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE' + idx + '\n') + TEST.write(' args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX\n') TEST.write(' args_standalone' + idx + ' = args_instance.copy()\n') TEST.write(' standalone' + idx + '.allocate(args_standalone' + idx + ')\n') diff --git a/dirsrvtests/tickets/finalizer.py b/dirsrvtests/tickets/finalizer.py index eb02332..f7864eb 100644 --- a/dirsrvtests/tickets/finalizer.py +++ b/dirsrvtests/tickets/finalizer.py @@ -16,7 +16,6 @@ from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import DN_DM from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -25,31 +24,31 @@ installation_prefix=os.getenv('PREFIX') def test_finalizer(): global installation_prefix - + # for each defined instance, remove it for args_instance in ALL_INSTANCES: if installation_prefix: # overwrite the environment setting args_instance[SER_DEPLOYED_DIR] = installation_prefix - + instance = DirSrv(verbose=True) instance.allocate(args_instance) if instance.exists(): instance.delete() - + # remove any existing backup for this instance instance.clearBackupFS() - + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) 
- To run isolated without py.test, you need to + To run isolated without py.test, you need to - set the installation prefix - run this program ''' global installation_prefix installation_prefix = None - + test_finalizer() if __name__ == '__main__': diff --git a/dirsrvtests/tickets/ticket47313_test.py b/dirsrvtests/tickets/ticket47313_test.py index 1907faa..e85fd08 100644 --- a/dirsrvtests/tickets/ticket47313_test.py +++ b/dirsrvtests/tickets/ticket47313_test.py @@ -3,15 +3,12 @@ import sys import time import ldap import logging -import socket import time -import logging import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -30,22 +27,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -61,63 +42,22 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # - # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -133,8 +73,8 @@ def test_ticket47313_run(topology): topology.standalone.simple_bind_s(DN_DM, PASSWORD) # enable filter error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] - topology.standalone.modify_s(DN_CONFIG, mod) + #mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '32')] + #topology.standalone.modify_s(DN_CONFIG, mod) topology.standalone.log.info("\n\n######################### ADD ######################\n") @@ -201,6 +141,7 @@ def test_ticket47313_run(topology): def test_ticket47313_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47462_test.py b/dirsrvtests/tickets/ticket47462_test.py index 67b43d7..e385f1e 100644 --- a/dirsrvtests/tickets/ticket47462_test.py +++ b/dirsrvtests/tickets/ticket47462_test.py @@ -1,19 +1,12 @@ -import os import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * -from lib389._constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -47,27 +40,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. - - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -96,134 +68,73 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - master1.agreement - if not AGMT_DN: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % AGMT_DN) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(AGMT_DN) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if not ent: - log.fatal('Replication is not working!') - assert False + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + 
RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + AGMT_DN = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + master1.agreement + if not AGMT_DN: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % AGMT_DN) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=DEFAULT_SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(AGMT_DN) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if not ent: + log.fatal('Replication is not working!') + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # - # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyMaster1Master2(master1, master2) @@ -426,15 +337,11 @@ def test_ticket47462(topology): log.fatal('Failed to add test user: ' + e.message['desc']) assert False - # - # If we got here the test passed - # - log.info('Test PASSED') - def test_ticket47462_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47490_test.py b/dirsrvtests/tickets/ticket47490_test.py index 29d8d04..fc8e15a 100644 --- a/dirsrvtests/tickets/ticket47490_test.py +++ b/dirsrvtests/tickets/ticket47490_test.py @@ -5,9 +5,7 @@ Created on Nov 7, 2013 ''' import os import sys -import time import ldap -import logging import socket import time import logging @@ -17,7 +15,6 @@ from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -180,27 +177,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER -> Consumer. - At the beginning, It may exists a master instance and/or a consumer instance. - It may also exists a backup for the master and/or the consumer. 
- - Principle: - If master instance exists: - restart it - If consumer instance exists: - restart it - If backup of master AND backup of consumer exists: - create or rebind to consumer - create or rebind to master - - restore master from backup - restore consumer from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation_prefix @@ -211,140 +187,81 @@ def topology(request): consumer = DirSrv(verbose=False) # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER - args_instance[SER_PORT] = PORT_MASTER - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master.allocate(args_master) # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER - args_instance[SER_PORT] = PORT_CONSUMER - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 args_consumer = args_instance.copy() consumer.allocate(args_consumer) - # Get the status of the backups - backup_master = master.checkBackupFS() - backup_consumer = consumer.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_master = master.exists() - if instance_master: - master.stop(timeout=10) - master.start(timeout=10) - instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() if instance_consumer: - consumer.stop(timeout=10) - consumer.start(timeout=10) - - if backup_master and backup_consumer: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master: - master.create() - # Used to retrieve configuration information (dbdir, confdir...) - master.open() - - if not instance_consumer: - consumer.create() - # Used to retrieve configuration information (dbdir, confdir...) - consumer.open() - - # restore master from backup - master.stop(timeout=10) - master.restoreFS(backup_master) - master.start(timeout=10) - - # restore consumer from backup - consumer.stop(timeout=10) - consumer.restoreFS(backup_consumer) - consumer.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master: - master.clearBackupFS() - if backup_consumer: - consumer.clearBackupFS() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - master.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - ent = None - loop = 0 - while loop <= 10: - try: - ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master.stop(timeout=10) - master.backupfile = master.backupFS() - master.start(timeout=10) - - consumer.stop(timeout=10) - consumer.backupfile = consumer.backupFS() - consumer.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + master.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + ent = None + loop = 0 + while loop <= 10: + try: + ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master.clearTmpDir(__file__) # # Here we have two instances master and consumer - # with 
replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMasterConsumer(master, consumer) @@ -742,6 +659,7 @@ def test_ticket47490_nine(topology): def test_ticket47490_final(topology): topology.master.delete() topology.consumer.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47553_ger.py b/dirsrvtests/tickets/ticket47553_ger.py index 72df885..5f0fbf2 100644 --- a/dirsrvtests/tickets/ticket47553_ger.py +++ b/dirsrvtests/tickets/ticket47553_ger.py @@ -8,17 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * -from lib389._constants import REPLICAROLE_MASTER from ldap.controls.simple import GetEffectiveRightsControl logging.getLogger(__name__).setLevel(logging.DEBUG) @@ -69,27 +63,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. - - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -118,135 +91,76 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) - master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) - - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMaster1Master2(master1, master2) @@ -526,6 +440,7 @@ def test_ticket47553_mode_legacy_ger_with_moddn(topology): def test_ticket47553_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47553_single_aci_test.py b/dirsrvtests/tickets/ticket47553_single_aci_test.py index 34e3eec..30b7238 100644 --- a/dirsrvtests/tickets/ticket47553_single_aci_test.py +++ b/dirsrvtests/tickets/ticket47553_single_aci_test.py @@ -8,16 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * from lib389._constants import REPLICAROLE_MASTER logging.getLogger(__name__).setLevel(logging.DEBUG) @@ -68,27 +63,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. 
- - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -117,135 +91,76 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() + # Get the status of the instance + instance_master1 = master1.exists() + instance_master2 = master2.exists() - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() + # Remove all the instances if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - - instance_master2 = master2.exists() + master1.delete() if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) - master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) + master2.delete() - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) - - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMaster1Master2(master1, master2) @@ -1128,6 +1043,7 @@ def test_ticket47553_moddn_staging_prod_except(topology): def test_ticket47553_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47560_test.py b/dirsrvtests/tickets/ticket47560_test.py index a4b4433..e2a8e45 100644 --- a/dirsrvtests/tickets/ticket47560_test.py +++ b/dirsrvtests/tickets/ticket47560_test.py @@ -3,15 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -28,22 +24,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -59,63 +39,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() + # Remove the instance + if instance_standalone: + standalone.delete() - # Create the instance - standalone.create() + # Create the instance + standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -281,6 +221,7 @@ def test_ticket47560(topology): def test_ticket47560_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47573_test.py b/dirsrvtests/tickets/ticket47573_test.py index 2641d0b..1771049 100644 --- a/dirsrvtests/tickets/ticket47573_test.py +++ b/dirsrvtests/tickets/ticket47573_test.py @@ -8,16 +8,12 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -126,27 +122,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER -> Consumer. - At the beginning, It may exists a master instance and/or a consumer instance. - It may also exists a backup for the master and/or the consumer. 
- - Principle: - If master instance exists: - restart it - If consumer instance exists: - restart it - If backup of master AND backup of consumer exists: - create or rebind to consumer - create or rebind to master - - restore master from backup - restore consumer from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation_prefix @@ -157,141 +132,81 @@ def topology(request): consumer = DirSrv(verbose=False) # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER - args_instance[SER_PORT] = PORT_MASTER - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master.allocate(args_master) # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER - args_instance[SER_PORT] = PORT_CONSUMER - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 args_consumer = args_instance.copy() consumer.allocate(args_consumer) - - # Get the status of the backups - backup_master = master.checkBackupFS() - backup_consumer = consumer.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_master = master.exists() - if instance_master: - master.stop(timeout=10) - master.start(timeout=10) - instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() if instance_consumer: - consumer.stop(timeout=10) - consumer.start(timeout=10) - - if backup_master and backup_consumer: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master: - master.create() - # Used to retrieve configuration information (dbdir, confdir...) - master.open() - - if not instance_consumer: - consumer.create() - # Used to retrieve configuration information (dbdir, confdir...) - consumer.open() - - # restore master from backup - master.stop(timeout=10) - master.restoreFS(backup_master) - master.start(timeout=10) - - # restore consumer from backup - consumer.stop(timeout=10) - consumer.restoreFS(backup_consumer) - consumer.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master: - master.clearBackupFS() - if backup_consumer: - consumer.clearBackupFS() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - master.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master.stop(timeout=10) - master.backupfile = master.backupFS() - master.start(timeout=10) - - consumer.stop(timeout=10) - consumer.backupfile = consumer.backupFS() - consumer.start(timeout=10) + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + master.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with 
replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMasterConsumer(master, consumer) @@ -405,6 +320,7 @@ def test_ticket47573_three(topology): def test_ticket47573_final(topology): topology.master.delete() topology.consumer.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47619_test.py b/dirsrvtests/tickets/ticket47619_test.py index e3e7846..8475be1 100644 --- a/dirsrvtests/tickets/ticket47619_test.py +++ b/dirsrvtests/tickets/ticket47619_test.py @@ -8,16 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -50,27 +45,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER -> Consumer. - At the beginning, It may exists a master instance and/or a consumer instance. - It may also exists a backup for the master and/or the consumer. - - Principle: - If master instance exists: - restart it - If consumer instance exists: - restart it - If backup of master AND backup of consumer exists: - create or rebind to consumer - create or rebind to master - - restore master from backup - restore consumer from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation_prefix @@ -81,141 +55,80 @@ def topology(request): consumer = DirSrv(verbose=False) # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER - args_instance[SER_PORT] = PORT_MASTER - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master.allocate(args_master) # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER - args_instance[SER_PORT] = PORT_CONSUMER - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 args_consumer = args_instance.copy() consumer.allocate(args_consumer) - - # Get the status of the backups - backup_master = master.checkBackupFS() - backup_consumer = consumer.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_master = master.exists() - if instance_master: - master.stop(timeout=10) - master.start(timeout=10) - instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() if instance_consumer: - consumer.stop(timeout=10) - consumer.start(timeout=10) - - if backup_master and backup_consumer: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master: - master.create() - # Used to retrieve configuration information (dbdir, confdir...) - master.open() - - if not instance_consumer: - consumer.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- consumer.open() - - # restore master from backup - master.stop(timeout=10) - master.restoreFS(backup_master) - master.start(timeout=10) - - # restore consumer from backup - consumer.stop(timeout=10) - consumer.restoreFS(backup_consumer) - consumer.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master: - master.clearBackupFS() - if backup_consumer: - consumer.clearBackupFS() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() + consumer.delete() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - master.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master.stop(timeout=10) - master.backupfile = master.backupFS() - master.start(timeout=10) - - consumer.stop(timeout=10) - consumer.backupfile = consumer.backupFS() - consumer.start(timeout=10) + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) 
+ master.waitForReplInit(repl_agreement) + + # Check replication is working fine + master.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMasterConsumer(master, consumer) @@ -275,6 +188,7 @@ def test_ticket47619_check_indexed_search(topology): def test_ticket47619_final(topology): topology.master.delete() topology.consumer.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47653MMR_test.py b/dirsrvtests/tickets/ticket47653MMR_test.py index 5791479..7370332 100644 --- a/dirsrvtests/tickets/ticket47653MMR_test.py +++ b/dirsrvtests/tickets/ticket47653MMR_test.py @@ -8,17 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * -from lib389._constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -73,27 +67,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. - - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -122,135 +95,77 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMaster1Master2(master1, master2) @@ -532,6 +447,7 @@ def test_ticket47653_modify(topology): def test_ticket47653_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47653_test.py b/dirsrvtests/tickets/ticket47653_test.py index c217596..5d881c3 100644 --- a/dirsrvtests/tickets/ticket47653_test.py +++ b/dirsrvtests/tickets/ticket47653_test.py @@ -3,15 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -54,25 +50,6 @@ class TopologyStandalone(object): @pytest.fixture(scope="module") def topology(request): - ''' - This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. 
- - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup - ''' global installation_prefix if installation_prefix: @@ -87,63 +64,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -406,6 +343,7 @@ def test_ticket47653_delete(topology): def test_ticket47653_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47664_test.py b/dirsrvtests/tickets/ticket47664_test.py index 4bd100d..c38ac0a 100644 --- a/dirsrvtests/tickets/ticket47664_test.py +++ b/dirsrvtests/tickets/ticket47664_test.py @@ -3,16 +3,12 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * from ldap.controls import SimplePagedResultsControl from ldap.controls.simple import GetEffectiveRightsControl @@ -38,22 +34,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. 
- - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -69,63 +49,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -252,6 +192,7 @@ def test_ticket47664_run(topology): def test_ticket47664_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47676_test.py b/dirsrvtests/tickets/ticket47676_test.py index 517e10f..b4175ec 100644 --- a/dirsrvtests/tickets/ticket47676_test.py +++ b/dirsrvtests/tickets/ticket47676_test.py @@ -8,17 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * -from lib389._constants import REPLICAROLE_MASTER logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -85,27 +79,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. 
- - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -134,135 +107,76 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - + instance_master1 = master1.exists() instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) - master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() + master2.delete() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) - - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: 
defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMaster1Master2(master1, master2) @@ -463,6 +377,7 @@ def test_ticket47676_reject_action(topology): def test_ticket47676_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47714_test.py b/dirsrvtests/tickets/ticket47714_test.py index ef4ebd5..2dd7325 100644 --- a/dirsrvtests/tickets/ticket47714_test.py +++ b/dirsrvtests/tickets/ticket47714_test.py @@ -3,14 +3,12 @@ import sys import time import ldap import logging -import socket import pytest import shutil from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -36,22 +34,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -67,66 +49,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - try: - standalone.start(timeout=10) - except ldap.SERVER_DOWN: - pass - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -288,6 +227,7 @@ def test_ticket47714_run_1(topology): def test_ticket47714_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47721_test.py b/dirsrvtests/tickets/ticket47721_test.py index e9e0488..3f1b9f2 100644 --- a/dirsrvtests/tickets/ticket47721_test.py +++ b/dirsrvtests/tickets/ticket47721_test.py @@ -8,16 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * from lib389._constants import REPLICAROLE_MASTER logging.getLogger(__name__).setLevel(logging.DEBUG) @@ -93,27 +88,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. - - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -142,126 +116,70 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) @@ -519,6 +437,7 @@ def test_ticket47721_4(topology): def test_ticket47721_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47781_test.py b/dirsrvtests/tickets/ticket47781_test.py index 94f0893..cec3841 100644 --- a/dirsrvtests/tickets/ticket47781_test.py +++ b/dirsrvtests/tickets/ticket47781_test.py @@ -3,14 +3,12 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -27,22 +25,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -58,63 +40,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) 
- standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -212,12 +154,10 @@ def test_ticket47781(topology): log.fatal('Search failed: ' + e.message['desc']) assert PR_False - # If we got here we passed! - log.info('Ticket47781 Test - Passed') - def test_ticket47781_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47787_test.py b/dirsrvtests/tickets/ticket47787_test.py index 528b474..567f85b 100644 --- a/dirsrvtests/tickets/ticket47787_test.py +++ b/dirsrvtests/tickets/ticket47787_test.py @@ -8,16 +8,12 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest import re from lib389 import DirSrv, Entry, tools, NoSuchEntryError from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * from lib389._constants import REPLICAROLE_MASTER logging.getLogger(__name__).setLevel(logging.DEBUG) @@ -71,27 +67,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. 
- - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -120,135 +95,76 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - + instance_master1 = master1.exists() instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) - master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology + # with replication working. return TopologyMaster1Master2(master1, master2) @@ -617,6 +533,7 @@ def test_ticket47787_2(topology): def test_ticket47787_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47808_test.py b/dirsrvtests/tickets/ticket47808_test.py index eecfd4a..8123ed8 100644 --- a/dirsrvtests/tickets/ticket47808_test.py +++ b/dirsrvtests/tickets/ticket47808_test.py @@ -3,15 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -31,22 +27,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -62,63 +42,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -193,6 +133,7 @@ def test_ticket47808_run(topology): def test_ticket47808_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47815_test.py b/dirsrvtests/tickets/ticket47815_test.py index eaaf616..2bd3239 100644 --- a/dirsrvtests/tickets/ticket47815_test.py +++ b/dirsrvtests/tickets/ticket47815_test.py @@ -3,13 +3,11 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -26,22 +24,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -57,63 +39,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -204,12 +146,10 @@ def test_ticket47815(topology): log.error('2nd Add operation unexpectedly succeeded') assert False - # If we got here we passed! - log.info('Ticket47815 Test - Passed') - def test_ticket47815_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47819_test.py b/dirsrvtests/tickets/ticket47819_test.py index ba5ebc4..a737683 100644 --- a/dirsrvtests/tickets/ticket47819_test.py +++ b/dirsrvtests/tickets/ticket47819_test.py @@ -3,14 +3,12 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -27,22 +25,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -58,62 +40,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=60) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=60) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=60) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -320,14 +263,10 @@ def test_ticket47819(topology): log.info('Part 4 - passed') - # - # If we got here we passed! - # - log.info('Ticket47819 Test - Passed') - def test_ticket47819_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47823_test.py b/dirsrvtests/tickets/ticket47823_test.py index e237ccd..317ebf5 100644 --- a/dirsrvtests/tickets/ticket47823_test.py +++ b/dirsrvtests/tickets/ticket47823_test.py @@ -3,7 +3,6 @@ import sys import time import ldap import logging -import socket import pytest import re import shutil @@ -11,7 +10,6 @@ from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -58,22 +56,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -89,66 +71,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - try: - standalone.start(timeout=10) - except ldap.SERVER_DOWN: - pass - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -1026,6 +965,7 @@ def test_ticket47823_invalid_config_7(topology): def test_ticket47823_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47824_test.py b/dirsrvtests/tickets/ticket47824_test.py index 76e8471..790c9cc 100644 --- a/dirsrvtests/tickets/ticket47824_test.py +++ b/dirsrvtests/tickets/ticket47824_test.py @@ -3,16 +3,12 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * from ldap.controls import SimplePagedResultsControl log = logging.getLogger(__name__) @@ -49,22 +45,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -80,63 +60,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -292,6 +232,7 @@ def test_ticket47824_run(topology): def test_ticket47824_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47829_test.py b/dirsrvtests/tickets/ticket47829_test.py index ffbab03..d1e61b2 100644 --- a/dirsrvtests/tickets/ticket47829_test.py +++ b/dirsrvtests/tickets/ticket47829_test.py @@ -3,16 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * SCOPE_IN_CN = 'in' @@ -66,22 +61,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -97,63 +76,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -662,6 +601,7 @@ def test_ticket47829_indirect_active_group_4(topology): def test_ticket47829_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47838_test.py b/dirsrvtests/tickets/ticket47838_test.py index 4a59e14..19a5aba 100644 --- a/dirsrvtests/tickets/ticket47838_test.py +++ b/dirsrvtests/tickets/ticket47838_test.py @@ -3,7 +3,6 @@ import sys import time import ldap import logging -import socket import pytest import shutil from lib389 import DirSrv, Entry, tools @@ -11,7 +10,6 @@ from lib389 import DirSrvTools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -39,22 +37,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -70,66 +52,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - try: - standalone.start(timeout=10) - except ldap.SERVER_DOWN: - pass - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -851,6 +790,7 @@ def test_ticket47838_run_last(topology): def test_ticket47838_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47869MMR_test.py b/dirsrvtests/tickets/ticket47869MMR_test.py index 3e21aed..a65d5c4 100644 --- a/dirsrvtests/tickets/ticket47869MMR_test.py +++ b/dirsrvtests/tickets/ticket47869MMR_test.py @@ -3,17 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * -from lib389._constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -47,27 +41,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. - - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix @@ -96,134 +69,74 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - - # Get the status of the instance and restart it if it exists + # Get the status of the instance instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - instance_master2 = master2.exists() - if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) 
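
For reference, once the backup/restore branches above are removed, the replicated fixtures in this patch reduce to an unconditional delete-and-recreate of both masters. The sketch below is illustrative only: create_master() is a hypothetical helper, the args_instance template is assumed to come in via the lib389 star imports these tests already use, and SERVERID_MASTER_2 is assumed to follow the *_MASTER_1/*_MASTER_2 naming seen elsewhere in the patch.

    from lib389 import DirSrv
    from lib389._constants import *
    from lib389.properties import *

    def create_master(host, port, serverid):
        # Build the allocation arguments from the shared args_instance template
        args_instance[SER_HOST] = host
        args_instance[SER_PORT] = port
        args_instance[SER_SERVERID_PROP] = serverid

        master = DirSrv(verbose=False)
        master.allocate(args_instance.copy())

        # Discard any leftover instance instead of restoring it from a backup
        if master.exists():
            master.delete()

        # Create a fresh instance and open a connection to it
        master.create()
        master.open()
        return master

    master1 = create_master(HOST_MASTER_1, PORT_MASTER_1, SERVERID_MASTER_1)
    master2 = create_master(HOST_MASTER_2, PORT_MASTER_2, SERVERID_MASTER_2)
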
- master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False + # Remove all the instances + if instance_master1: + master1.delete() + if instance_master2: + master2.delete() - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: 
defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, {'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master1.clearTmpDir(__file__) - # # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyMaster1Master2(master1, master2) @@ -405,6 +318,7 @@ def test_ticket47869_check(topology): def test_ticket47869_final(topology): topology.master1.delete() topology.master2.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47871_test.py b/dirsrvtests/tickets/ticket47871_test.py index 5ddf315..8147764 100644 --- a/dirsrvtests/tickets/ticket47871_test.py +++ b/dirsrvtests/tickets/ticket47871_test.py @@ -8,16 +8,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -50,27 +45,6 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER -> Consumer. - At the beginning, It may exists a master instance and/or a consumer instance. - It may also exists a backup for the master and/or the consumer. 
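
The same verification idiom recurs in each replicated fixture: add a test entry on one master and poll the peer until it shows up. A hedged helper form of that loop follows; the function name and the retry count are illustrative and not part of the patch.

    import time
    import ldap

    def wait_for_entry(server, dn, max_tries=10):
        # Poll 'server' until 'dn' has replicated, sleeping one second per attempt
        for _ in range(max_tries):
            try:
                return server.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
        # Same behaviour as the inline loops: fail the fixture if the entry never arrives
        assert False

With something like this, each fixture's inline loop would collapse to a single wait_for_entry(master2, TEST_REPL_DN) call.
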
- - Principle: - If master instance exists: - restart it - If consumer instance exists: - restart it - If backup of master AND backup of consumer exists: - create or rebind to consumer - create or rebind to master - - restore master from backup - restore consumer from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation_prefix @@ -81,132 +55,75 @@ def topology(request): consumer = DirSrv(verbose=False) # Args for the master instance - args_instance[SER_HOST] = HOST_MASTER - args_instance[SER_PORT] = PORT_MASTER - args_instance[SER_SERVERID_PROP] = SERVERID_MASTER + args_instance[SER_HOST] = HOST_MASTER_1 + args_instance[SER_PORT] = PORT_MASTER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master.allocate(args_master) # Args for the consumer instance - args_instance[SER_HOST] = HOST_CONSUMER - args_instance[SER_PORT] = PORT_CONSUMER - args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER + args_instance[SER_HOST] = HOST_CONSUMER_1 + args_instance[SER_PORT] = PORT_CONSUMER_1 + args_instance[SER_SERVERID_PROP] = SERVERID_CONSUMER_1 args_consumer = args_instance.copy() consumer.allocate(args_consumer) - - # Get the status of the backups - backup_master = master.checkBackupFS() - backup_consumer = consumer.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_master = master.exists() - if instance_master: - master.stop(timeout=10) - master.start(timeout=10) - instance_consumer = consumer.exists() + + # Remove all the instances + if instance_master: + master.delete() if instance_consumer: - consumer.stop(timeout=10) - consumer.start(timeout=10) - - if backup_master and backup_consumer: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master: - master.create() - # Used to retrieve configuration information (dbdir, confdir...) - master.open() - - if not instance_consumer: - consumer.create() - # Used to retrieve configuration information (dbdir, confdir...) - consumer.open() - - # restore master from backup - master.stop(timeout=10) - master.restoreFS(backup_master) - master.start(timeout=10) - - # restore consumer from backup - consumer.stop(timeout=10) - consumer.restoreFS(backup_consumer) - consumer.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master: - master.clearBackupFS() - if backup_consumer: - consumer.clearBackupFS() - - # Remove all the instances - if instance_master: - master.delete() - if instance_consumer: - consumer.delete() - - # Create the instances - master.create() - master.open() - consumer.create() - consumer.open() + consumer.delete() + + # Create the instances + master.create() + master.open() + consumer.create() + consumer.open() - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER) - consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - master.agreement.init(SUFFIX, HOST_CONSUMER, PORT_CONSUMER) - master.waitForReplInit(repl_agreement) - - # Check replication is working fine - master.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - ent = None - while loop <= 10: - try: - ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - if ent is None: - assert False - - # Time to create the backups - master.stop(timeout=10) - master.backupfile = master.backupFS() - master.start(timeout=10) - - consumer.stop(timeout=10) - consumer.backupfile = consumer.backupFS() - consumer.start(timeout=10) + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + consumer.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_CONSUMER) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master.agreement.create(suffix=SUFFIX, host=consumer.host, port=consumer.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + master.agreement.init(SUFFIX, HOST_CONSUMER_1, PORT_CONSUMER_1) + master.waitForReplInit(repl_agreement) + + # Check replication is working fine + master.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = consumer.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False # clear the tmp directory master.clearTmpDir(__file__) @@ -285,6 +202,7 @@ def test_ticket47871_2(topology): def 
test_ticket47871_final(topology): topology.master.delete() topology.consumer.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47900_test.py b/dirsrvtests/tickets/ticket47900_test.py index 2200eba..397f4a2 100644 --- a/dirsrvtests/tickets/ticket47900_test.py +++ b/dirsrvtests/tickets/ticket47900_test.py @@ -3,13 +3,11 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -34,22 +32,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -65,63 +47,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() + + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) 
+ standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -368,14 +310,11 @@ def test_ticket47900(topology): % (passwd, e.message['desc'])) assert False topology.standalone.log.info('Password update succeeded (%s)' % passwd) - # - # Test passed - # - topology.standalone.log.info('Test 47900 Passed.') def test_ticket47900_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47920_test.py b/dirsrvtests/tickets/ticket47920_test.py index 1b6455d..6a23f4d 100644 --- a/dirsrvtests/tickets/ticket47920_test.py +++ b/dirsrvtests/tickets/ticket47920_test.py @@ -3,16 +3,11 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * from ldap.controls.readentry import PreReadControl,PostReadControl @@ -70,22 +65,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -101,63 +80,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) 
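
Pieced together from the added lines, every standalone topology() fixture in this patch now converges on the same short body. The sketch below is a reconstruction, not a verbatim hunk: the pytest fixture decorator is omitted, and the allocation arguments and the HOST_STANDALONE/PORT_STANDALONE/SERVERID_STANDALONE names are assumed from context rather than shown in these diffs.

    def topology(request):
        '''
        Standalone topology for the 'module' (reconstructed sketch).
        '''
        standalone = DirSrv(verbose=False)
        args_instance[SER_HOST] = HOST_STANDALONE
        args_instance[SER_PORT] = PORT_STANDALONE
        args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
        standalone.allocate(args_instance.copy())

        # Remove any existing instance, then build a fresh one
        if standalone.exists():
            standalone.delete()
        standalone.create()

        # Re-bind to the new instance and load its configuration (dbdir, confdir...)
        standalone.open()

        # Clear the tmp directory and hand the instance to the tests
        standalone.clearTmpDir(__file__)
        return TopologyStandalone(standalone)
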
- standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -223,6 +162,7 @@ def test_ticket47920_mod_readentry_ctrl(topology): def test_ticket47920_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): @@ -238,9 +178,7 @@ def run_isolated(): topo = topology(True) test_ticket47920_init(topo) - test_ticket47920_mod_readentry_ctrl(topo) - test_ticket47920_final(topo) diff --git a/dirsrvtests/tickets/ticket47937_test.py b/dirsrvtests/tickets/ticket47937_test.py index 3554a71..56fd2ae 100644 --- a/dirsrvtests/tickets/ticket47937_test.py +++ b/dirsrvtests/tickets/ticket47937_test.py @@ -3,13 +3,11 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from constants import * log = logging.getLogger(__name__) @@ -26,22 +24,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -57,63 +39,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() - if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() + # Remove the instance + if instance_standalone: + standalone.delete() - # Used to retrieve configuration information (dbdir, confdir...) 
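
Throughout the patch, the ad-hoc per-test success messages ('Test Passed', 'Ticket478xx Test - Passed') are dropped and each suite's *_final teardown gains one uniform closing line. Schematically, with the ticket number left as a placeholder and log assumed to be the module-level logger used by these tests:

    def test_ticketXXXXX_final(topology):
        # Tear the instance down and report success once, in one place
        topology.standalone.delete()
        log.info('Testcase PASSED')
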
- standalone.open() + # Create the instance + standalone.create() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -212,11 +154,10 @@ def test_ticket47937(topology): log.error('Operation incorectly succeeded! Test Failed!') assert False - topology.standalone.log.info('Test 47937 Passed.') - def test_ticket47937_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47950_test.py b/dirsrvtests/tickets/ticket47950_test.py index 81ce0f7..224833f 100644 --- a/dirsrvtests/tickets/ticket47950_test.py +++ b/dirsrvtests/tickets/ticket47950_test.py @@ -3,14 +3,12 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -30,22 +28,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -61,63 +43,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) 
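
Each suite's import hunk follows the same pattern: the local 'from constants import *' and the unused socket/duplicate imports are removed, leaving the lib389 star imports to supply the SER_*, RA_*, and REPLICAROLE_* names the fixtures rely on. The resulting header, modeled on the ticket47950 hunk above (the specific lib389 names imported vary slightly per suite):

    import sys
    import time
    import ldap
    import logging
    import pytest
    from lib389 import DirSrv, Entry, tools, tasks
    from lib389.tools import DirSrvTools
    from lib389._constants import *
    from lib389.properties import *
    from lib389.tasks import *

    log = logging.getLogger(__name__)
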
- standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -205,7 +147,7 @@ def test_ticket47950(topology): try: topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER, - replicaId=REPLICAID_MASTER) + replicaId=REPLICAID_MASTER_1) log.info('Successfully enabled replication.') except ValueError: log.error('Failed to enable replication') @@ -247,12 +189,10 @@ def test_ticket47950(topology): log.error('Failed to update replica agreement: ' + repl_agreement) assert False - # We passed - log.info("Test Passed.") - def test_ticket47953_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47953_test.py b/dirsrvtests/tickets/ticket47953_test.py index b9d0670..4e1ec60 100644 --- a/dirsrvtests/tickets/ticket47953_test.py +++ b/dirsrvtests/tickets/ticket47953_test.py @@ -3,14 +3,12 @@ import sys import time import ldap import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -27,22 +25,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -58,63 +40,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -152,12 +94,10 @@ def test_ticket47953(topology): log.error('Failed to remove invalid aci: ' + e.message['desc']) assert False - # If we got here we passed! - log.info('Ticket47953 Test - Passed') - def test_ticket47953_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py index 54895eb..217547a 100644 --- a/dirsrvtests/tickets/ticket47970_test.py +++ b/dirsrvtests/tickets/ticket47970_test.py @@ -4,14 +4,12 @@ import time import ldap import ldap.sasl import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -31,22 +29,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -62,63 +44,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -181,11 +123,11 @@ def test_ticket47970(topology): # We passed log.info('Root DSE was correctly not updated') - log.info("Test Passed.") def test_ticket47970_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47973_test.py b/dirsrvtests/tickets/ticket47973_test.py index 259d9ac..5506822 100644 --- a/dirsrvtests/tickets/ticket47973_test.py +++ b/dirsrvtests/tickets/ticket47973_test.py @@ -4,14 +4,12 @@ import time import ldap import ldap.sasl import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -31,22 +29,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -62,63 +44,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -209,12 +151,10 @@ def test_ticket47973(topology): task_count += 1 - # If we got here the test passed - log.info('Test PASSED') - def test_ticket47973_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47980_test.py b/dirsrvtests/tickets/ticket47980_test.py index da9246c..567301e 100644 --- a/dirsrvtests/tickets/ticket47980_test.py +++ b/dirsrvtests/tickets/ticket47980_test.py @@ -4,14 +4,12 @@ import time import ldap import ldap.sasl import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -83,22 +81,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -114,63 +96,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -686,12 +628,10 @@ def test_ticket47980(topology): log.fatal('Unable to search for entry %s: error %s' % (USER6_DN, e.message['desc'])) assert False - # If we got here the test passed - log.info('Test PASSED') - def test_ticket47980_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47981_test.py b/dirsrvtests/tickets/ticket47981_test.py index 26de221..59141b5 100644 --- a/dirsrvtests/tickets/ticket47981_test.py +++ b/dirsrvtests/tickets/ticket47981_test.py @@ -4,14 +4,12 @@ import time import ldap import ldap.sasl import logging -import socket import pytest from lib389 import DirSrv, Entry, tools, tasks from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * from lib389.tasks import * -from constants import * log = logging.getLogger(__name__) @@ -39,22 +37,6 @@ class TopologyStandalone(object): def topology(request): ''' This fixture is used to standalone topology for the 'module'. - At the beginning, It may exists a standalone instance. - It may also exists a backup for the standalone instance. - - Principle: - If standalone instance exists: - restart it - If backup of standalone exists: - create/rebind to standalone - - restore standalone instance from backup - else: - Cleanup everything - remove instance - remove backup - Create instance - Create backup ''' global installation_prefix @@ -70,63 +52,23 @@ def topology(request): args_standalone = args_instance.copy() standalone.allocate(args_standalone) - # Get the status of the backups - backup_standalone = standalone.checkBackupFS() - # Get the status of the instance and restart it if it exists instance_standalone = standalone.exists() + + # Remove the instance if instance_standalone: - # assuming the instance is already stopped, just wait 5 sec max - standalone.stop(timeout=5) - standalone.start(timeout=10) - - if backup_standalone: - # The backup exist, assuming it is correct - # we just re-init the instance with it - if not instance_standalone: - standalone.create() - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() - - # restore standalone instance from backup - standalone.stop(timeout=10) - standalone.restoreFS(backup_standalone) - standalone.start(timeout=10) - - else: - # We should be here only in two conditions - # - This is the first time a test involve standalone instance - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove the backup. 
So even if we have a specific backup file - # (e.g backup_standalone) we clear backup that an instance may have created - if backup_standalone: - standalone.clearBackupFS() - - # Remove the instance - if instance_standalone: - standalone.delete() - - # Create the instance - standalone.create() - - # Used to retrieve configuration information (dbdir, confdir...) - standalone.open() + standalone.delete() - # Time to create the backups - standalone.stop(timeout=10) - standalone.backupfile = standalone.backupFS() - standalone.start(timeout=10) + # Create the instance + standalone.create() + + # Used to retrieve configuration information (dbdir, confdir...) + standalone.open() # clear the tmp directory standalone.clearTmpDir(__file__) - # # Here we have standalone instance up and running - # Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyStandalone(standalone) @@ -319,12 +261,10 @@ def test_ticket47981(topology): log.fatal('Unable to search for entry %s: error %s' % (USER_DN, e.message['desc'])) assert False - # If we got here the test passed - log.info('Test PASSED') - def test_ticket47981_final(topology): topology.standalone.delete() + log.info('Testcase PASSED') def run_isolated(): diff --git a/dirsrvtests/tickets/ticket47988_test.py b/dirsrvtests/tickets/ticket47988_test.py index 2e5e6fd..2541260 100644 --- a/dirsrvtests/tickets/ticket47988_test.py +++ b/dirsrvtests/tickets/ticket47988_test.py @@ -8,11 +8,7 @@ import sys import time import ldap import logging -import socket -import time -import logging import pytest -import re import tarfile import stat import shutil @@ -21,8 +17,7 @@ from lib389 import DirSrv, Entry, tools from lib389.tools import DirSrvTools from lib389._constants import * from lib389.properties import * -from lib389._constants import * -from constants import * + logging.getLogger(__name__).setLevel(logging.DEBUG) log = logging.getLogger(__name__) @@ -48,7 +43,7 @@ BIND_PW = 'password' ENTRY_NAME = 'test_entry' ENTRY_DN = 'cn=%s, %s' % (ENTRY_NAME, SUFFIX) ENTRY_OC = "top person %s" % OC_NAME - + def _oc_definition(oid_ext, name, must=None, may=None): oid = "1.2.3.4.5.6.7.8.9.10.%d" % oid_ext desc = 'To test ticket 47490' @@ -57,14 +52,14 @@ def _oc_definition(oid_ext, name, must=None, may=None): must = MUST if not may: may = MAY - + new_oc = "( %s NAME '%s' DESC '%s' SUP %s AUXILIARY MUST %s MAY %s )" % (oid, name, desc, sup, must, may) return new_oc class TopologyMaster1Master2(object): def __init__(self, master1, master2): master1.open() self.master1 = master1 - + master2.open() self.master2 = master2 @@ -74,50 +69,29 @@ def topology(request): ''' This fixture is used to create a replicated topology for the 'module'. The replicated topology is MASTER1 <-> Master2. - At the beginning, It may exists a master2 instance and/or a master2 instance. - It may also exists a backup for the master1 and/or the master2. 
- - Principle: - If master1 instance exists: - restart it - If master2 instance exists: - restart it - If backup of master1 AND backup of master2 exists: - create or rebind to master1 - create or rebind to master2 - - restore master1 from backup - restore master2 from backup - else: - Cleanup everything - remove instances - remove backups - Create instances - Initialize replication - Create backups ''' global installation1_prefix global installation2_prefix #os.environ['USE_VALGRIND'] = '1' - + # allocate master1 on a given deployement - master1 = DirSrv(verbose=False) + master1 = DirSrv(verbose=False) if installation1_prefix: args_instance[SER_DEPLOYED_DIR] = installation1_prefix - + # Args for the master1 instance args_instance[SER_HOST] = HOST_MASTER_1 args_instance[SER_PORT] = PORT_MASTER_1 args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1 args_master = args_instance.copy() master1.allocate(args_master) - + # allocate master1 on a given deployement master2 = DirSrv(verbose=False) if installation2_prefix: args_instance[SER_DEPLOYED_DIR] = installation2_prefix - + # Args for the consumer instance args_instance[SER_HOST] = HOST_MASTER_2 args_instance[SER_PORT] = PORT_MASTER_2 @@ -125,162 +99,104 @@ def topology(request): args_master = args_instance.copy() master2.allocate(args_master) - - # Get the status of the backups - backup_master1 = master1.checkBackupFS() - backup_master2 = master2.checkBackupFS() - # Get the status of the instance and restart it if it exists - instance_master1 = master1.exists() - if instance_master1: - master1.stop(timeout=10) - master1.start(timeout=10) - + instance_master1 = master1.exists() instance_master2 = master2.exists() + + # Remove all the instances + if instance_master1: + master1.delete() if instance_master2: - master2.stop(timeout=10) - master2.start(timeout=10) - - if backup_master1 and backup_master2: - # The backups exist, assuming they are correct - # we just re-init the instances with them - if not instance_master1: - master1.create() - # Used to retrieve configuration information (dbdir, confdir...) - master1.open() - - if not instance_master2: - master2.create() - # Used to retrieve configuration information (dbdir, confdir...) - master2.open() - - # restore master1 from backup - master1.stop(timeout=10) - master1.restoreFS(backup_master1) - master1.start(timeout=10) - - # restore master2 from backup - master2.stop(timeout=10) - master2.restoreFS(backup_master2) - master2.start(timeout=10) - else: - # We should be here only in two conditions - # - This is the first time a test involve master-consumer - # so we need to create everything - # - Something weird happened (instance/backup destroyed) - # so we discard everything and recreate all - - # Remove all the backups. 
So even if we have a specific backup file - # (e.g backup_master) we clear all backups that an instance my have created - if backup_master1: - master1.clearBackupFS() - if backup_master2: - master2.clearBackupFS() - - # Remove all the instances - if instance_master1: - master1.delete() - if instance_master2: - master2.delete() - - # Create the instances - master1.create() - master1.open() - master2.create() - master2.open() - - # - # Now prepare the Master-Consumer topology - # - # First Enable replication - master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) - master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) - - # Initialize the supplier->consumer - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) - - if not repl_agreement: - log.fatal("Fail to create a replica agreement") - sys.exit(1) - - log.debug("%s created" % repl_agreement) - - properties = {RA_NAME: r'meTo_$host:$port', - RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], - RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], - RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], - RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} - master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) - - master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) - master1.waitForReplInit(repl_agreement) - - # Check replication is working fine - master1.add_s(Entry((TEST_REPL_DN, { - 'objectclass': "top person".split(), - 'sn': 'test_repl', - 'cn': 'test_repl'}))) - loop = 0 - while loop <= 10: - try: - ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") - break - except ldap.NO_SUCH_OBJECT: - time.sleep(1) - loop += 1 - - # Time to create the backups - master1.stop(timeout=10) - master1.backupfile = master1.backupFS() - master1.start(timeout=10) - - master2.stop(timeout=10) - master2.backupfile = master2.backupFS() - master2.start(timeout=10) - - # + master2.delete() + + # Create the instances + master1.create() + master1.open() + master2.create() + master2.open() + + # + # Now prepare the Master-Consumer topology + # + # First Enable replication + master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1) + master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2) + + # Initialize the supplier->consumer + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties) + + if not repl_agreement: + log.fatal("Fail to create a replica agreement") + sys.exit(1) + + log.debug("%s created" % repl_agreement) + + properties = {RA_NAME: r'meTo_$host:$port', + RA_BINDDN: defaultProperties[REPLICATION_BIND_DN], + RA_BINDPW: defaultProperties[REPLICATION_BIND_PW], + RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD], + 
RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]} + master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties) + + master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2) + master1.waitForReplInit(repl_agreement) + + # Check replication is working fine + master1.add_s(Entry((TEST_REPL_DN, { + 'objectclass': "top person".split(), + 'sn': 'test_repl', + 'cn': 'test_repl'}))) + loop = 0 + ent = None + while loop <= 10: + try: + ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE, "(objectclass=*)") + break + except ldap.NO_SUCH_OBJECT: + time.sleep(1) + loop += 1 + if ent is None: + assert False + # Here we have two instances master and consumer - # with replication working. Either coming from a backup recovery - # or from a fresh (re)init - # Time to return the topology return TopologyMaster1Master2(master1, master2) + def _header(topology, label): topology.master1.log.info("\n\n###############################################") topology.master1.log.info("#######") topology.master1.log.info("####### %s" % label) topology.master1.log.info("#######") topology.master1.log.info("###################################################") - + + def _install_schema(server, tarFile): server.stop(timeout=10) - - here = os.getcwd() - + tmpSchema = '/tmp/schema_47988' if not os.path.isdir(tmpSchema): os.mkdir(tmpSchema) - + for the_file in os.listdir(tmpSchema): file_path = os.path.join(tmpSchema, the_file) if os.path.isfile(file_path): os.unlink(file_path) - + os.chdir(tmpSchema) tar = tarfile.open(tarFile, 'r:gz') for member in tar.getmembers(): tar.extract(member.name) tar.close() - + st = os.stat(server.schemadir) - os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR ) + os.chmod(server.schemadir, st.st_mode | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRUSR) for the_file in os.listdir(tmpSchema): schemaFile = os.path.join(server.schemadir, the_file) if os.path.isfile(schemaFile): @@ -289,13 +205,13 @@ def _install_schema(server, tarFile): os.chmod(schemaFile, stat.S_IWUSR | stat.S_IRUSR) server.log.info("replace %s" % schemaFile) shutil.copy(the_file, schemaFile) - + else: server.log.info("add %s" % schemaFile) shutil.copy(the_file, schemaFile) os.chmod(schemaFile, stat.S_IRUSR | stat.S_IRGRP) os.chmod(server.schemadir, st.st_mode | stat.S_IRUSR | stat.S_IRGRP) - + def test_ticket47988_init(topology): """ @@ -303,20 +219,20 @@ def test_ticket47988_init(topology): - Objectclass with MAY 'member' - an entry ('bind_entry') with which we bind to test the 'SELFDN' operation It deletes the anonymous aci - + """ - + _header(topology, 'test_ticket47988_init') - + # enable acl error logging - mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL + mod = [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', str(8192))] # REPL topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op + mod = [(ldap.MOD_REPLACE, 'nsslapd-accesslog-level', str(260))] # Internal op topology.master1.modify_s(DN_CONFIG, mod) topology.master2.modify_s(DN_CONFIG, mod) - + # add dummy entries for cpt in range(MAX_OTHERS): name = "%s%d" % (OTHER_NAME, cpt) @@ -324,8 +240,8 @@ def test_ticket47988_init(topology): 'objectclass': "top person".split(), 'sn': name, 'cn': name}))) - - # check that entry 0 is replicated before + + # check that entry 0 is replicated before loop = 0 entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) while loop <= 10: @@ 
-336,10 +252,10 @@ def test_ticket47988_init(topology): time.sleep(1) loop += 1 assert (loop <= 10) - + topology.master1.stop(timeout=10) topology.master2.stop(timeout=10) - + #install the specific schema M1: ipa3.3, M2: ipa4.1 schema_file = os.path.join(topology.master1.getDir(__file__, DATA_DIR), "ticket47988/schema_ipa3.3.tar.gz") _install_schema(topology.master1, schema_file) @@ -349,17 +265,19 @@ def test_ticket47988_init(topology): topology.master1.start(timeout=10) topology.master2.start(timeout=10) + def _do_update_schema(server, range=3999): ''' Update the schema of the M2 (IPA4.1). to generate a nsSchemaCSN ''' - postfix = str(randint(range, range+1000)) + postfix = str(randint(range, range + 1000)) OID = '2.16.840.1.113730.3.8.12.%s' % postfix NAME = 'thierry%s' % postfix value = '( %s NAME \'%s\' DESC \'Override for Group Attributes\' STRUCTURAL MUST ( cn ) MAY sn X-ORIGIN ( \'IPA v4.1.2\' \'user defined\' ) )' % (OID, NAME) mod = [(ldap.MOD_ADD, 'objectclasses', value)] server.modify_s('cn=schema', mod) + def _do_update_entry(supplier=None, consumer=None, attempts=10): ''' This is doing an update on M2 (IPA4.1) and checks the update has been @@ -368,10 +286,10 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10): assert(supplier) assert(consumer) entryDN = "cn=%s0,%s" % (OTHER_NAME, SUFFIX) - value = str(randint(100,200)) + value = str(randint(100, 200)) mod = [(ldap.MOD_REPLACE, 'telephonenumber', value)] supplier.modify_s(entryDN, mod) - + loop = 0 while loop <= attempts: ent = consumer.getEntry(entryDN, ldap.SCOPE_BASE, "(objectclass=*)", ['telephonenumber']) @@ -383,7 +301,8 @@ def _do_update_entry(supplier=None, consumer=None, attempts=10): loop += 1 supplier.log.debug("test_do_update: receive %s (expected %s)" % (read_val, value)) assert (loop <= attempts) - + + def _pause_M2_to_M1(topology): topology.master1.log.info("\n\n######################### Pause RA M2->M1 ######################\n") ents = topology.master2.agreement.list(suffix=SUFFIX) @@ -397,6 +316,7 @@ def _resume_M1_to_M2(topology): assert len(ents) == 1 topology.master1.agreement.resume(ents[0].dn) + def _pause_M1_to_M2(topology): topology.master1.log.info("\n\n######################### Pause RA M1->M2 ######################\n") ents = topology.master1.agreement.list(suffix=SUFFIX) @@ -409,31 +329,33 @@ def _resume_M2_to_M1(topology): ents = topology.master2.agreement.list(suffix=SUFFIX) assert len(ents) == 1 topology.master2.agreement.resume(ents[0].dn) - + + def test_ticket47988_1(topology): ''' Check that replication is working and pause replication M2->M1 ''' _header(topology, 'test_ticket47988_1') - + topology.master1.log.debug("\n\nCheck that replication is working and pause replication M2->M1\n") _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) _pause_M2_to_M1(topology) - + + def test_ticket47988_2(topology): ''' Update M1 schema and trigger update M1->M2 So M1 should learn new/extended definitions that are in M2 schema ''' _header(topology, 'test_ticket47988_2') - + topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") master1_schema_csn = topology.master1.schema.get_schema_csn() master2_schema_csn = topology.master2.schema.get_schema_csn() topology.master1.log.debug("\nBefore updating the schema on M1\n") topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - + # Here M1 should no, should check M2 schema and learn 
_do_update_schema(topology.master1) master1_schema_csn = topology.master1.schema.get_schema_csn() @@ -442,11 +364,11 @@ def test_ticket47988_2(topology): topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) assert (master1_schema_csn) - + # to avoid linger effect where a replication session is reused without checking the schema _pause_M1_to_M2(topology) _resume_M1_to_M2(topology) - + #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") #time.sleep(60) _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=15) @@ -458,23 +380,25 @@ def test_ticket47988_2(topology): assert (master1_schema_csn) assert (master2_schema_csn) + def test_ticket47988_3(topology): ''' Resume replication M2->M1 and check replication is still working ''' _header(topology, 'test_ticket47988_3') - + _resume_M2_to_M1(topology) _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) + def test_ticket47988_4(topology): ''' Check schemaCSN is identical on both server And save the nsschemaCSN to later check they do not change unexpectedly ''' _header(topology, 'test_ticket47988_4') - + master1_schema_csn = topology.master1.schema.get_schema_csn() master2_schema_csn = topology.master2.schema.get_schema_csn() topology.master1.log.debug("\n\nMaster1 nsschemaCSN: %s" % master1_schema_csn) @@ -482,16 +406,17 @@ def test_ticket47988_4(topology): assert (master1_schema_csn) assert (master2_schema_csn) assert (master1_schema_csn == master2_schema_csn) - + topology.master1.saved_schema_csn = master1_schema_csn topology.master2.saved_schema_csn = master2_schema_csn - + + def test_ticket47988_5(topology): ''' Check schemaCSN do not change unexpectedly ''' _header(topology, 'test_ticket47988_5') - + _do_update_entry(supplier=topology.master1, consumer=topology.master2, attempts=5) _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=5) master1_schema_csn = topology.master1.schema.get_schema_csn() @@ -501,25 +426,26 @@ def test_ticket47988_5(topology): assert (master1_schema_csn) assert (master2_schema_csn) assert (master1_schema_csn == master2_schema_csn) - + assert (topology.master1.saved_schema_csn == master1_schema_csn) assert (topology.master2.saved_schema_csn == master2_schema_csn) - + + def test_ticket47988_6(topology): ''' Update M1 schema and trigger update M2->M1 So M2 should learn new/extended definitions that are in M1 schema ''' - + _header(topology, 'test_ticket47988_6') - + topology.master1.log.debug("\n\nUpdate M1 schema and an entry on M1\n") master1_schema_csn = topology.master1.schema.get_schema_csn() master2_schema_csn = topology.master2.schema.get_schema_csn() topology.master1.log.debug("\nBefore updating the schema on M1\n") topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) - + # Here M1 should no, should check M2 schema and learn _do_update_schema(topology.master1, range=5999) master1_schema_csn = topology.master1.schema.get_schema_csn() @@ -528,11 +454,11 @@ def test_ticket47988_6(topology): topology.master1.log.debug("Master1 nsschemaCSN: %s" % master1_schema_csn) topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) assert (master1_schema_csn) - + # to avoid linger effect where a replication session is reused without 
checking the schema _pause_M1_to_M2(topology) _resume_M1_to_M2(topology) - + #topo.master1.log.debug("\n\nSleep.... attach the debugger dse_modify") #time.sleep(60) _do_update_entry(supplier=topology.master2, consumer=topology.master1, attempts=15) @@ -543,15 +469,18 @@ def test_ticket47988_6(topology): topology.master1.log.debug("Master2 nsschemaCSN: %s" % master2_schema_csn) assert (master1_schema_csn) assert (master2_schema_csn) - + + def test_ticket47988_final(topology): - topology.master1.delete() - topology.master2.delete() + topology.master1.delete() + topology.master2.delete() + log.info('Testcase PASSED') + def run_isolated(): ''' run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..) - To run isolated without py.test, you need to + To run isolated without py.test, you need to - edit this file and comment '@pytest.fixture' line before 'topology' function. - set the installation prefix - run this program @@ -560,7 +489,7 @@ def run_isolated(): global installation2_prefix installation1_prefix = None installation2_prefix = None - + topo = topology(True) test_ticket47988_init(topo) test_ticket47988_1(topo)
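
After the cleanup, every standalone suite touched here converges on the same fixture shape: drop any leftover instance, create a fresh one, open it, clear the tmp directory, and return the topology. The following is a minimal consolidated sketch of that pattern, assuming the lib389 star imports used in these suites supply the shared args_instance template and the HOST_STANDALONE/PORT_STANDALONE/SERVERID_STANDALONE defaults; it is illustrative, not a verbatim excerpt of any one file.

    import os
    import pytest
    from lib389 import DirSrv
    from lib389._constants import *
    from lib389.properties import *


    class TopologyStandalone(object):
        def __init__(self, standalone):
            standalone.open()
            self.standalone = standalone


    @pytest.fixture(scope="module")
    def topology(request):
        '''
        Build a fresh standalone instance for the module: no backup/restore,
        just delete whatever a previous run left behind and create a new server.
        '''
        standalone = DirSrv(verbose=False)

        # args_instance is assumed to be the shared template dict provided by
        # the lib389 star imports; PREFIX overrides the deployment directory
        installation_prefix = os.getenv('PREFIX')
        if installation_prefix:
            args_instance[SER_DEPLOYED_DIR] = installation_prefix

        args_instance[SER_HOST] = HOST_STANDALONE
        args_instance[SER_PORT] = PORT_STANDALONE
        args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
        args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
        standalone.allocate(args_instance.copy())

        # Remove the instance if it already exists
        if standalone.exists():
            standalone.delete()

        # Create the instance
        standalone.create()

        # Used to retrieve configuration information (dbdir, confdir...)
        standalone.open()

        # clear the tmp directory
        standalone.clearTmpDir(__file__)

        return TopologyStandalone(standalone)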
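The two-master fixture for ticket 47988 takes the same delete-and-recreate approach and then wires replication before returning, ending with a readiness check that fails the fixture if the probe entry never reaches master2. Pulled together as a sketch, assuming master1 and master2 are freshly created and opened DirSrv instances and the same lib389 star imports; the helper name bootstrap_replication is hypothetical.

    import time
    import ldap
    from lib389 import Entry
    from lib389._constants import *
    from lib389.properties import *


    def bootstrap_replication(master1, master2):
        '''Enable two-way replication between two opened DirSrv masters.'''
        # Enable replication on both masters
        master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                          replicaId=REPLICAID_MASTER_1)
        master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER,
                                          replicaId=REPLICAID_MASTER_2)

        # Create the agreements in both directions
        properties = {RA_NAME: r'meTo_$host:$port',
                      RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                      RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
                      RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
                      RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
        repl_agreement = master1.agreement.create(suffix=SUFFIX, host=master2.host,
                                                  port=master2.port,
                                                  properties=properties)
        assert repl_agreement
        master2.agreement.create(suffix=SUFFIX, host=master1.host,
                                 port=master1.port, properties=properties)

        # Run a total init supplier->consumer and wait for it to finish
        master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
        master1.waitForReplInit(repl_agreement)

        # Prove replication works before handing the topology to the tests:
        # add an entry on master1 and poll master2 for up to ~10 seconds
        master1.add_s(Entry((TEST_REPL_DN, {'objectclass': 'top person'.split(),
                                            'sn': 'test_repl',
                                            'cn': 'test_repl'})))
        ent = None
        for _ in range(10):
            try:
                ent = master2.getEntry(TEST_REPL_DN, ldap.SCOPE_BASE,
                                       "(objectclass=*)")
                break
            except ldap.NO_SUCH_OBJECT:
                time.sleep(1)
        assert ent is not None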