From 81c6e66514ab743d55a7ee4adb5a566f4d796fe0 Mon Sep 17 00:00:00 2001
From: Thierry Bordaz
Date: Dec 22 2016 17:46:17 +0000
Subject: Ticket 49073: nsDS5ReplicatedAttributeListTotal fails when excluding no attribute

Bug Description:
	When nsDS5ReplicatedAttributeListTotal defines an empty list of
	excluded attributes, the intent is to send all entry attributes
	during the total initialization.
	When evaluating the attributes to send (total init), if the
	attribute list is empty, the replica agreement assumes it was not
	defined and falls back to the nsDS5ReplicatedAttributeList value.

Fix Description:
	When nsDS5ReplicatedAttributeListTotal is defined, set a flag so
	that its value is taken into account even when that value is empty.

https://bugzilla.redhat.com/show_bug.cgi?id=1405257

Reviewed by: Mark Reynolds (Thanks Mark !)

Platforms tested: F23

Flag Day: no

Doc impact: no

---

diff --git a/dirsrvtests/tests/tickets/ticket49073_test.py b/dirsrvtests/tests/tickets/ticket49073_test.py
new file mode 100644
index 0000000..0c594a9
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49073_test.py
@@ -0,0 +1,258 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+DEBUGGING = False
+GROUP_DN = ("cn=group," + DEFAULT_SUFFIX)
+
+if DEBUGGING:
+    logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+    logging.getLogger(__name__).setLevel(logging.INFO)
+log = logging.getLogger(__name__)
+
+
+class TopologyReplication(object):
+    def __init__(self, master1, master2):
+        master1.open()
+        self.master1 = master1
+        master2.open()
+        self.master2 = master2
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+    """Create Replication Deployment"""
+
+    # Creating master 1...
+    if DEBUGGING:
+        master1 = DirSrv(verbose=True)
+    else:
+        master1 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_1
+    args_instance[SER_PORT] = PORT_MASTER_1
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master1.allocate(args_master)
+    instance_master1 = master1.exists()
+    if instance_master1:
+        master1.delete()
+    master1.create()
+    master1.open()
+    master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+    # Creating master 2...
+    if DEBUGGING:
+        master2 = DirSrv(verbose=True)
+    else:
+        master2 = DirSrv(verbose=False)
+    args_instance[SER_HOST] = HOST_MASTER_2
+    args_instance[SER_PORT] = PORT_MASTER_2
+    args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+    args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+    args_master = args_instance.copy()
+    master2.allocate(args_master)
+    instance_master2 = master2.exists()
+    if instance_master2:
+        master2.delete()
+    master2.create()
+    master2.open()
+    master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+    def fin():
+        """If we are debugging just stop the instances,
+        otherwise remove them
+        """
+
+        if DEBUGGING:
+            master1.stop()
+            master2.stop()
+        else:
+            #master1.delete()
+            #master2.delete()
+            pass
+
+    request.addfinalizer(fin)
+
+    # Create all the agreements
+
+    # Creating agreement from master 1 to master 2
+    properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+    if not m1_m2_agmt:
+        log.fatal("Failed to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m1_m2_agmt)
+
+    # Creating agreement from master 2 to master 1
+    properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+                  RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                  RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                  RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                  RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+    m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+    if not m2_m1_agmt:
+        log.fatal("Failed to create a master -> master replica agreement")
+        sys.exit(1)
+    log.debug("%s created" % m2_m1_agmt)
+
+    # Allow the replicas to get situated with the new agreements...
+    time.sleep(5)
+
+    # Initialize all the agreements
+    master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    master1.waitForReplInit(m1_m2_agmt)
+
+    # Check replication is working...
+    if master1.testReplication(DEFAULT_SUFFIX, master2):
+        log.info('Replication is working.')
+    else:
+        log.fatal('Replication is not working.')
+        assert False
+
+    # Clear out the tmp dir
+    master1.clearTmpDir(__file__)
+
+    return TopologyReplication(master1, master2)
+
+def _add_group_with_members(topology):
+    # Create group
+    try:
+        topology.master1.add_s(Entry((GROUP_DN,
+                                      {'objectclass': 'top groupofnames'.split(),
+                                       'cn': 'group'})))
+    except ldap.LDAPError as e:
+        log.fatal('Failed to add group: error ' + e.message['desc'])
+        assert False
+
+    # Add members to the group
+    log.info('Adding members to the group...')
+    for idx in range(1, 5):
+        try:
+            MEMBER_VAL = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            topology.master1.modify_s(GROUP_DN,
+                                      [(ldap.MOD_ADD,
+                                        'member',
+                                        MEMBER_VAL)])
+        except ldap.LDAPError as e:
+            log.fatal('Failed to update group: member (%s) - error: %s' %
+                      (MEMBER_VAL, e.message['desc']))
+            assert False
+
+def _check_memberof(master, presence_flag):
+    # Check whether the members have the memberof attribute on the given master
+    for idx in range(1, 5):
+        try:
+            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            ent = master.getEntry(USER_DN, ldap.SCOPE_BASE, "(objectclass=*)")
+            if presence_flag:
+                assert ent.hasAttr('memberof') and ent.getValue('memberof') == GROUP_DN
+            else:
+                assert not ent.hasAttr('memberof')
+        except ldap.LDAPError as e:
+            log.fatal('Failed to retrieve user (%s): error %s' % (USER_DN, e.message['desc']))
+            assert False
+
+def _check_entry_exist(master, dn):
+    attempt = 0
+    while attempt <= 10:
+        try:
+            ent = master.getEntry(dn, ldap.SCOPE_BASE, "(objectclass=*)")
+            break
+        except ldap.NO_SUCH_OBJECT:
+            # Entry not replicated yet, retry after a short delay
+            attempt = attempt + 1
+            time.sleep(1)
+        except ldap.LDAPError as e:
+            log.fatal('Failed to retrieve user (%s): error %s' % (dn, e.message['desc']))
+            assert False
+    assert attempt <= 10
+
+def test_ticket49073(topology):
+    """Check that nsDS5ReplicatedAttributeListTotal with an empty
+    EXCLUDE list really excludes nothing from a total initialization.
+
+    The agreement excludes memberOf from incremental updates
+    (nsDS5ReplicatedAttributeList) but excludes no attribute from
+    total init, so after a total init the memberOf values must still
+    be present on master2 rather than skipped via the incremental list.
+    """
+    topology.master1.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology.master1.restart(timeout=10)
+    topology.master2.plugins.enable(name=PLUGIN_MEMBER_OF)
+    topology.master2.restart(timeout=10)
+
+    # Fractional config: exclude memberOf from incremental updates but nothing from total init
+    ents = topology.master1.agreement.list(suffix=SUFFIX)
+    assert len(ents) == 1
+    log.info('update %s to add nsDS5ReplicatedAttributeListTotal' % ents[0].dn)
+    topology.master1.modify_s(ents[0].dn,
+                              [(ldap.MOD_REPLACE,
+                                'nsDS5ReplicatedAttributeListTotal',
+                                '(objectclass=*) $ EXCLUDE '),
+                               (ldap.MOD_REPLACE,
+                                'nsDS5ReplicatedAttributeList',
+                                '(objectclass=*) $ EXCLUDE memberOf')])
+    topology.master1.restart(timeout=10)
+
+    #
+    # Create some users and a group
+    #
+    log.info('create users and group...')
+    for idx in range(1, 5):
+        try:
+            USER_DN = ("uid=member%d,%s" % (idx, DEFAULT_SUFFIX))
+            topology.master1.add_s(Entry((USER_DN,
+                                          {'objectclass': 'top extensibleObject'.split(),
+                                           'uid': 'member%d' % (idx)})))
+        except ldap.LDAPError as e:
+            log.fatal('Failed to add user (%s): error %s' % (USER_DN, e.message['desc']))
+            assert False
+
+    _check_entry_exist(topology.master2, "uid=member4,%s" % (DEFAULT_SUFFIX))
+    _add_group_with_members(topology)
+    _check_entry_exist(topology.master2, GROUP_DN)
+
+    # Check that after a regular update memberof is on both sides (the plugin is enabled on both)
+    time.sleep(5)
+    _check_memberof(topology.master1, True)
+    _check_memberof(topology.master2, True)
+
+
+    # Reinit master2 with the fractional definition in place
+    ents = topology.master1.agreement.list(suffix=SUFFIX)
+    assert len(ents) == 1
+    topology.master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+    topology.master1.waitForReplInit(ents[0].dn)
+
+    # Check that after a total update memberof is on both sides,
+    # because memberof is NOT excluded from total init
+    time.sleep(5)
+    _check_memberof(topology.master1, True)
+    _check_memberof(topology.master2, True)
+
+    if DEBUGGING:
+        # Add debugging steps (if any)...
+ pass + + +if __name__ == '__main__': + # Run isolated + # -s for DEBUG mode + CURRENT_FILE = os.path.realpath(__file__) + pytest.main("-s %s" % CURRENT_FILE) + diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c index 067bf39..6aee261 100644 --- a/ldap/servers/plugins/replication/repl5_agmt.c +++ b/ldap/servers/plugins/replication/repl5_agmt.c @@ -78,6 +78,7 @@ typedef struct repl5agmt { Slapi_DN *replarea; /* DN of replicated area */ char **frac_attrs; /* list of fractional attributes to be replicated */ char **frac_attrs_total; /* list of fractional attributes to be replicated for total update protocol */ + PRBool frac_attr_total_defined; /* TRUE if frac_attrs_total is defined */ Schedule *schedule; /* Scheduling information */ int auto_initialize; /* 1 = automatically re-initialize replica */ const Slapi_DN *dn; /* DN of replication agreement entry */ @@ -621,6 +622,7 @@ agmt_delete(void **rap) slapi_ch_free_string(&ra->binddn); slapi_ch_array_free(ra->frac_attrs); slapi_ch_array_free(ra->frac_attrs_total); + ra->frac_attr_total_defined = PR_FALSE; if (NULL != ra->creds) { @@ -1044,8 +1046,7 @@ agmt_get_fractional_attrs_total(const Repl_Agmt *ra) { char ** return_value = NULL; PR_ASSERT(NULL != ra); - if (NULL == ra->frac_attrs_total) - { + if (!ra->frac_attr_total_defined) { return agmt_get_fractional_attrs(ra); } PR_Lock(ra->lock); @@ -1074,7 +1075,7 @@ int agmt_is_fractional_attr_total(const Repl_Agmt *ra, const char *attrname) { int return_value; PR_ASSERT(NULL != ra); - if (NULL == ra->frac_attrs_total) + if (!ra->frac_attr_total_defined) { return agmt_is_fractional_attr(ra, attrname); } @@ -1611,6 +1612,7 @@ agmt_set_replicated_attributes_total_from_entry(Repl_Agmt *ra, const Slapi_Entry { slapi_ch_array_free(ra->frac_attrs_total); ra->frac_attrs_total = NULL; + ra->frac_attr_total_defined = PR_FALSE; } if (NULL != sattr) { @@ -1620,6 +1622,9 @@ agmt_set_replicated_attributes_total_from_entry(Repl_Agmt *ra, const Slapi_Entry { const char *val = slapi_value_get_string(sval); return_value = agmt_parse_excluded_attrs_config_attr(val,&(ra->frac_attrs_total)); + if (return_value == 0) { + ra->frac_attr_total_defined = PR_TRUE; + } } } PR_Unlock(ra->lock); @@ -1675,6 +1680,7 @@ agmt_set_replicated_attributes_total_from_attr(Repl_Agmt *ra, Slapi_Attr *sattr) { slapi_ch_array_free(ra->frac_attrs_total); ra->frac_attrs_total = NULL; + ra->frac_attr_total_defined = PR_FALSE; } if (NULL != sattr) { @@ -1684,6 +1690,9 @@ agmt_set_replicated_attributes_total_from_attr(Repl_Agmt *ra, Slapi_Attr *sattr) { const char *val = slapi_value_get_string(sval); return_value = agmt_parse_excluded_attrs_config_attr(val,&(ra->frac_attrs_total)); + if (return_value == 0) { + ra->frac_attr_total_defined = PR_TRUE; + } } } PR_Unlock(ra->lock); @@ -1706,9 +1715,9 @@ agmt_validate_replicated_attributes(Repl_Agmt *ra, int total) char **frac_attrs = NULL; /* If checking for total update, use the total attr list - * if it exists. If oen is not set, use the incremental + * if it exists. If total attr list is not set, use the incremental * attr list. */ - if (total && ra->frac_attrs_total) + if (total && ra->frac_attr_total_defined) { frac_attrs = ra->frac_attrs_total; }
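
The crux of the repl5_agmt.c hunks is that an EXCLUDE clause listing no attributes can leave frac_attrs_total empty, which a NULL test cannot tell apart from "nsDS5ReplicatedAttributeListTotal was never configured". The standalone sketch below is illustrative only (the struct and helper names are hypothetical, not the server's API; only the flag pattern mirrors the patch, and it models the empty EXCLUDE list as a NULL array). It shows how the old NULL test wrongly falls back to the incremental list while the flag-based test does not:

/*
 * Standalone sketch (hypothetical names, not the server's API).
 * Compile with: cc -o sketch sketch.c && ./sketch
 */
#include <stdio.h>

struct agmt_sketch {
    char **frac_attrs;           /* incremental exclude list */
    char **frac_attrs_total;     /* total-init exclude list; NULL here models "empty" */
    int frac_attr_total_defined; /* set whenever the Total attribute is configured */
};

/* Before the fix: an empty exclude list is indistinguishable from an
 * unconfigured one, so the code wrongly falls back to the incremental list. */
static char **total_excludes_buggy(const struct agmt_sketch *ra)
{
    if (ra->frac_attrs_total == NULL)
        return ra->frac_attrs;
    return ra->frac_attrs_total;
}

/* After the fix: the flag records that the attribute was configured,
 * so an empty list means "exclude nothing" during total init. */
static char **total_excludes_fixed(const struct agmt_sketch *ra)
{
    if (!ra->frac_attr_total_defined)
        return ra->frac_attrs;
    return ra->frac_attrs_total;
}

int main(void)
{
    static char *incremental[] = {"memberOf", NULL};

    /* nsDS5ReplicatedAttributeList: (objectclass=*) $ EXCLUDE memberOf
     * nsDS5ReplicatedAttributeListTotal: (objectclass=*) $ EXCLUDE      */
    struct agmt_sketch ra = {incremental, NULL, 1};

    char **buggy = total_excludes_buggy(&ra);
    char **fixed = total_excludes_fixed(&ra);

    printf("buggy: total init excludes %s\n", buggy && buggy[0] ? buggy[0] : "(nothing)");
    printf("fixed: total init excludes %s\n", fixed && fixed[0] ? fixed[0] : "(nothing)");
    return 0;
}

With the two agreement attributes set as in ticket49073_test.py, the sketch prints memberOf on the buggy path and "(nothing)" on the fixed one, which is exactly what the test asserts by checking that memberof values survive a total init. The same reasoning applies to the agmt_is_fractional_attr_total() hunk: per-attribute membership checks must also consult the flag rather than the list pointer.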