From 7a0be4e41dc34a0659215b00dc13859ce28db154 Mon Sep 17 00:00:00 2001
From: Ludwig Krispenz
Date: Feb 12 2020 08:03:29 +0000
Subject: Ticket 49623-cont - cenotaph errors on modrdn operations

Bug: In modrdn operations cenotaph entries are created to track the time
when an entry existed. But in cases where entries were renamed in cycles,
reusing the same DNs again and again, this failed with the error:
"failed to add cenotaph".

Fix: Previous versions of a cenotaph with the same DN are not used (or only
in very unlikely scenarios), so there is no need to change the DN
construction to keep every version of the same cenotaph. Instead, if
creating the cenotaph fails because it already exists, the existing
cenotaph is modified with the lifespan data of the cenotaph we attempted
to add.

Reviewed by: Thierry, thanks

---

diff --git a/dirsrvtests/tests/tickets/ticket49623_2_test.py b/dirsrvtests/tests/tickets/ticket49623_2_test.py
new file mode 100644
index 0000000..1d3167d
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket49623_2_test.py
@@ -0,0 +1,66 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2020 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import ldap
+import pytest
+import subprocess
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m1
+from lib389.idm.user import UserAccounts
+from lib389._constants import DEFAULT_SUFFIX
+from contextlib import contextmanager
+
+pytestmark = pytest.mark.tier1
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+
+@pytest.mark.ds49623
+@pytest.mark.bz1790986
+def test_modrdn_loop(topology_m1):
+    """Test that renaming the same entry several times, reusing the same
+    RDNs, does not result in cenotaph error messages
+
+    :id: 631b2be9-5c03-44c7-9853-a87c923d5b30
+
+    :setup: Single master instance
+
+    :steps: 1. Add an entry with the RDN "start"
+            2. Rename the entry to the RDN "change"
+            3. Rename the entry back to the RDN "start"
+            4. Rename the entry to the RDN "change" again
+            5. Check the error log for cenotaph error messages
+    :expectedresults:
+            1. No error messages
+    """
+
+    topo = topology_m1.ms['master1']
+    TEST_ENTRY_RDN_START = 'start'
+    TEST_ENTRY_RDN_CHANGE = 'change'
+    TEST_ENTRY_NAME = 'tuser'
+    users = UserAccounts(topo, DEFAULT_SUFFIX)
+    user_properties = {
+        'uid': TEST_ENTRY_RDN_START,
+        'cn': TEST_ENTRY_NAME,
+        'sn': TEST_ENTRY_NAME,
+        'uidNumber': '1001',
+        'gidNumber': '2001',
+        'homeDirectory': '/home/{}'.format(TEST_ENTRY_NAME)
+    }
+
+    tuser = users.create(properties=user_properties)
+    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True)
+    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_START), newsuperior=None, deloldrdn=True)
+    tuser.rename('uid={}'.format(TEST_ENTRY_RDN_CHANGE), newsuperior=None, deloldrdn=True)
+
+    log.info("Check the log messages for cenotaph error")
+    error_msg = ".*urp_fixup_add_cenotaph - failed to add cenotaph, err= 68"
+    assert not topo.ds_error_log.match(error_msg)
diff --git a/ldap/servers/plugins/replication/urp.c b/ldap/servers/plugins/replication/urp.c
index d0a486d..79a817c 100644
--- a/ldap/servers/plugins/replication/urp.c
+++ b/ldap/servers/plugins/replication/urp.c
@@ -852,7 +852,7 @@ urp_post_delete_operation(Slapi_PBlock *pb)
 }
 
 static int
-urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
+urp_fixup_add_cenotaph(Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
 {
     Slapi_PBlock *add_pb;
     Slapi_Entry *cenotaph = NULL;
@@ -890,7 +890,7 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
     /* slapi_sdn_free(&pre_sdn); */
 
     cenotaph = slapi_entry_alloc();
-    slapi_entry_init(cenotaph, newdn, NULL);
+    slapi_entry_init(cenotaph, slapi_ch_strdup(newdn), NULL);
 
     dncsn = (CSN *)entry_get_dncsn (pre_entry);
     slapi_entry_add_string(cenotaph, SLAPI_ATTR_OBJECTCLASS, "extensibleobject");
@@ -912,12 +912,46 @@ urp_fixup_add_cenotaph (Slapi_PBlock *pb, char *sessionid, CSN *opcsn)
                                     OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS);
     slapi_add_internal_pb(add_pb);
     slapi_pblock_get(add_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret);
+    slapi_pblock_destroy(add_pb);
+
+    if (ret == LDAP_ALREADY_EXISTS) {
+        /* the cenotaph already exists, probably because of a loop
+         * in renaming entries. Update it with new csns
+         */
+        slapi_log_err(SLAPI_LOG_REPL, sessionid,
+                      "urp_fixup_add_cenotaph - cenotaph (%s) already exists, updating\n", newdn);
+        Slapi_PBlock *mod_pb = slapi_pblock_new();
+        Slapi_Mods smods;
+        Slapi_DN *sdn = slapi_sdn_new_dn_byval(newdn);
+        slapi_mods_init(&smods, 4);
+        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "cenotaphfrom", csn_as_string(dncsn, PR_FALSE, csnstr));
+        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "cenotaphto", csn_as_string(opcsn, PR_FALSE, csnstr));
+        slapi_mods_add_string(&smods, LDAP_MOD_REPLACE, "nstombstonecsn", csn_as_string(opcsn, PR_FALSE, csnstr));
+
+        slapi_modify_internal_set_pb_ext(
+            mod_pb,
+            sdn,
+            slapi_mods_get_ldapmods_byref(&smods),
+            NULL, /* Controls */
+            NULL,
+            repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION),
+            OP_FLAG_REPL_FIXUP|OP_FLAG_NOOP|OP_FLAG_CENOTAPH_ENTRY|SLAPI_OP_FLAG_BYPASS_REFERRALS);
+
+        slapi_modify_internal_pb(mod_pb);
+        slapi_pblock_get(mod_pb, SLAPI_PLUGIN_INTOP_RESULT, &ret);
+        if (ret != LDAP_SUCCESS) {
+            slapi_log_err(SLAPI_LOG_ERR, sessionid,
+                          "urp_fixup_add_cenotaph - failed to modify cenotaph, err= %d\n", ret);
+        }
+        slapi_mods_done(&smods);
+        slapi_sdn_free(&sdn);
+        slapi_pblock_destroy(mod_pb);
 
-    if (ret != LDAP_SUCCESS) {
+    } else if (ret != LDAP_SUCCESS) {
         slapi_log_err(SLAPI_LOG_ERR, sessionid,
                       "urp_fixup_add_cenotaph - failed to add cenotaph, err= %d\n", ret);
     }
-    slapi_pblock_destroy(add_pb);
+    slapi_ch_free_string(&newdn);
 
     return ret;
 }
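
A side note, not part of the commit: to see what the fallback leaves behind, a minimal sketch along the lines below (reusing the lib389 "topo" connection from the test above) lists the cenotaph entries and the lifespan attributes that the modify path now refreshes instead of adding a duplicate. The dump_cenotaphs helper is hypothetical; the attribute names come from the plugin code, and the filter assumes cenotaphs carry the nsTombstone objectclass, which is what makes the server return such entries in a search at all.

    # Hypothetical helper, not part of the patch: list cenotaph entries after
    # the rename loop. "topo" is the lib389 DirSrv instance from the test
    # above; it subclasses python-ldap's SimpleLDAPObject, so search_s() works.
    import ldap
    from lib389._constants import DEFAULT_SUFFIX

    def dump_cenotaphs(topo):
        # Tombstone-style entries are only returned when the filter names the
        # nsTombstone objectclass explicitly (assumed here to cover cenotaphs).
        cenotaphs = topo.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE,
                                  '(&(objectclass=nstombstone)(cenotaphfrom=*))',
                                  ['cenotaphfrom', 'cenotaphto', 'nstombstonecsn'])
        for dn, attrs in cenotaphs:
            print(dn, attrs)
        return cenotaphs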