#50944 Issue 50937 - Update CLI/UI for new backend split configuration
Closed a year ago by spichugi. Opened 2 years ago by mreynolds.
mreynolds/389-ds-base issue50937  into  master

@@ -7,7 +7,6 @@ 

  # See LICENSE for details.

  # --- END COPYRIGHT BLOCK ---

  

- import os

  import ldap

  import ldap.dn

  from ldap import filter as ldap_filter
@@ -15,7 +14,7 @@ 

  import json

  from functools import partial

  from lib389._entry import Entry

- from lib389._constants import DIRSRV_STATE_ONLINE, SER_ROOT_DN, SER_ROOT_PW

+ from lib389._constants import DIRSRV_STATE_ONLINE

  from lib389.utils import (

          ensure_bytes, ensure_str, ensure_int, ensure_list_bytes, ensure_list_str,

          ensure_list_int, display_log_value, display_log_data
@@ -245,7 +244,7 @@ 

              raise ValueError("Invalid state. Cannot get presence on instance that is not ONLINE")

          self._log.debug("%s present(%r) %s" % (self._dn, attr, value))

  

-         e = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],

+         self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter, attrlist=[attr, ],

                                          serverctrls=self._server_controls, clientctrls=self._client_controls,

                                          escapehatch='i am sure')[0]

          values = self.get_attr_vals_bytes(attr)
@@ -589,6 +588,26 @@ 

              # This could have unforseen consequences ...

              return attrs_dict

  

+     def get_all_attrs_utf8(self, use_json=False):

+         """Get a dictionary having all the attributes of the entry

+ 

+         :returns: Dict with real attributes and operational attributes

+         """

+ 

+         self._log.debug("%s get_all_attrs" % (self._dn))

+         if self._instance.state != DIRSRV_STATE_ONLINE:

+             raise ValueError("Invalid state. Cannot get properties on instance that is not ONLINE")

+         else:

+             # retrieving real(*) and operational attributes(+)

+             attrs_entry = self._instance.search_ext_s(self._dn, ldap.SCOPE_BASE, self._object_filter,

+                                                       attrlist=["*", "+"], serverctrls=self._server_controls,

+                                                       clientctrls=self._client_controls, escapehatch='i am sure')[0]

+             # getting dict from 'entry' object

+             r = {}

+             for (k, vo) in attrs_entry.data.items():

+                 r[k] = ensure_list_str(vo)

+             return r

+ 

      def get_attrs_vals(self, keys, use_json=False):

          self._log.debug("%s get_attrs_vals(%r)" % (self._dn, keys))

          if self._instance.state != DIRSRV_STATE_ONLINE:

file modified
+98 -13
@@ -11,7 +11,7 @@ 

  import ldap

  from lib389._constants import *

  from lib389.properties import *

- from lib389.utils import normalizeDN, ensure_str, ensure_bytes

+ from lib389.utils import normalizeDN, ensure_str, ensure_bytes,  assert_c

  from lib389 import Entry

  

  # Need to fix this ....
@@ -26,7 +26,7 @@ 

  # We need to be a factor to the backend monitor

  from lib389.monitor import MonitorBackend

  from lib389.index import Index, Indexes, VLVSearches, VLVSearch

- from lib389.tasks import ImportTask, ExportTask, CleanAllRUVTask, Tasks

+ from lib389.tasks import ImportTask, ExportTask, Tasks

  from lib389.encrypted_attributes import EncryptedAttr, EncryptedAttrs

  

  
@@ -341,11 +341,11 @@ 

  

      def getProperties(self, suffix=None, backend_dn=None, bename=None,

                        properties=None):

-         raise NotImplemented

+         raise NotImplementedError

  

      def setProperties(self, suffix=None, backend_dn=None, bename=None,

                        properties=None):

-         raise NotImplemented

+         raise NotImplementedError

  

      def toSuffix(self, entry=None, name=None):

          '''
@@ -933,9 +933,12 @@ 

  

  

  class DatabaseConfig(DSLdapObject):

-     """Chaining Default Config settings DSLdapObject with:

-     - must attributes = ['cn']

-     - RDN attribute is 'cn'

+     """Backend Database configuration

+ 

+     The entire database configuration consists of the  main global configuration entry,

+     and the underlying DB library configuration: whither BDB or LMDB.  The combined

+     configuration should be presented as a single entity so the end user does not need

+     to worry about what library is being used, and just focus on the configuration.

  

      :param instance: An instance

      :type instance: lib389.DirSrv
@@ -943,14 +946,96 @@ 

      :type dn: str

      """

  

-     _must_attributes = ['cn']

- 

-     def __init__(self, instance, dn=None):

+     def __init__(self, instance, dn="cn=config,cn=ldbm database,cn=plugins,cn=config"):

          super(DatabaseConfig, self).__init__(instance, dn)

          self._rdn_attribute = 'cn'

          self._must_attributes = ['cn']

+         self._global_attrs = [

+             'nsslapd-lookthroughlimit',

+             'nsslapd-mode',

+             'nsslapd-idlistscanlimit',

+             'nsslapd-directory',

+             'nsslapd-import-cachesize',

+             'nsslapd-idl-switch',

+             'nsslapd-search-bypass-filter-test',

+             'nsslapd-search-use-vlv-index',

+             'nsslapd-exclude-from-export',

+             'nsslapd-serial-lock',

+             'nsslapd-subtree-rename-switch',

+             'nsslapd-pagedlookthroughlimit',

+             'nsslapd-pagedidlistscanlimit',

+             'nsslapd-rangelookthroughlimit',

+             'nsslapd-backend-opt-level',

+             'nsslapd-backend-implement',

+         ]

+         self._db_attrs = {

+             'bdb':

+                 [

+                     'nsslapd-dbcachesize',

+                     'nsslapd-db-logdirectory',

+                     'nsslapd-db-home-directory',

+                     'nsslapd-db-durable-transaction',

+                     'nsslapd-db-transaction-wait',

+                     'nsslapd-db-checkpoint-interval',

+                     'nsslapd-db-compactdb-interval',

+                     'nsslapd-db-transaction-batch-val',

+                     'nsslapd-db-transaction-batch-min-wait',

+                     'nsslapd-db-transaction-batch-max-wait',

+                     'nsslapd-db-logbuf-size',

+                     'nsslapd-db-locks',

+                     'nsslapd-db-private-import-mem',

+                     'nsslapd-import-cache-autosize',

+                     'nsslapd-cache-autosize',

+                     'nsslapd-cache-autosize-split',

+                     'nsslapd-import-cachesize',

+                     'nsslapd-search-bypass-filter-test',

+                     'nsslapd-serial-lock',

+                     'nsslapd-db-deadlock-policy',

+                 ],

+             'lmdb': []

+         }

          self._create_objectclasses = ['top', 'extensibleObject']

          self._protected = True

-         # Have to set cn=bdb, but when we can choose between bdb and lmdb we'll

-         # have some hoops to jump through.

-         self._dn = "cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config"

+         # This could be "bdb" or "lmdb", use what we have configured in the global config

+         self._db_lib = self.get_attr_val_utf8_l('nsslapd-backend-implement')

+         self._dn = "cn=config,cn=ldbm database,cn=plugins,cn=config"

+         self._db_dn = f"cn={self._db_lib},cn=config,cn=ldbm database,cn=plugins,cn=config"

+         self._globalObj = DSLdapObject(self._instance, dn=self._dn)

+         self._dbObj = DSLdapObject(self._instance, dn=self._db_dn)

+         # Assert there is no overlap in different config sets

+         assert_c(len(set(self._global_attrs).intersection(set(self._db_attrs['bdb']), set(self._db_attrs['lmdb']))) == 0)

+ 

+     def get(self):

+         """Get the combined config entries"""

+         # Get and combine both sets of attributes

+         global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)

+         db_attrs = self._dbObj.get_attrs_vals_utf8(self._db_attrs[self._db_lib])

+         combined_attrs = {**global_attrs, **db_attrs}

+         return combined_attrs

+ 

+     def display(self):

+         """Display the combined configuration"""

+         global_attrs = self._globalObj.get_attrs_vals_utf8(self._global_attrs)

+         db_attrs = self._dbObj.get_attrs_vals_utf8(self._db_attrs[self._db_lib])

+         combined_attrs = {**global_attrs, **db_attrs}

+         for (k, vo) in combined_attrs.items():

+             if len(vo) == 0:

+                 vo = ""

+             else:

+                 vo = vo[0]

+             self._instance.log.info(f'{k}: {vo}')

+ 

+     def set(self, value_pairs):

+         for attr, val in value_pairs:

+             attr = attr.lower()

+             if attr in self._global_attrs:

+                 global_config = DSLdapObject(self._instance, dn=self._dn)

+                 global_config.replace(attr, val)

+             elif attr in self._db_attrs['bdb']:

+                 db_config = DSLdapObject(self._instance, dn=self._db_dn)

+                 db_config.replace(attr, val)

+             elif attr in self._db_attrs['lmdb']:

+                 pass

+             else:

+                 # Unknown attribute

+                 raise ValueError("Can not update database configuration with unknown attribute: " + attr)

@@ -138,13 +138,13 @@ 

  

      be_list.sort()

      if args.json:

-         print(json.dumps({"type": "list", "items": be_list}, indent=4))

+         log.info(json.dumps({"type": "list", "items": be_list}, indent=4))

      else:

          if len(be_list) > 0:

              for be in be_list:

-                 print(be)

+                 log.info(be)

          else:

-             print("No backends")

+             log.info("No backends")

  

  

  def backend_get(inst, basedn, log, args):
@@ -200,7 +200,7 @@ 

              # Unsupported rdn

              raise ValueError("Suffix RDN is not supported for creating suffix object.  Only 'dc', 'o', 'ou', and 'cn' are supported.")

  

-     print("The database was sucessfully created")

+     log.info("The database was sucessfully created")

  

  

  def _recursively_del_backends(be):
@@ -227,7 +227,7 @@ 

      _recursively_del_backends(be)

      be.delete()

  

-     print("The database, and any sub-suffixes, were sucessfully deleted")

+     log.info("The database, and any sub-suffixes, were sucessfully deleted")

  

  

  def backend_import(inst, basedn, log, args):
@@ -244,7 +244,7 @@ 

      result = task.get_exit_code()

  

      if task.is_complete() and result == 0:

-         print("The import task has finished successfully")

+         log.info("The import task has finished successfully")

      else:

          raise ValueError("Import task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))

  
@@ -272,7 +272,7 @@ 

      result = task.get_exit_code()

  

      if task.is_complete() and result == 0:

-         print("The export task has finished successfully")

+         log.info("The export task has finished successfully")

      else:

          raise ValueError("Export task failed\n-------------------------\n{}".format(ensure_str(task.get_task_log())))

  
@@ -329,15 +329,15 @@ 

      if len(subsuffixes) > 0:

          subsuffixes.sort()

          if args.json:

-             print(json.dumps({"type": "list", "items": subsuffixes}, indent=4))

+             log.info(json.dumps({"type": "list", "items": subsuffixes}, indent=4))

          else:

              for sub in subsuffixes:

-                 print(sub)

+                 log.info(sub)

      else:

          if args.json:

-             print(json.dumps({"type": "list", "items": []}, indent=4))

+             log.info(json.dumps({"type": "list", "items": []}, indent=4))

          else:

-             print("No sub-suffixes under this backend")

+             log.info("No sub-suffixes under this backend")

  

  

  def build_node(suffix, be_name, subsuf=False, link=False, replicated=False):
@@ -476,15 +476,15 @@ 

          be.enable()

      if args.disable:

          be.disable()

-     print("The backend configuration was sucessfully updated")

+     log.info("The backend configuration was successfully updated")

  

  

  def db_config_get(inst, basedn, log, args):

      db_cfg = DatabaseConfig(inst)

      if args.json:

-         print(db_cfg.get_all_attrs_json())

+         log.info(json.dumps({"type": "entry", "attrs": db_cfg.get()}, indent=4))

      else:

-         print(db_cfg.display())

+         db_cfg.display()

  

  

  def db_config_set(inst, basedn, log, args):
@@ -498,17 +498,18 @@ 

              # We don't support deleting attributes or setting empty values in db

              continue

          else:

-             replace_list.append((attr, value))

+             replace_list.append([attr, value])

      if len(replace_list) > 0:

-         db_cfg.replace_many(*replace_list)

+         db_cfg.set(replace_list)

      elif not did_something:

          raise ValueError("There are no changes to set in the database configuration")

  

-     print("Successfully updated database configuration")

+     log.info("Successfully updated database configuration")

+ 

  

  def _format_status(log, mtype, json=False):

      if json:

-         print(mtype.get_status_json())

+         log.info(mtype.get_status_json())

      else:

          status_dict = mtype.get_status()

          log.info('dn: ' + mtype._dn)
@@ -517,6 +518,7 @@ 

              for vi in v:

                  log.info('{}: {}'.format(k, vi))

  

+ 

  def get_monitor(inst, basedn, log, args):

      if args.suffix is not None:

          # Get a suffix/backend monitor entry
@@ -535,7 +537,7 @@ 

  def backend_add_index(inst, basedn, log, args):

      be = _get_backend(inst, args.be_name)

      be.add_index(args.attr, args.index_type, args.matching_rule, reindex=args.reindex)

-     print("Successfully added index")

+     log.info("Successfully added index")

  

  

  def backend_set_index(inst, basedn, log, args):
@@ -562,7 +564,7 @@ 

  

      if args.reindex:

          be.reindex(attrs=[args.attr])

-     print("Index successfully updated")

+     log.info("Index successfully updated")

  

  

  def backend_get_index(inst, basedn, log, args):
@@ -576,9 +578,9 @@ 

                  # Append decoded json object, because we are going to dump it later

                  results.append(json.loads(entry))

              else:

-                 print(index.display())

+                 log.info(index.display())

      if args.json:

-         print(json.dumps({"type": "list", "items": results}, indent=4))

+         log.info(json.dumps({"type": "list", "items": results}, indent=4))

  

  

  def backend_list_index(inst, basedn, log, args):
@@ -593,25 +595,25 @@ 

                  results.append(json.loads(index.get_all_attrs_json()))

          else:

              if args.just_names:

-                 print(index.get_attr_val_utf8_l('cn'))

+                 log.info(index.get_attr_val_utf8_l('cn'))

              else:

-                 print(index.display())

+                 log.info(index.display())

  

      if args.json:

-         print(json.dumps({"type": "list", "items": results}, indent=4))

+         log.info(json.dumps({"type": "list", "items": results}, indent=4))

  

  

  def backend_del_index(inst, basedn, log, args):

      be = _get_backend(inst, args.be_name)

      for attr in args.attr:

          be.del_index(attr)

-         print("Successfully deleted index \"{}\"".format(attr))

+         log.info("Successfully deleted index \"{}\"".format(attr))

  

  

  def backend_reindex(inst, basedn, log, args):

      be = _get_backend(inst, args.be_name)

      be.reindex(attrs=args.attr, wait=args.wait)

-     print("Successfully reindexed database")

+     log.info("Successfully reindexed database")

  

  

  def backend_attr_encrypt(inst, basedn, log, args):
@@ -622,16 +624,16 @@ 

          for attr in args.add_attr:

              be.add_encrypted_attr(attr)

          if len(args.add_attr) > 1:

-             print("Successfully added encrypted attributes")

+             log.info("Successfully added encrypted attributes")

          else:

-             print("Successfully added encrypted attribute")

+             log.info("Successfully added encrypted attribute")

      if args.del_attr is not None:

          for attr in args.del_attr:

              be.del_encrypted_attr(attr)

          if len(args.del_attr) > 1:

-             print("Successfully deleted encrypted attributes")

+             log.info("Successfully deleted encrypted attributes")

          else:

-             print("Successfully deleted encrypted attribute")

+             log.info("Successfully deleted encrypted attribute")

      if args.list:

          results = be.get_encrypted_attrs(args.just_names)

          if args.json:
@@ -641,17 +643,17 @@ 

              else:

                  for result in results:

                      json_results.append(json.loads(result.get_all_attrs_json()))

-             print(json.dumps({"type": "list", "items": json_results}, indent=4))

+             log.info(json.dumps({"type": "list", "items": json_results}, indent=4))

  

          else:

              if len(results) == 0:

-                 print("There are no encrypted attributes for this backend")

+                 log.info("There are no encrypted attributes for this backend")

              else:

                  for attr in results:

                      if args.just_names:

-                         print(attr)

+                         log.info(attr)

                      else:

-                         print(attr.display())

+                         log.info(attr.display())

  

  

  def backend_list_vlv(inst, basedn, log, args):
@@ -675,24 +677,24 @@ 

                  results.append(entry)

          else:

              if args.just_names:

-                 print(vlv.get_attr_val_utf8_l('cn'))

+                 log.info(vlv.get_attr_val_utf8_l('cn'))

              else:

                  raw_entry = vlv.get_attrs_vals(VLV_SEARCH_ATTRS)

-                 print('dn: ' + vlv.dn)

+                 log.info('dn: ' + vlv.dn)

                  for k, v in list(raw_entry.items()):

-                     print('{}: {}'.format(ensure_str(k), ensure_str(v[0])))

+                     log.info('{}: {}'.format(ensure_str(k), ensure_str(v[0])))

                  indexes = vlv.get_sorts()

                  sorts = []

-                 print("Sorts:")

+                 log.info("Sorts:")

                  for idx in indexes:

                      entry = idx.get_attrs_vals(VLV_INDEX_ATTRS)

-                     print(' - dn: ' + idx.dn)

+                     log.info(' - dn: ' + idx.dn)

                      for k, v in list(entry.items()):

-                         print(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))

-                     print()

+                         log.info(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))

+                     log.info()

  

      if args.json:

-         print(json.dumps({"type": "list", "items": results}, indent=4))

+         log.info(json.dumps({"type": "list", "items": results}, indent=4))

  

  

  def backend_get_vlv(inst, basedn, log, args):
@@ -707,9 +709,9 @@ 

                  results.append(json.loads(entry))

              else:

                  raw_entry = vlv.get_attrs_vals(VLV_SEARCH_ATTRS)

-                 print('dn: ' + vlv._dn)

+                 log.info('dn: ' + vlv._dn)

                  for k, v in list(raw_entry.items()):

-                     print('{}: {}'.format(ensure_str(k), ensure_str(v[0])))

+                     log.info('{}: {}'.format(ensure_str(k), ensure_str(v[0])))

              # Print indexes

              indexes = vlv.get_sorts()

              for idx in indexes:
@@ -718,14 +720,14 @@ 

                      results.append(json.loads(entry))

                  else:

                      raw_entry = idx.get_attrs_vals(VLV_INDEX_ATTRS)

-                     print('Sorts:')

-                     print(' - dn: ' + idx._dn)

+                     log.info('Sorts:')

+                     log.info(' - dn: ' + idx._dn)

                      for k, v in list(raw_entry.items()):

-                         print(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))

-                     print()

+                         log.info(' - {}: {}'.format(ensure_str(k), ensure_str(v[0])))

+                     log.info()

  

              if args.json:

-                 print(json.dumps({"type": "list", "items": results}, indent=4))

+                 log.info(json.dumps({"type": "list", "items": results}, indent=4))

  

  

  def backend_create_vlv(inst, basedn, log, args):
@@ -735,7 +737,7 @@ 

               'vlvscope': args.search_scope,

               'vlvfilter': args.search_filter}

      be.add_vlv_search(args.name, props)

-     print("Successfully created new VLV Search entry, now you can add indexes to it.")

+     log.info("Successfully created new VLV Search entry, now you can add indexes to it.")

  

  

  def backend_edit_vlv(inst, basedn, log, args):
@@ -757,14 +759,14 @@ 

          raise ValueError("There are no changes to set in the VLV search entry")

      if args.reindex:

          vlv_search.reindex()

-     print("Successfully updated VLV search entry")

+     log.info("Successfully updated VLV search entry")

  

  

  def backend_del_vlv(inst, basedn, log, args):

      be = _get_backend(inst, args.be_name)

      vlv_search = be.get_vlv_searches(vlv_name=args.name)

      vlv_search.delete_all()

-     print("Successfully deleted VLV search and its indexes")

+     log.info("Successfully deleted VLV search and its indexes")

  

  

  def backend_create_vlv_index(inst, basedn, log, args):
@@ -773,14 +775,14 @@ 

      vlv_search.add_sort(args.index_name, args.sort)

      if args.index_it:

          vlv_search.reindex(args.be_name, vlv_index=args.index_name)

-     print("Successfully created new VLV index entry")

+     log.info("Successfully created new VLV index entry")

  

  

  def backend_delete_vlv_index(inst, basedn, log, args):

      be = _get_backend(inst, args.be_name)

      vlv_search = be.get_vlv_searches(vlv_name=args.parent_name)

      vlv_search.delete_sort(args.index_name, args.sort)

-     print("Successfully deleted VLV index entry")

+     log.info("Successfully deleted VLV index entry")

  

  

  def backend_reindex_vlv(inst, basedn, log, args):
@@ -788,7 +790,7 @@ 

      suffix = be.get_suffix()

      vlv_search = be.get_vlv_searches(vlv_name=args.parent_name)

      vlv_search.reindex(suffix, vlv_index=args.index_name)

-     print("Successfully reindexed VLV indexes")

+     log.info("Successfully reindexed VLV indexes")

  

  

  def create_parser(subparsers):

Description:

In preparation for the move to LMDB the global database configuration has been split into two (or more) entries under cn=config. This patch changes how the gets/sets work to make both of these entries appear as one configuration unit. This is done by dynamically setting the backend configuration entry dn with what is set in nsslapd-backend-implement.

Also cleaned up python warnings and replaced some prints with log.info()

relates: https://pagure.io/389-ds-base/issue/50937

The code LGTM but maybe @firstyear would like to take a look at DatabaseConfig(DSLdapObject) implementation. Maybe I missed something in its idea.

One small nitpick though... You named the Commit after the Issue but there is no UI work in the PR.
It is confusing, IMHO...

The code LGTM but maybe @firstyear would like to take a look at DatabaseConfig(DSLdapObject) implementation. Maybe I missed something in its idea.

Okay so the issue is that prior to 1.4.2 we had one config entry for the global database:

cn=config,cn=ldbm database,cn=plugins,cn=config

But in 1.4.2 we split this single entry into two entries:

  • cn=config,cn=ldbm database,cn=plugins,cn=config
  • cn=bdb,cn=config,cn=ldbm database,cn=plugins,cn=config

and eventually we'll have:

  • cn=lmdb,cn=config,cn=ldbm database,cn=plugins,cn=config

This fix basically merges these two entries into a single manageable object. This also fixes the UI, which is currently broken. Currently the UI only allows you to manage the cn=bdb entry, and not the global entry. So this is now fixed in this patch by just merging the config.

Then, to the user this is still one config object you need to manage, and the user does not need to worry about if it's BDB or LMDB, or whatever the underlying db library is. It's just one "entry" that handles the global database configuration for the server. Otherwise we need to add new CLI options for each database type, in an already crowded CLI option list. So I decided it would be easier for users, and cleaner for developers to just merge the pairing. Hope that helps clear up what this patch is trying to do.

If we already have get() method, can we also add display() method to DatabaseConfig object? I think it'll be better to have the tool in the API and not implementing the for loop here...

Looks like I can clean up the lib389 part a bit and make it cleaner, so a new commit is coming...

If we already have get() method, can we also add display() method to DatabaseConfig object? I think it'll be better to have the tool in the API and not implementing the for loop here...

Yeah sounds good, I'll get that added!

rebased onto 58ce18e0f49ab6ef2a4c696fdd74c08cac53a8ab

2 years ago

changes made please review....

Probably worth checking that the server "enforces" that cn=bdb is the only valid cn on a bdb config.

It could be worth a safety check to ensure that set(_global_attrs) and set(_db_attrs) has no overlap, else this function may be ambiguous about what we are changing.

I think generally I'm happy with this. I think the "combined" set and get seems confusing, but also seems like "the right thing" here because a bdb only exists with the backend config so I think it's the right answer here. It could cause some confusion internally though with get/set vs get_attr etc.

I was expecting this list follows bdb_config_param but it does not. For example nsslapd-dbncache, nsslapd-db-page-size... are not in that list. Is it expected ?
If it should follow bdb_config_param, could you order the list like in bdb_config_param struct.

Same as below, should it match ldbm_config ?

I was expecting this list follows bdb_config_param but it does not. For example nsslapd-dbncache, nsslapd-db-page-size... are not in that list. Is it expected ?
If it should follow bdb_config_param, could you order the list like in bdb_config_param struct.

Well right now the CLI does not have "set" options for every single BDB setting. Instead we limited the settings to the most commonly used ones. So this list is in sync with what the CLI can do. I could add more attributes here but it wouldn't really do anything since there is no way to set attributes like "nsslapd-maxpassbeforemerge" via dsconf. If we want to add every single attribute then we should do so in a different ticket.

Thanks @mreynolds for the explanation. You are correct it is not useful to support all corner cases db setting in a CLI. You have my ACK

Probably worth checking that the server "enforces" that cn=bdb is the only valid cn on a bdb config.

It is

It could be worth a safety check to ensure that set(_global_attrs) and set(_db_attrs) has no overlap, else this function may be ambiguous about what we are changing.

Sorry I'm not sure what you are asking me to do. Do you want me to add a check that the attribute being updated is not in two lists? That doesn't seem necessary as the lists are maintained in the object itself. Please clarify, thanks!

It could be worth a safety check to ensure that set(_global_attrs) and set(_db_attrs) has no overlap, else this function may be ambiguous about what we are changing.

Sorry I'm not sure what you are asking me to do. Do you want me to add a check that the attribute being updated is not in two lists? That doesn't seem necessary as the lists are maintained in the object itself. Please clarify, thanks!

Ahhh, yes. So maybe something like:

def __init__(self, ...): 
    assert_c(len(set(self._global_attrs).intersection(set(self._db_attrs))) == 0)

Probably just once in the init to make sure as programmers we got this right, so that def set and def get is sure that it's attributes are only from "one source".

rebased onto 535e737e3d06ab9e6857f693b31009476303594d

2 years ago

rebased onto a5e0fef

2 years ago

It could be worth a safety check to ensure that set(_global_attrs) and set(_db_attrs) has no overlap, else this function may be ambiguous about what we are changing.
Sorry I'm not sure what you are asking me to do. Do you want me to add a check that the attribute being updated is not in two lists? That doesn't seem necessary as the lists are maintained in the object itself. Please clarify, thanks!

Ahhh, yes. So maybe something like:
def init(self, ...):
assert_c(len(set(self._global_attrs).intersection(set(self._db_attrs))) == 0)

Done, merging...

Pull-Request has been merged by mreynolds

2 years ago

Thanks mate, I'm glad you saw the invisible Ack that I forgot to put in (OOPS!!!). Ack for certain :)

Thanks mate, I'm glad you saw the invisible Ack that I forgot to put in (OOPS!!!). Ack for certain :)

Since you did not have any other concerns I assumed it was an ACK :-p Plus I wanted to get upstream builds done, but looks like master branch is currently broken :-( BUILD_NUM is not getting set on F33, investigating...

BUILD_NUM is not getting set on F33, investigating...

Can't locate FileHandle.pm in @INC (you may need to install the FileHandle module) (@INC contains: /usr/local/lib64/perl5/5.30 /usr/local/share/perl5/5.30 /usr/lib64/perl5/vendor_perl /usr/share/perl5/vendor_perl /usr/lib64/perl5 /usr/share/perl5) at ./buildnum.pl line 22.

Looks like a missing perl dependency.

389-ds-base is moving from Pagure to Github. This means that new issues and pull requests
will be accepted only in 389-ds-base's github repository.

This pull request has been cloned to Github as issue and is available here:
- https://github.com/389ds/389-ds-base/issues/3997

If you want to continue to work on the PR, please navigate to the github issue,
download the patch from the attachments and file a new pull request.

Thank you for understanding. We apologize for all inconvenience.

Pull-Request has been closed by spichugi

a year ago