#50494 Add three new CI test cases for different issues.
Closed 3 years ago by spichugi. Opened 4 years ago by aadhikari.
aadhikari/389-ds-base bz-automation into master

@@ -464,6 +464,38 @@ 
          topo_tls_ldapi.resume_all_replicas()
  
  
+ def test_dsreplcheck_with_password_file(topo_tls_ldapi, tmpdir):
+     """Check ds-replcheck works if password file is provided
+     with -y option.
+ 
+     :id: 0d847ec7-6eaf-4cb5-a9c6-e4a5a1778f93
+     :setup: Two master replication
+     :steps:
+         1. Create a password file with the default password of the server.
+         2. Run ds-replcheck with -y option (used to pass password file)
+     :expectedresults:
+         1. It should be successful
+         2. It should be successful
+     """
+     m1 = topo_tls_ldapi.ms["master1"]
+     m2 = topo_tls_ldapi.ms["master2"]
+ 
+     ds_replcheck_path = os.path.join(m1.ds_paths.bin_dir, 'ds-replcheck')
+     f = tmpdir.mkdir("my_dir").join("password_file.txt")
+     f.write(PW_DM)
+ 
+     if ds_is_newer("1.4.1.2"):
+         tool_cmd = [ds_replcheck_path, 'online', '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath,
+                     '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport),
+                     '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)]
+     else:
+         tool_cmd = [ds_replcheck_path, '-b', DEFAULT_SUFFIX, '-D', DN_DM, '-y', f.strpath,
+                     '-m', 'ldaps://{}:{}'.format(m1.host, m1.sslport),
+                     '-r', 'ldaps://{}:{}'.format(m2.host, m2.sslport)]
+ 
+     subprocess.Popen(tool_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, encoding='utf-8')
+ 
+ 
  if __name__ == '__main__':
      # Run isolated
      # -s for DEBUG mode

@@ -12,6 +12,12 @@ 
  from lib389.topologies import topology_st as topo
  from lib389.dbgen import dbgen
  from lib389._constants import DEFAULT_SUFFIX
+ from lib389.tasks import *
+ from lib389.idm.user import UserAccounts
+ import threading
+ import time
+ 
+ from lib389.idm.directorymanager import DirectoryManager
  
  pytestmark = pytest.mark.tier1
  
@@ -30,6 +36,88 @@ 
  TEST_DEFAULT_NAME = "default"
  
  
+ class AddDelUsers(threading.Thread):
+     def __init__(self, inst):
+         threading.Thread.__init__(self)
+         self.daemon = True
+         self.inst = inst
+         self._should_stop = False
+         self._ran = False
+ 
+     def run(self):
+         # Add 1000 entries
+         log.info('Run.')
+         conn = DirectoryManager(self.inst.standalone).bind()
+ 
+         time.sleep(30)
+         log.info('Adding users.')
+         for i in range(1000):
+             user = UserAccounts(conn, DEFAULT_SUFFIX)
+             users = user.create_test_user(uid=i)
+             users.delete()
+             self._ran = True
+             if self._should_stop:
+                 break
+         if not self._should_stop:
+             raise RuntimeError('We finished too soon.')
+         conn.close()
+ 
+     def stop(self):
+         self._should_stop = True
+ 
+     def has_started(self):
+         return self._ran
+ 
+ 
+ def test_replay_import_operation(topo):
+     """ Check after certain failed import operation, is it
+      possible to replay an import operation
+ 
+     :id: 5f5ca532-8e18-4f7b-86bc-ac585215a473
+     :feature: Import
+     :setup: Standalone instance
+     :steps:
+         1. Export the backend into an ldif file
+         2. Perform high load of operation on the server (Add/Del users)
+         3. Perform an import operation
+         4. Again perform an import operation (same as 3)
+     :expectedresults:
+         1. It should be successful
+         2. It should be successful
+         3. It should be unsuccessful, should give OPERATIONS_ERROR
+         4. It should be successful now
+     """
+     log.info("Exporting LDIF online...")
+     ldif_dir = topo.standalone.get_ldif_dir()
+     export_ldif = ldif_dir + '/export.ldif'
+ 
+     r = ExportTask(topo.standalone)
+     r.export_suffix_to_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
+     r.wait()
+     add_del_users1 = AddDelUsers(topo)
+     add_del_users1.start()
+ 
+     log.info("Importing LDIF online, should raise operation error.")
+ 
+     trials = 0
+     while not add_del_users1.has_started() and trials < 10:
+         trials += 1
+         time.sleep(1)
+         r = ImportTask(topo.standalone)
+         try:
+             r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
+         except ldap.OPERATIONS_ERROR:
+             break
+         log.info(f'Looping. Tried {trials} times so far.')
+     add_del_users1.stop()
+     add_del_users1.join()
+ 
+     log.info("Importing LDIF online")
+ 
+     r = ImportTask(topo.standalone)
+     r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
+ 
+ 
  def test_import_be_default(topo):
      """ Create a backend using the name "default". previously this name was
      used int
@@ -129,7 +217,7 @@ 
      log.info('Adding suffix:{} and backend: {}'.format(TEST_SUFFIX2, TEST_BACKEND2))
      backends = Backends(topo.standalone)
      backend = backends.create(properties={'nsslapd-suffix': TEST_SUFFIX2,
-                                            'name': TEST_BACKEND2})
+                                           'name': TEST_BACKEND2})
  
      log.info('Create LDIF file and import it')
      ldif_dir = topo.standalone.get_ldif_dir()

@@ -12,14 +12,14 @@ 
  from lib389.idm.user import TEST_USER_PROPERTIES, UserAccounts
  from lib389.pwpolicy import PwPolicyManager
  from lib389.utils import *
- from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db
+ from lib389.topologies import topology_m2 as topo_m2, TopologyMain, topology_m3 as topo_m3, create_topology, _remove_ssca_db, topology_i2 as topo_i2
  from lib389._constants import *
  from lib389.idm.organizationalunit import OrganizationalUnits
  from lib389.idm.user import UserAccount
  from lib389.idm.group import Groups, Group
  from lib389.idm.domain import Domain
  from lib389.idm.directorymanager import DirectoryManager
- from lib389.replica import Replicas, ReplicationManager, Changelog5
+ from lib389.replica import Replicas, ReplicationManager, Changelog5, BootstrapReplicationManager
  from lib389.agreement import Agreements
  from lib389 import pid_from_file
  
  
@@ -181,6 +181,57 @@ 
      pwp.create_subtree_policy(parent, policy_props)
  
  
+ def test_special_symbol_replica_agreement(topo_i2):
+     """ Check if agreement starts with "cn=->..." then
+     after upgrade does it get removed.
+ 
+     :id: 68aa0072-4dd4-4e33-b107-cb383a439125
+     :setup: two standalone instance
+     :steps:
+         1. Create and Enable Replication on standalone2 and role as consumer
+         2. Create and Enable Replication on standalone1 and role as master
+         3. Create a Replication agreement starts with "cn=->..."
+         4. Perform an upgrade operation over the master
+         5. Check if the agreement is still present or not.
+     :expectedresults:
+         1. It should be successful
+         2. It should be successful
+         3. It should be successful
+         4. It should be successful
+         5. It should be successful
+     """
+ 
+     master = topo_i2.ins["standalone1"]
+     consumer = topo_i2.ins["standalone2"]
+     consumer.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=ReplicaRole.CONSUMER, replicaId=CONSUMER_REPLICAID)
+     repl = ReplicationManager(DEFAULT_SUFFIX)
+     repl.create_first_master(master)
+ 
+     properties = {RA_NAME: '-\\3meTo_{}:{}'.format(consumer.host,
+                                                    str(consumer.port)),
+                   RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+                   RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+                   RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+                   RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ 
+     master.agreement.create(suffix=SUFFIX,
+                             host=consumer.host,
+                             port=consumer.port,
+                             properties=properties)
+ 
+     master.agreement.init(SUFFIX, consumer.host, consumer.port)
+ 
+     replica_server = Replicas(master).get(DEFAULT_SUFFIX)
+ 
+     master.upgrade('online')
+ 
+     agmt = replica_server.get_agreements().list()[0]
+ 
+     assert agmt.get_attr_val_utf8('cn') == '-\\3meTo_{}:{}'.format(consumer.host,
+                                                                    str(consumer.port))
+ 
+ 
+ 
  def test_double_delete(topo_m2, create_entry):
      """Check that double delete of the entry doesn't crash server
  

Description: Added new test cases for the following issues:

https://pagure.io/389-ds-base/issue/50028
https://pagure.io/389-ds-base/issue/49946
https://pagure.io/389-ds-base/issue/50117

Individual commit messages are there to differentiate them.

Should I also remove the file at the end of the code, or is this fine?

I really want to avoid this; the problem is that the load operation kicks off with a bit of delay, so sometimes it won't raise an error. I can add a condition to check whether the operation has started, but I'm not sure that will help.

> Should I also remove the file at the end of the code, or is this fine?

Yes, but the best would be to use a pytest facility: https://docs.pytest.org/en/latest/tmpdir.html
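
For reference, a minimal sketch of that pytest facility (the test name here is hypothetical; PW_DM is the constant already used in the patch; pytest provisions the directory per test and prunes old ones on its own, so no manual cleanup is needed):

    def test_with_password_file(tmpdir):
        # pytest creates a fresh per-test directory and prunes old
        # ones automatically, so the file never needs explicit removal.
        pw_file = tmpdir.join("password_file.txt")
        pw_file.write(PW_DM)
        assert pw_file.read() == PW_DM
        # Hand pw_file.strpath to the tool, e.g. ['-y', pw_file.strpath].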

The same goes for this; there is a slight delay even after waiting for the operation to finish.

This is added as an extra precaution so that the original replication doesn't interfere with the other one, though the code works without it.

> I really want to avoid this; the problem is that the load operation kicks off with a bit of delay, so sometimes it won't raise an error. I can add a condition to check whether the operation has started, but I'm not sure that will help.

Just quickly looking, I currently see two options:
- Start adding a few users in the core test function, then init the import task, then start the thread function. This gives you the initial load (a rough sketch follows below).
- Use async def with a single yield between the two for loops mangling the users, and in the test function do something like async for ... with the body running the import task.
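
A rough sketch of the first option, not the patch itself; it assumes the export has already been written to export_ldif as in the test above, reuses only names from this PR (AddDelUsers, ImportTask, UserAccounts), and the count of 50 initial users is an arbitrary choice:

    def test_replay_import_operation(topo):
        export_ldif = topo.standalone.get_ldif_dir() + '/export.ldif'  # written by the ExportTask step

        # Initial load: add a few users synchronously so the server is
        # already busy before the import task is kicked off.
        users = UserAccounts(topo.standalone, DEFAULT_SUFFIX)
        for i in range(50):
            users.create_test_user(uid=i)

        # Start the background add/delete thread, then run the import,
        # which should now collide with the pending operations.
        add_del_users1 = AddDelUsers(topo)
        add_del_users1.start()

        r = ImportTask(topo.standalone)
        with pytest.raises(ldap.OPERATIONS_ERROR):
            r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
        add_del_users1.stop()
        add_del_users1.join()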

rebased onto 5ef38f6a86c759806e87a8e5951115a1ec82e59e

4 years ago

@mhonek Hey, I got rid of the second sleep by using asyncio.wait_for, but for the other one, as per your suggestion, I tried using an initial load (by adding users), but that didn't help. It's still failing with "didn't raise ldap.OPERATIONS_ERROR".

@mhonek BTW, I was wondering, do you still want me to use async def? The issue with the other sleep is taken care of, so do we still need that? Also, if yes, please tell me how I could do that, because as far as I can see it does the same thing as multithreading. Please feel free to guide me here :) I may be missing something. Thanks!

First, since you rebased I cannot tell what changes you made, as I don't have the previous version.

Second, asyncio as currently used in the code is incorrect (which is what a runtime warning tells you too; the thing hasn't been awaited). There's a lot more you'd need to change to use it correctly. Better to stay with threading. The first sleep is kinda OK. I'd do something like this (although not entirely correct, it works):

diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
index dda9f0d25..bb94d4f3a 100644
--- a/dirsrvtests/tests/suites/import/regression_test.py
+++ b/dirsrvtests/tests/suites/import/regression_test.py
@@ -42,6 +42,7 @@ class AddDelUsers(threading.Thread):
         threading.Thread.__init__(self)
         self.daemon = True
         self.inst = inst
+        self._should_stop = False

     def run(self):
         # Add 1000 entries
@@ -52,8 +53,15 @@ class AddDelUsers(threading.Thread):
             user = UserAccounts(conn, DEFAULT_SUFFIX)
             users = user.create_test_user(uid=i)
             users.delete()
+            if self._should_stop:
+                break
+        if not self._should_stop:
+            raise RuntimeError('We finished too soon.')
         conn.close()

+    def stop(self):
+        self._should_stop = True
+

 def test_import_be_default(topo):
     """ Create a backend using the name "default". previously this name was
@@ -207,8 +215,9 @@ def test_replay_import_operation(topo):
     r = ImportTask(topo.standalone)
     with pytest.raises(ldap.OPERATIONS_ERROR):
         r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
+    add_del_users1.stop()
     add_del_users1.join()
-    asyncio.wait_for(lambda: add_del_users1._is_stopped, timeout=20)
+
     log.info("Importing LDIF online")
     r = ImportTask(topo.standalone)
     r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)

Third, the string representation of the DN in test_special_symbol_replica_agreement is incorrect. It causes any subsequent removal to fail. This helps:

diff --git a/dirsrvtests/tests/suites/replication/regression_test.py b/dirsrvtests/tests/suites/replication/regression_test.py
index fd091ecb7..e6acddffd 100644
--- a/dirsrvtests/tests/suites/replication/regression_test.py
+++ b/dirsrvtests/tests/suites/replication/regression_test.py
@@ -726,7 +726,7 @@ def test_special_symbol_replica_agreement(topo_m2):
         topo_m2.ms["master2"].modify_s(DN, [(ldap.MOD_REPLACE,
                                                  'nsDS5ReplicaBindDN', ensure_bytes(defaultProperties[REPLICATION_BIND_DN]))])

-        properties = {RA_NAME: '->meTo_{}:{}'.format(topo_m2.ms["master2"].host,
+        properties = {RA_NAME: '-\\3EmeTo_{}:{}'.format(topo_m2.ms["master2"].host,
                                                    str(topo_m2.ms["master2"].port)),
                       RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
                       RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
@@ -747,7 +747,7 @@ def test_special_symbol_replica_agreement(topo_m2):
         topo_m2.ms["master2"].restart()
         agmt = replica_server.get_agreements().list()[0]

-        assert agmt.get_attr_val_utf8('cn') == '->meTo_{}:{}'.format(topo_m2.ms["master2"].host,
+        assert agmt.get_attr_val_utf8('cn') == '-\\3EmeTo_{}:{}'.format(topo_m2.ms["master2"].host,
                                                                 str(topo_m2.ms["master2"].port))
         agmt.delete()

... but, anyway, this fails in runUpgrade for me; not sure what's going on...

@mhonek Oops! Sorry not to mention this before, but runUpgrade is broken and I fixed it in another PR: https://pagure.io/389-ds-base/pull-request/50287#_3__52 Also, thank you so much for the fix. BTW, should I rebase things and then push it?

rebased onto 4255c6ac2094de0ad0b519c47c78e7fb39b1b74e

4 years ago

@mhonek Hey, I have updated the code with the changes you suggested; I also mentioned the cause of the failing runUpgrade in the comment above. Thanks for all the help!

Once we have the ACK, I will rebase it.

rebased onto 9e7f9835602e5f71eb1bcc9ca3b94f999990eead

4 years ago

rebased onto b78ccf1ef8bb9cf4a3de17900ece67135ef75f7c

4 years ago

Sorry it took so long... I've rebased to current master and encountered some failures:

suites/import/regression_test.py::test_replay_import_operation FAILED

Run by itself the test passes (most of the time). However, when run after some other test it usually fails for me; it is highly unstable, e.g. sometimes the import passes when it should not, but the add/delete user fails with Operations Error, etc. We should think of a different implementation, but sadly I currently have no good options...

suites/replication/regression_test.py::test_special_symbol_replica_agreement FAILED

Again, run by itself the test passes. However, it fails when run in a queue of other tests. Also, after consulting with Simon we agreed the test should not reuse any existing topology (topo_m2 in this case); rather, the test should create the required topology from scratch and then properly discard it -- please try this approach.

> Sorry it took so long... I've rebased to current master and encountered some failures:
> suites/import/regression_test.py::test_replay_import_operation FAILED
> Run by itself the test passes (most of the time). However, when run after some other test it usually fails for me; it is highly unstable, e.g. sometimes the import passes when it should not, but the add/delete user fails with Operations Error, etc. We should think of a different implementation, but sadly I currently have no good options...

No problem, take your time.

> suites/replication/regression_test.py::test_special_symbol_replica_agreement FAILED
> Again, run by itself the test passes. However, it fails when run in a queue of other tests. Also, after consulting with Simon we agreed the test should not reuse any existing topology (topo_m2 in this case); rather, the test should create the required topology from scratch and then properly discard it -- please try this approach.

I will start working on this. So are you suggesting creating a new topology like topo_m2, but one that creates two separate DS instances?

In https://pagure.io/389-ds-base/pull-request/50494#comment-96314 @mhonek reports the test is unstable.
The test looks very good, but being dynamic there is a chance it succeeds on a buggy version.
To trigger the bug you need to apply load during the import task. The load looks good because ADD/DEL are usually long-lasting operations.

IMHO the load could be improved by using several AddDelUsers threads, so that a pending operation is more likely when the import starts.
Also, there will always be a small window for the import to be scheduled at a time when there is no pending operation; would it be possible to iterate the preparation phase (load+import) until OPERATIONS_ERROR? If it never hits OPERATIONS_ERROR, the test could report no result.
Finally, if an import starts, new add/del operations will be rejected until the import completes; the AddDelUsers loop could abort without error when an add/del fails, which means the load was not fast enough to trigger an import failure.

  • Increasing load: I think moving from 1 thread to 2-3 threads should be good.
  • Both items (load and import) are in competition; if one wins, the other fails. To verify the bug, a first import should fail and the second should succeed.
    The test should iterate until a first import fails; the failure means the import was started while ADD/DEL operations were running.
    The bug verification requires this initial failure, so after add_del_users1.start(), loop until r.import_suffix_from_ldif returns OPERATIONS_ERROR.

Then, once the import has failed, it is ready to test the fix.
You may start a second import and it should succeed.

I think that is enough to verify the bug.
I suggested running load during the second import, but it is not necessary to verify the bug. A rough sketch of this proposal follows below.
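
A sketch of the proposal above, not a definitive implementation; the helper name run_import_under_load, the thread count, and the trial limit are illustrative choices, while AddDelUsers, ImportTask, and the lib389 calls are the ones already shown in this PR:

    def run_import_under_load(topo, export_ldif, num_threads=3, max_trials=10):
        # Several AddDelUsers threads make it far more likely that an
        # ADD/DEL is pending at the moment the import task is scheduled.
        threads = [AddDelUsers(topo) for _ in range(num_threads)]
        for t in threads:
            t.start()

        failed_once = False
        for _ in range(max_trials):
            r = ImportTask(topo.standalone)
            try:
                r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
            except ldap.OPERATIONS_ERROR:
                failed_once = True   # import collided with pending operations
                break

        for t in threads:
            t.stop()
            t.join()

        if not failed_once:
            pytest.skip('Import never collided with the load; no result.')

        # With the fix in place, replaying the import should now succeed.
        ImportTask(topo.standalone).import_suffix_from_ldif(ldiffile=export_ldif,
                                                            suffix=DEFAULT_SUFFIX)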

> Increasing load: I think moving from 1 thread to 2-3 threads should be good.
> Both items (load and import) are in competition; if one wins, the other fails. To verify the bug, a first import should fail and the second should succeed.
> The test should iterate until a first import fails; the failure means the import was started while ADD/DEL operations were running.
> The bug verification requires this initial failure, so after add_del_users1.start(), loop until r.import_suffix_from_ldif returns OPERATIONS_ERROR.
>
> Then, once the import has failed, it is ready to test the fix.
> You may start a second import and it should succeed.
> I think that is enough to verify the bug.
> I suggested running load during the second import, but it is not necessary to verify the bug.

Thanks for your suggestions, I will make the changes :)

rebased onto 8d63d9ddfec969826da3fd34afc51115c0697659

4 years ago

@mhonek I found out why this test was failing when we ran it with all the other test cases; I still need to investigate, but it was because of the other test cases. If I run this test case first, it passes every time. Also, I have improved the test case by replacing the hardcoded sleep with a conditional one. I have included a new param which I am using as a flag.

I am not a Python expert, but I am unsure there is a loop around the above import.
It should not stop add_del_users1 until the above import fails; the failure of the first import is a precondition for testing whether the second import succeeds.

@tbordaz I know this is different from the approach you mentioned. For example, I am not adding extra load by increasing the number of threads. Also, I am using iteration, but the approach is a bit different. What happens is that in the second thread, when I create a connection for LDAP, it takes a bit of time, and during this time the other thread is running the import operation, which is not an ideal case or time to even start the test scenarios. So I included a check in the form of a flag; the flag is only set once the user-creation process starts. Also, if it is still False, it will wait a specific time for it to become True, to avoid an infinite-loop condition. Let me know what you think about this!

@aadhikari Sorry, I misunderstood your question. Yes, I noticed this iteration to let the load start before attempting the import. I find it very nice. It can be difficult to synchronize several threads, and as I am lazy I often rely on sleep, but your solution is much more elegant.

My understanding is that the new solution with the flag waits for the first user-addition request to start. This is OK, but we may still easily get into a race condition where the import just succeeds or the user-adder raises OperationsError instead.

I like what Thierry proposed before, the thing with the loop -- that is, just let the code try harder. I.e. we should loop the actual code that does the import+(OperationsError assertion) until we get the expected result (with a reasonable counter timeout, of course). WDYT?

3 new commits added

  • Issue 50117 - Add a new CI test case
  • Issue 49946 - Add a new CI test case
  • Issue 50028 - Add a new CI test case
4 years ago

@mhonek As per your and @tbordaz's review, I have added the check inside the loop, which checks for OPERATIONS_ERROR from the import operation again and again until it throws the exception, or waits until it times out. Let me know whether you are OK with this or not. Also, I am double-checking it with the flag so the operation will only start when the load is in action; if by any chance we miss that, that part will be handled in the try/except block.

Note: I've been testing rebased on 7cb0a1f.

Ad Import test

LGTM. I'd just suggest some minor updates below (rename flag to _ran, and timeout to trials, for better transparency; change the actual max trial count to 10 -- if that weren't enough, something would need to be done anyway (360*5 seconds is too much); also, the last line is missing an LF character). Let me know what you think.

commit fc34d78bcc00f5fd259d0a3dfbbd4ec68f7a3879 (HEAD -> testing-50494)
Author: Matus Honek <mhonek@redhat.com>
Date:   Thu Jan 9 08:57:01 2020 +0000

    import/regression_test.py::test_replay_import_operation suggestions

diff --git a/dirsrvtests/tests/suites/import/regression_test.py b/dirsrvtests/tests/suites/import/regression_test.py
index ba0dc120e..9fb35c4b3 100644
--- a/dirsrvtests/tests/suites/import/regression_test.py
+++ b/dirsrvtests/tests/suites/import/regression_test.py
@@ -42,17 +42,20 @@ class AddDelUsers(threading.Thread):
         self.daemon = True
         self.inst = inst
         self._should_stop = False
-        self.flag = False
+        self._ran = False

     def run(self):
         # Add 1000 entries
+        log.info('Run.')
         conn = DirectoryManager(self.inst.standalone).bind()

+        time.sleep(30)
+        log.info('Adding users.')
         for i in range(1000):
             user = UserAccounts(conn, DEFAULT_SUFFIX)
             users = user.create_test_user(uid=i)
             users.delete()
-            self.flag = True
+            self._ran = True
             if self._should_stop:
                 break
         if not self._should_stop:
@@ -62,8 +65,8 @@ class AddDelUsers(threading.Thread):
     def stop(self):
         self._should_stop = True

-    def get_flag(self):
-        return self.flag
+    def has_started(self):
+        return self._ran


 def test_replay_import_operation(topo):
@@ -96,15 +99,16 @@ def test_replay_import_operation(topo):

     log.info("Importing LDIF online, should raise operation error.")

-    timeout = 0
-    while not add_del_users1.get_flag() and timeout < 360:
-        timeout += 1
-        time.sleep(5)
+    trials = 0
+    while not add_del_users1.has_started() and trials < 10:
+        trials += 1
+        time.sleep(1)
         r = ImportTask(topo.standalone)
         try:
             r.import_suffix_from_ldif(ldiffile=export_ldif, suffix=DEFAULT_SUFFIX)
         except ldap.OPERATIONS_ERROR:
             break
+        log.info(f'Looping. Tried {trials} times so far.')
     add_del_users1.stop()
     add_del_users1.join()

@@ -298,4 +302,4 @@ if __name__ == '__main__':
     # Run isolated
     # -s for DEBUG mode
     CURRENT_FILE = os.path.realpath(__file__)
-    pytest.main("-s {}".format(CURRENT_FILE))
\ No newline at end of file
+    pytest.main("-s {}".format(CURRENT_FILE))

Ad test_special_symbol_replica_agreement

We still need to address this as discussed before -- don't use topo_m2; rather, create separate instances, instantiate the agreement as required, and then discard them.

rebased onto e078814383ad30a35c7c4b32af04b1a8b1344b82

4 years ago

@mhonek I have made all the changes and fixed test_special_symbol_replica_agreement, please review. Also, I understood why it was failing when run with the whole test file. The issue is with one of the servers: "Could not open a connection to the server at <host_name> port 39003 as 'cn=Directory Manager'". This can be seen in the logs!

LGTM, please rebase and fixup commits into one. Thanks!

rebased onto 0362fa3

4 years ago

Pull-Request has been merged by vashirov

4 years ago

389-ds-base is moving from Pagure to Github. This means that new issues and pull requests
will be accepted only in 389-ds-base's github repository.

This pull request has been cloned to Github as issue and is available here:
- https://github.com/389ds/389-ds-base/issues/3551

If you want to continue to work on the PR, please navigate to the github issue,
download the patch from the attachments and file a new pull request.

Thank you for understanding. We apologize for all inconvenience.

Pull-Request has been closed by spichugi

3 years ago